Update package dependencies, enhance Dockerfiles, and improve workspace configuration

- Added glob dependency to package.json and pnpm-lock.yaml for better file handling.
- Updated pnpm-workspace.yaml to include additional built dependencies for improved management.
- Refactored Dockerfiles for BFF and Portal to enhance security and optimize build processes.
- Improved entrypoint scripts to include better logging and readiness checks for services.
- Cleaned up TypeScript configuration files for consistency and alignment with project standards.
This commit is contained in:
barsa 2025-12-10 16:31:18 +09:00
parent 9e27380069
commit bc5c7c9bd4
12 changed files with 946 additions and 386 deletions

View File

@ -3,7 +3,7 @@
# BFF (NestJS) Dockerfile
# =============================================================================
# Multi-stage build with BuildKit cache mounts for fast rebuilds
# Optimized for minimal image size and fast startup
# Optimized for minimal image size, security, and fast startup
# =============================================================================
ARG NODE_VERSION=22
@ -11,12 +11,11 @@ ARG PNPM_VERSION=10.25.0
ARG PRISMA_VERSION=7.1.0
# =============================================================================
# Stage 1: Builder
# Stage 1: Dependencies (cached layer)
# =============================================================================
FROM node:${NODE_VERSION}-alpine AS builder
FROM node:${NODE_VERSION}-alpine AS deps
ARG PNPM_VERSION
ARG PRISMA_VERSION
# Install build dependencies in single layer
RUN apk add --no-cache python3 make g++ openssl libc6-compat \
@ -30,17 +29,24 @@ COPY .npmrc pnpm-workspace.yaml package.json pnpm-lock.yaml ./
COPY packages/domain/package.json ./packages/domain/
COPY apps/bff/package.json ./apps/bff/
# Install dependencies with cache mount (separate layer for caching)
# Install all dependencies with cache mount (separate layer for better caching)
ENV HUSKY=0
RUN --mount=type=cache,id=pnpm-bff,target=/root/.local/share/pnpm/store \
pnpm install --frozen-lockfile
# =============================================================================
# Stage 2: Builder
# =============================================================================
FROM deps AS builder
ARG PRISMA_VERSION
# Copy source files
COPY tsconfig.json tsconfig.base.json ./
COPY packages/domain/ ./packages/domain/
COPY apps/bff/ ./apps/bff/
# Build: domain → Prisma generate → BFF
# Build: domain → Prisma generate → BFF (single RUN for better layer efficiency)
RUN pnpm --filter @customer-portal/domain build \
&& pnpm --filter @customer-portal/bff exec prisma generate \
&& pnpm --filter @customer-portal/bff build
@ -52,26 +58,29 @@ RUN pnpm deploy --filter @customer-portal/bff --prod /app/deploy \
&& cp -r packages/domain/dist deploy/node_modules/@customer-portal/domain/dist
# =============================================================================
# Stage 2: Production
# Stage 3: Production
# =============================================================================
FROM node:${NODE_VERSION}-alpine AS production
ARG PRISMA_VERSION
LABEL org.opencontainers.image.title="Customer Portal BFF" \
org.opencontainers.image.description="NestJS Backend-for-Frontend API"
org.opencontainers.image.description="NestJS Backend-for-Frontend API" \
org.opencontainers.image.vendor="Customer Portal"
# Install runtime dependencies only
# Install runtime dependencies only + security hardening
RUN apk add --no-cache dumb-init libc6-compat netcat-openbsd \
&& addgroup --system --gid 1001 nodejs \
&& adduser --system --uid 1001 nestjs
&& adduser --system --uid 1001 nestjs \
# Remove apk cache and unnecessary files
&& rm -rf /var/cache/apk/* /tmp/* /root/.npm
WORKDIR /app
# Set Prisma schema path before copying files
ENV PRISMA_SCHEMA_PATH=/app/prisma/schema.prisma
# Copy deploy bundle
# Copy deploy bundle with correct ownership in single layer
COPY --from=builder --chown=nestjs:nodejs /app/deploy ./
# Regenerate Prisma client for production paths and cleanup
@ -82,8 +91,8 @@ RUN rm -rf node_modules/.prisma \
&& ln -sf /app/prisma/schema.prisma /app/apps/bff/prisma/schema.prisma \
# Fix ownership
&& chown -R nestjs:nodejs /app/node_modules/.prisma /app/apps/bff/prisma \
# Cleanup npm cache
&& rm -rf /root/.npm /tmp/*
# Cleanup npm cache and temp files
&& rm -rf /root/.npm /tmp/* /root/.cache
# Copy entrypoint and setup directories
COPY --chown=nestjs:nodejs apps/bff/scripts/docker-entrypoint.sh ./docker-entrypoint.sh
@ -91,14 +100,20 @@ RUN chmod +x docker-entrypoint.sh \
&& mkdir -p secrets logs \
&& chown nestjs:nodejs secrets logs
# Security: Run as non-root user
USER nestjs
# Expose BFF port
EXPOSE 4000
# Environment configuration
ENV NODE_ENV=production \
PORT=4000 \
PRISMA_VERSION=${PRISMA_VERSION}
PRISMA_VERSION=${PRISMA_VERSION} \
# Node.js production optimizations
NODE_OPTIONS="--max-old-space-size=512"
# Health check for container orchestration
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
CMD node -e "fetch('http://localhost:4000/health').then(r=>r.ok||process.exit(1)).catch(()=>process.exit(1))"

View File

@ -60,7 +60,6 @@
"pg": "^8.16.3",
"pino": "^10.1.0",
"pino-http": "^11.0.0",
"pino-pretty": "^13.1.3",
"rate-limiter-flexible": "^9.0.0",
"reflect-metadata": "^0.2.2",
"rxjs": "^7.8.2",
@ -84,6 +83,7 @@
"@types/ssh2-sftp-client": "^9.0.6",
"@types/supertest": "^6.0.3",
"jest": "^30.2.0",
"pino-pretty": "^13.1.3",
"prisma": "^7.1.0",
"supertest": "^7.1.4",
"ts-jest": "^29.4.6",

View File

@ -11,10 +11,15 @@ set -e
# =============================================================================
echo "🚀 Starting Customer Portal Backend..."
PRISMA_VERSION="${PRISMA_VERSION:-6.16.0}"
echo " Version: ${APP_VERSION:-unknown}"
echo " Node: $(node --version)"
PRISMA_VERSION="${PRISMA_VERSION:-7.1.0}"
export PRISMA_SCHEMA_PATH="/app/prisma/schema.prisma"
# Handle Salesforce private key from base64 environment variable
# =============================================================================
# Salesforce Private Key Handling
# =============================================================================
if [ -n "$SF_PRIVATE_KEY_BASE64" ]; then
echo "📝 Decoding Salesforce private key..."
mkdir -p /app/secrets
@ -24,42 +29,81 @@ if [ -n "$SF_PRIVATE_KEY_BASE64" ]; then
echo "✅ Salesforce private key configured"
fi
# =============================================================================
# Wait for Dependencies
# =============================================================================
# Maximum wait time in seconds
MAX_WAIT="${MAX_WAIT:-120}"
wait_for_service() {
local host="$1"
local port="$2"
local name="$3"
local waited=0
echo "⏳ Waiting for $name ($host:$port)..."
while ! nc -z "$host" "$port" 2>/dev/null; do
waited=$((waited + 2))
if [ $waited -ge "$MAX_WAIT" ]; then
echo "❌ Timeout waiting for $name after ${MAX_WAIT}s"
return 1
fi
sleep 2
done
echo "$name is ready (waited ${waited}s)"
return 0
}
# Wait for database if DATABASE_URL is set
# Extract host:port from postgresql://user:pass@host:port/db
if [ -n "$DATABASE_URL" ]; then
DB_HOST=$(echo "$DATABASE_URL" | sed -E 's|.*@([^:/]+):([0-9]+)/.*|\1|')
DB_PORT=$(echo "$DATABASE_URL" | sed -E 's|.*@([^:/]+):([0-9]+)/.*|\2|')
if [ -n "$DB_HOST" ] && [ -n "$DB_PORT" ]; then
echo "⏳ Waiting for database ($DB_HOST:$DB_PORT)..."
until nc -z "$DB_HOST" "$DB_PORT" 2>/dev/null; do
sleep 2
done
echo "✅ Database is ready"
if ! wait_for_service "$DB_HOST" "$DB_PORT" "database"; then
echo "⚠️ Starting without database connection - some features may not work"
fi
fi
fi
# Wait for Redis if REDIS_URL is set
# Extract host:port from redis://host:port/db
# Extract host:port from redis://host:port/db or redis://:password@host:port/db
if [ -n "$REDIS_URL" ]; then
REDIS_HOST=$(echo "$REDIS_URL" | sed -E 's|redis://([^:/]+):([0-9]+).*|\1|')
REDIS_PORT=$(echo "$REDIS_URL" | sed -E 's|redis://([^:/]+):([0-9]+).*|\2|')
# Handle both redis://host:port and redis://:password@host:port formats
REDIS_HOST=$(echo "$REDIS_URL" | sed -E 's|redis://([^:@]+@)?([^:/]+):([0-9]+).*|\2|')
REDIS_PORT=$(echo "$REDIS_URL" | sed -E 's|redis://([^:@]+@)?([^:/]+):([0-9]+).*|\3|')
if [ -n "$REDIS_HOST" ] && [ -n "$REDIS_PORT" ]; then
echo "⏳ Waiting for cache ($REDIS_HOST:$REDIS_PORT)..."
until nc -z "$REDIS_HOST" "$REDIS_PORT" 2>/dev/null; do
sleep 2
done
echo "✅ Cache is ready"
if ! wait_for_service "$REDIS_HOST" "$REDIS_PORT" "cache"; then
echo "⚠️ Starting without Redis connection - some features may not work"
fi
fi
fi
# Run database migrations if enabled
# =============================================================================
# Database Migrations
# =============================================================================
if [ "$RUN_MIGRATIONS" = "true" ] && [ -n "$DATABASE_URL" ]; then
echo "🗄️ Running database migrations..."
npx prisma@"${PRISMA_VERSION}" migrate deploy --schema=/app/prisma/schema.prisma
echo "✅ Migrations complete"
if npx prisma@"${PRISMA_VERSION}" migrate deploy --schema=/app/prisma/schema.prisma; then
echo "✅ Migrations complete"
else
echo "⚠️ Migration failed - check database connectivity"
# Continue anyway in case migrations are already applied
fi
fi
# =============================================================================
# Start Application
# =============================================================================
echo "🌐 Starting server on port ${PORT:-4000}..."
echo " Environment: ${NODE_ENV:-development}"
echo " Log level: ${LOG_LEVEL:-info}"
echo ""
# Execute the main command (node dist/main.js)
exec "$@"

View File

@ -1,5 +1,5 @@
{
"extends": "./tsconfig.base.json",
"extends": "../../tsconfig.base.json",
"compilerOptions": {
"noEmit": true,
"composite": false,

View File

@ -3,16 +3,16 @@
# Portal (Next.js) Dockerfile
# =============================================================================
# Multi-stage build with standalone output for minimal image size
# Optimized for fast builds and small production images
# Optimized for fast builds, security, and small production images
# =============================================================================
ARG NODE_VERSION=22
ARG PNPM_VERSION=10.25.0
# =============================================================================
# Stage 1: Builder
# Stage 1: Dependencies (cached layer)
# =============================================================================
FROM node:${NODE_VERSION}-alpine AS builder
FROM node:${NODE_VERSION}-alpine AS deps
ARG PNPM_VERSION
@ -33,6 +33,11 @@ ENV HUSKY=0
RUN --mount=type=cache,id=pnpm-portal,target=/root/.local/share/pnpm/store \
pnpm install --frozen-lockfile
# =============================================================================
# Stage 2: Builder
# =============================================================================
FROM deps AS builder
# Copy source files
COPY tsconfig.json tsconfig.base.json ./
COPY packages/domain/ ./packages/domain/
@ -54,34 +59,43 @@ RUN pnpm --filter @customer-portal/domain build \
&& pnpm --filter @customer-portal/portal build
# =============================================================================
# Stage 2: Production
# Stage 3: Production
# =============================================================================
FROM node:${NODE_VERSION}-alpine AS production
LABEL org.opencontainers.image.title="Customer Portal Frontend" \
org.opencontainers.image.description="Next.js Customer Portal"
org.opencontainers.image.description="Next.js Customer Portal" \
org.opencontainers.image.vendor="Customer Portal"
# Minimal runtime dependencies (wget not needed - healthcheck uses node fetch)
# Minimal runtime dependencies + security hardening
RUN apk add --no-cache dumb-init libc6-compat \
&& addgroup --system --gid 1001 nodejs \
&& adduser --system --uid 1001 nextjs
&& adduser --system --uid 1001 nextjs \
# Remove apk cache and unnecessary files
&& rm -rf /var/cache/apk/* /tmp/* /root/.npm
WORKDIR /app
# Copy standalone build artifacts
# Copy standalone build artifacts with correct ownership
COPY --from=builder --chown=nextjs:nodejs /app/apps/portal/.next/standalone ./
COPY --from=builder --chown=nextjs:nodejs /app/apps/portal/.next/static ./apps/portal/.next/static
COPY --from=builder --chown=nextjs:nodejs /app/apps/portal/public ./apps/portal/public
# Security: Run as non-root user
USER nextjs
# Expose frontend port
EXPOSE 3000
# Environment configuration
ENV NODE_ENV=production \
NEXT_TELEMETRY_DISABLED=1 \
PORT=3000 \
HOSTNAME="0.0.0.0"
HOSTNAME="0.0.0.0" \
# Node.js production optimizations
NODE_OPTIONS="--max-old-space-size=512"
# Health check for container orchestration
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
CMD node -e "fetch('http://localhost:3000/api/health').then(r=>r.ok||process.exit(1)).catch(()=>process.exit(1))"

View File

@ -14,15 +14,6 @@ const withBundleAnalyzer = bundleAnalyzer({
const nextConfig = {
output: process.env.NODE_ENV === "production" ? "standalone" : undefined,
serverExternalPackages: [
"pino",
"pino-pretty",
"pino-abstract-transport",
"thread-stream",
"sonic-boom",
"tailwind-merge",
],
turbopack: {
resolveAlias: {
"@customer-portal/domain": path.join(workspaceRoot, "packages/domain/dist"),
@ -56,6 +47,11 @@ const nextConfig = {
async headers() {
const isDev = process.env.NODE_ENV === "development";
const connectSources = ["'self'", "https:"];
if (isDev) {
connectSources.push("http://localhost:*");
}
return [
{
source: "/(.*)",
@ -68,11 +64,11 @@ const nextConfig = {
key: "Content-Security-Policy",
value: [
"default-src 'self'",
"script-src 'self' 'unsafe-inline' 'unsafe-eval'",
"script-src 'self'",
"style-src 'self' 'unsafe-inline'",
"img-src 'self' data: https:",
"font-src 'self' data:",
`connect-src 'self' https:${isDev ? " http://localhost:*" : ""}`,
`connect-src ${connectSources.join(" ")}`,
"frame-ancestors 'none'",
].join("; "),
},

View File

@ -0,0 +1,188 @@
# =============================================================================
# Customer Portal - Production Docker Compose
# =============================================================================
# Full stack for standalone production deployments (non-Portainer)
# For Portainer/Plesk, use docker/portainer/docker-compose.yml instead
# =============================================================================
services:
# ---------------------------------------------------------------------------
# Frontend (Next.js)
# ---------------------------------------------------------------------------
frontend:
build:
context: ../..
dockerfile: apps/portal/Dockerfile
args:
- NODE_VERSION=22
- PNPM_VERSION=${PNPM_VERSION:-10.25.0}
- NEXT_PUBLIC_API_BASE=${NEXT_PUBLIC_API_BASE:-/api}
- NEXT_PUBLIC_APP_NAME=${NEXT_PUBLIC_APP_NAME:-Customer Portal}
- NEXT_PUBLIC_APP_VERSION=${NEXT_PUBLIC_APP_VERSION:-1.0.0}
image: portal-frontend:${IMAGE_TAG:-latest}
container_name: portal-frontend
ports:
- "${FRONTEND_PORT:-3000}:3000"
environment:
- NODE_ENV=production
- PORT=3000
- HOSTNAME=0.0.0.0
restart: unless-stopped
depends_on:
backend:
condition: service_healthy
networks:
- portal-network
healthcheck:
test: ["CMD", "node", "-e", "fetch('http://localhost:3000/api/health').then(r=>r.ok||process.exit(1)).catch(()=>process.exit(1))"]
interval: 30s
timeout: 10s
start_period: 40s
retries: 3
# ---------------------------------------------------------------------------
# Backend (NestJS BFF)
# ---------------------------------------------------------------------------
backend:
build:
context: ../..
dockerfile: apps/bff/Dockerfile
args:
- NODE_VERSION=22
- PNPM_VERSION=${PNPM_VERSION:-10.25.0}
- PRISMA_VERSION=7.1.0
image: portal-backend:${IMAGE_TAG:-latest}
container_name: portal-backend
ports:
- "${BACKEND_PORT:-4000}:4000"
environment:
# Core
- NODE_ENV=production
- APP_NAME=${APP_NAME:-customer-portal-bff}
- APP_BASE_URL=${APP_BASE_URL}
- BFF_PORT=4000
- PORT=4000
# Database
- DATABASE_URL=postgresql://${POSTGRES_USER:-portal}:${POSTGRES_PASSWORD}@database:5432/${POSTGRES_DB:-portal_prod}?schema=public
# Redis
- REDIS_URL=redis://cache:6379/0
# Security
- JWT_SECRET=${JWT_SECRET}
- JWT_EXPIRES_IN=${JWT_EXPIRES_IN:-7d}
- BCRYPT_ROUNDS=${BCRYPT_ROUNDS:-12}
- CORS_ORIGIN=${CORS_ORIGIN}
- TRUST_PROXY=true
- CSRF_SECRET_KEY=${CSRF_SECRET_KEY}
# Auth
- AUTH_ALLOW_REDIS_TOKEN_FAILOPEN=${AUTH_ALLOW_REDIS_TOKEN_FAILOPEN:-false}
- AUTH_REQUIRE_REDIS_FOR_TOKENS=${AUTH_REQUIRE_REDIS_FOR_TOKENS:-false}
- AUTH_MAINTENANCE_MODE=${AUTH_MAINTENANCE_MODE:-false}
# Rate Limiting
- RATE_LIMIT_TTL=${RATE_LIMIT_TTL:-60}
- RATE_LIMIT_LIMIT=${RATE_LIMIT_LIMIT:-100}
- EXPOSE_VALIDATION_ERRORS=false
# WHMCS
- WHMCS_BASE_URL=${WHMCS_BASE_URL}
- WHMCS_API_IDENTIFIER=${WHMCS_API_IDENTIFIER}
- WHMCS_API_SECRET=${WHMCS_API_SECRET}
# Salesforce
- SF_LOGIN_URL=${SF_LOGIN_URL}
- SF_CLIENT_ID=${SF_CLIENT_ID}
- SF_USERNAME=${SF_USERNAME}
- SF_EVENTS_ENABLED=${SF_EVENTS_ENABLED:-true}
- SF_PRIVATE_KEY_BASE64=${SF_PRIVATE_KEY_BASE64}
- SF_PRIVATE_KEY_PATH=/app/secrets/sf-private.key
# Freebit
- FREEBIT_BASE_URL=${FREEBIT_BASE_URL:-https://i1.mvno.net/emptool/api}
- FREEBIT_OEM_ID=${FREEBIT_OEM_ID:-PASI}
- FREEBIT_OEM_KEY=${FREEBIT_OEM_KEY}
# Email
- EMAIL_ENABLED=${EMAIL_ENABLED:-true}
- EMAIL_FROM=${EMAIL_FROM:-no-reply@asolutions.jp}
- EMAIL_FROM_NAME=${EMAIL_FROM_NAME:-Assist Solutions}
- SENDGRID_API_KEY=${SENDGRID_API_KEY}
# Portal
- PORTAL_PRICEBOOK_ID=${PORTAL_PRICEBOOK_ID}
- PORTAL_PRICEBOOK_NAME=${PORTAL_PRICEBOOK_NAME:-Portal}
# Logging
- LOG_LEVEL=${LOG_LEVEL:-info}
# Migrations
- RUN_MIGRATIONS=${RUN_MIGRATIONS:-true}
restart: unless-stopped
depends_on:
database:
condition: service_healthy
cache:
condition: service_healthy
networks:
- portal-network
healthcheck:
test: ["CMD", "node", "-e", "fetch('http://localhost:4000/health').then(r=>r.ok||process.exit(1)).catch(()=>process.exit(1))"]
interval: 30s
timeout: 10s
start_period: 60s
retries: 3
# ---------------------------------------------------------------------------
# PostgreSQL Database
# ---------------------------------------------------------------------------
database:
image: postgres:17-alpine
container_name: portal-database
environment:
- POSTGRES_DB=${POSTGRES_DB:-portal_prod}
- POSTGRES_USER=${POSTGRES_USER:-portal}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
- POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
volumes:
- postgres_data:/var/lib/postgresql/data
restart: unless-stopped
networks:
- portal-network
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-portal} -d ${POSTGRES_DB:-portal_prod}"]
interval: 10s
timeout: 5s
start_period: 30s
retries: 5
# ---------------------------------------------------------------------------
# Redis Cache
# ---------------------------------------------------------------------------
cache:
image: redis:7-alpine
container_name: portal-cache
command: ["redis-server", "--save", "60", "1", "--loglevel", "warning", "--maxmemory", "128mb", "--maxmemory-policy", "allkeys-lru"]
volumes:
- redis_data:/data
restart: unless-stopped
networks:
- portal-network
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 10s
timeout: 5s
retries: 5
volumes:
postgres_data:
driver: local
redis_data:
driver: local
networks:
portal-network:
driver: bridge

View File

@ -64,7 +64,8 @@
},
"pnpm": {
"overrides": {
"js-yaml": ">=4.1.1"
"js-yaml": ">=4.1.1",
"glob": "^8.1.0"
}
}
}

200
pnpm-lock.yaml generated
View File

@ -7,6 +7,7 @@ settings:
overrides:
js-yaml: '>=4.1.1'
glob: ^8.1.0
importers:
@ -138,9 +139,6 @@ importers:
pino-http:
specifier: ^11.0.0
version: 11.0.0
pino-pretty:
specifier: ^13.1.3
version: 13.1.3
rate-limiter-flexible:
specifier: ^9.0.0
version: 9.0.0
@ -205,6 +203,9 @@ importers:
jest:
specifier: ^30.2.0
version: 30.2.0(@types/node@24.10.2)(ts-node@10.9.2(@swc/core@1.15.3)(@types/node@24.10.2)(typescript@5.9.3))
pino-pretty:
specifier: ^13.1.3
version: 13.1.3
prisma:
specifier: ^7.1.0
version: 7.1.0(@types/react@19.2.7)(react-dom@19.2.1(react@19.2.1))(react@19.2.1)(typescript@5.9.3)
@ -1083,18 +1084,6 @@ packages:
'@ioredis/commands@1.4.0':
resolution: {integrity: sha512-aFT2yemJJo+TZCmieA7qnYGQooOS7QfNmYrzGtsYd3g9j5iDP8AimYYAesf79ohjbLG12XxC4nG5DyEnC88AsQ==}
'@isaacs/balanced-match@4.0.1':
resolution: {integrity: sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ==}
engines: {node: 20 || >=22}
'@isaacs/brace-expansion@5.0.0':
resolution: {integrity: sha512-ZT55BDLV0yv0RBm2czMiZ+SqCGO7AvmOM3G/w2xhVPH+te0aKgFjmBvGlL1dH+ql2tgGO3MVrbb3jCKyvpgnxA==}
engines: {node: 20 || >=22}
'@isaacs/cliui@8.0.2':
resolution: {integrity: sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==}
engines: {node: '>=12'}
'@istanbuljs/load-nyc-config@1.1.0':
resolution: {integrity: sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==}
engines: {node: '>=8'}
@ -1583,10 +1572,6 @@ packages:
'@pinojs/redact@0.4.0':
resolution: {integrity: sha512-k2ENnmBugE/rzQfEcdWHcCY+/FM3VLzH9cYEsbdsoqrvzAKRhUZeRNhAZvB8OitQJ1TBed3yqWtdjzS6wJKBwg==}
'@pkgjs/parseargs@0.11.0':
resolution: {integrity: sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==}
engines: {node: '>=14'}
'@pkgr/core@0.2.9':
resolution: {integrity: sha512-QNqXyfVS2wm9hweSYD2O7F0G06uurj9kZ96TRQE5Y9hU7+tgdZwIkbAKc5Ocy1HxEY2kuDQa6cQ1WRs/O5LFKA==}
engines: {node: ^12.20.0 || ^14.18.0 || >=16.0.0}
@ -2406,10 +2391,6 @@ packages:
resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==}
engines: {node: '>=8'}
ansi-regex@6.2.2:
resolution: {integrity: sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==}
engines: {node: '>=12'}
ansi-styles@4.3.0:
resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==}
engines: {node: '>=8'}
@ -2418,10 +2399,6 @@ packages:
resolution: {integrity: sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==}
engines: {node: '>=10'}
ansi-styles@6.2.3:
resolution: {integrity: sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==}
engines: {node: '>=12'}
ansis@4.2.0:
resolution: {integrity: sha512-HqZ5rWlFjGiV0tDm3UxxgNRqsOTniqoKZu0pIAfh7TZQMGuZK+hH0drySty0si0QXj1ieop4+SkSfPZBPPkHig==}
engines: {node: '>=14'}
@ -3075,9 +3052,6 @@ packages:
duplexer@0.1.2:
resolution: {integrity: sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==}
eastasianwidth@0.2.0:
resolution: {integrity: sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==}
ecdsa-sig-formatter@1.0.11:
resolution: {integrity: sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==}
@ -3599,16 +3573,9 @@ packages:
glob-to-regexp@0.4.1:
resolution: {integrity: sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==}
glob@10.5.0:
resolution: {integrity: sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==}
hasBin: true
glob@13.0.0:
resolution: {integrity: sha512-tvZgpqk6fz4BaNZ66ZsRaZnbHvP/jG3uKJvAZOwEVUL4RTA5nJeeLYfyN9/VA8NX/V3IBG+hkeuGpKjvELkVhA==}
engines: {node: 20 || >=22}
glob@7.2.3:
resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==}
glob@8.1.0:
resolution: {integrity: sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==}
engines: {node: '>=12'}
deprecated: Glob versions prior to v9 are no longer supported
globals@14.0.0:
@ -3972,9 +3939,6 @@ packages:
resolution: {integrity: sha512-H0dkQoCa3b2VEeKQBOxFph+JAbcrQdE7KC0UkqwpLmv2EC4P41QXP+rqo9wYodACiG5/WM5s9oDApTU8utwj9g==}
engines: {node: '>= 0.4'}
jackspeak@3.4.3:
resolution: {integrity: sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==}
jest-changed-files@30.2.0:
resolution: {integrity: sha512-L8lR1ChrRnSdfeOvTrwZMlnWV8G/LLjQ0nG9MBclwWZidA2N5FviRki0Bvh20WRMOX31/JYvzdqTJrk5oBdydQ==}
engines: {node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0}
@ -4348,13 +4312,6 @@ packages:
resolution: {integrity: sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ==}
engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
lru-cache@10.4.3:
resolution: {integrity: sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==}
lru-cache@11.2.4:
resolution: {integrity: sha512-B5Y16Jr9LB9dHVkh6ZevG+vAbOsNOYCX+sXvFWFu7B3Iz5mijW3zdbMyhsh8ANd2mSWBYdJgnqi+mL7/LrOPYg==}
engines: {node: 20 || >=22}
lru-cache@5.1.1:
resolution: {integrity: sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==}
@ -4450,13 +4407,13 @@ packages:
resolution: {integrity: sha512-e5ISH9xMYU0DzrT+jl8q2ze9D6eWBto+I8CNpe+VI+K2J/F/k3PdkdTdz4wvGVH4NTpo+NRYTVIuMQEMMcsLqg==}
engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
minimatch@10.1.1:
resolution: {integrity: sha512-enIvLvRAFZYXJzkCYG5RKmPfrFArdLv+R+lbQ53BmIMLIry74bjKzX6iHAm8WYamJkhSSEabrWN5D97XnKObjQ==}
engines: {node: 20 || >=22}
minimatch@3.1.2:
resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==}
minimatch@5.1.6:
resolution: {integrity: sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==}
engines: {node: '>=10'}
minimatch@9.0.5:
resolution: {integrity: sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==}
engines: {node: '>=16 || 14 >=14.17'}
@ -4464,10 +4421,6 @@ packages:
minimist@1.2.8:
resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==}
minipass@7.1.2:
resolution: {integrity: sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==}
engines: {node: '>=16 || 14 >=14.17'}
mkdirp@0.5.6:
resolution: {integrity: sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==}
hasBin: true
@ -4731,9 +4684,6 @@ packages:
resolution: {integrity: sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==}
engines: {node: '>=6'}
package-json-from-dist@1.0.1:
resolution: {integrity: sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==}
parent-module@1.0.1:
resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==}
engines: {node: '>=6'}
@ -4765,10 +4715,6 @@ packages:
resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==}
engines: {node: '>=8'}
path-is-absolute@1.0.1:
resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==}
engines: {node: '>=0.10.0'}
path-key@3.1.1:
resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==}
engines: {node: '>=8'}
@ -4776,14 +4722,6 @@ packages:
path-parse@1.0.7:
resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==}
path-scurry@1.11.1:
resolution: {integrity: sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==}
engines: {node: '>=16 || 14 >=14.18'}
path-scurry@2.0.1:
resolution: {integrity: sha512-oWyT4gICAu+kaA7QWk/jvCHWarMKNs6pXOGWKDTr7cw4IGcUbW+PeTfbaQiLGheFRpjo6O9J0PmyMfQPjH71oA==}
engines: {node: 20 || >=22}
path-to-regexp@8.2.0:
resolution: {integrity: sha512-TdrF7fW9Rphjq4RjrW0Kp2AW0Ahwu9sRGTkS6bvDi0SCwZlEZYmcfDbEsTz8RVk0EHIS/Vd1bv3JhG+1xZuAyQ==}
engines: {node: '>=16'}
@ -5380,10 +5318,6 @@ packages:
resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==}
engines: {node: '>=8'}
string-width@5.1.2:
resolution: {integrity: sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==}
engines: {node: '>=12'}
string.prototype.includes@2.0.1:
resolution: {integrity: sha512-o7+c9bW6zpAdJHTtujeePODAhkuicdAryFsfVKwA+wGw89wJ4GTY484WTucM9hLtDEOpOvI+aHnzqnC5lHp4Rg==}
engines: {node: '>= 0.4'}
@ -5414,10 +5348,6 @@ packages:
resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==}
engines: {node: '>=8'}
strip-ansi@7.1.2:
resolution: {integrity: sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==}
engines: {node: '>=12'}
strip-bom@3.0.0:
resolution: {integrity: sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==}
engines: {node: '>=4'}
@ -5890,10 +5820,6 @@ packages:
resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==}
engines: {node: '>=10'}
wrap-ansi@8.1.0:
resolution: {integrity: sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==}
engines: {node: '>=12'}
wrappy@1.0.2:
resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==}
@ -6686,21 +6612,6 @@ snapshots:
'@ioredis/commands@1.4.0': {}
'@isaacs/balanced-match@4.0.1': {}
'@isaacs/brace-expansion@5.0.0':
dependencies:
'@isaacs/balanced-match': 4.0.1
'@isaacs/cliui@8.0.2':
dependencies:
string-width: 5.1.2
string-width-cjs: string-width@4.2.3
strip-ansi: 7.1.2
strip-ansi-cjs: strip-ansi@6.0.1
wrap-ansi: 8.1.0
wrap-ansi-cjs: wrap-ansi@7.0.0
'@istanbuljs/load-nyc-config@1.1.0':
dependencies:
camelcase: 5.3.1
@ -6813,7 +6724,7 @@ snapshots:
chalk: 4.1.2
collect-v8-coverage: 1.0.3
exit-x: 0.2.2
glob: 10.5.0
glob: 8.1.0
graceful-fs: 4.2.11
istanbul-lib-coverage: 3.2.2
istanbul-lib-instrument: 6.0.3
@ -7055,7 +6966,7 @@ snapshots:
cli-table3: 0.6.5
commander: 4.1.1
fork-ts-checker-webpack-plugin: 9.1.0(typescript@5.9.3)(webpack@5.103.0(@swc/core@1.15.3))
glob: 13.0.0
glob: 8.1.0
node-emoji: 1.11.0
ora: 5.4.1
tsconfig-paths: 4.2.0
@ -7245,9 +7156,6 @@ snapshots:
'@pinojs/redact@0.4.0': {}
'@pkgjs/parseargs@0.11.0':
optional: true
'@pkgr/core@0.2.9': {}
'@polka/url@1.0.0-next.29': {}
@ -8191,16 +8099,12 @@ snapshots:
ansi-regex@5.0.1: {}
ansi-regex@6.2.2: {}
ansi-styles@4.3.0:
dependencies:
color-convert: 2.0.1
ansi-styles@5.2.0: {}
ansi-styles@6.2.3: {}
ansis@4.2.0: {}
anymatch@3.1.3:
@ -8880,8 +8784,6 @@ snapshots:
duplexer@0.1.2: {}
eastasianwidth@0.2.0: {}
ecdsa-sig-formatter@1.0.11:
dependencies:
safe-buffer: 5.2.1
@ -9661,29 +9563,13 @@ snapshots:
glob-to-regexp@0.4.1: {}
glob@10.5.0:
dependencies:
foreground-child: 3.3.1
jackspeak: 3.4.3
minimatch: 9.0.5
minipass: 7.1.2
package-json-from-dist: 1.0.1
path-scurry: 1.11.1
glob@13.0.0:
dependencies:
minimatch: 10.1.1
minipass: 7.1.2
path-scurry: 2.0.1
glob@7.2.3:
glob@8.1.0:
dependencies:
fs.realpath: 1.0.0
inflight: 1.0.6
inherits: 2.0.4
minimatch: 3.1.2
minimatch: 5.1.6
once: 1.4.0
path-is-absolute: 1.0.1
globals@14.0.0: {}
@ -10072,12 +9958,6 @@ snapshots:
has-symbols: 1.1.0
set-function-name: 2.0.2
jackspeak@3.4.3:
dependencies:
'@isaacs/cliui': 8.0.2
optionalDependencies:
'@pkgjs/parseargs': 0.11.0
jest-changed-files@30.2.0:
dependencies:
execa: 5.1.1
@ -10140,7 +10020,7 @@ snapshots:
chalk: 4.1.2
ci-info: 4.3.1
deepmerge: 4.3.1
glob: 10.5.0
glob: 8.1.0
graceful-fs: 4.2.11
jest-circus: 30.2.0
jest-docblock: 30.2.0
@ -10300,7 +10180,7 @@ snapshots:
chalk: 4.1.2
cjs-module-lexer: 2.1.1
collect-v8-coverage: 1.0.3
glob: 10.5.0
glob: 8.1.0
graceful-fs: 4.2.11
jest-haste-map: 30.2.0
jest-message-util: 30.2.0
@ -10614,10 +10494,6 @@ snapshots:
lowercase-keys@3.0.0:
optional: true
lru-cache@10.4.3: {}
lru-cache@11.2.4: {}
lru-cache@5.1.1:
dependencies:
yallist: 3.1.1
@ -10689,22 +10565,20 @@ snapshots:
mimic-response@4.0.0:
optional: true
minimatch@10.1.1:
dependencies:
'@isaacs/brace-expansion': 5.0.0
minimatch@3.1.2:
dependencies:
brace-expansion: 1.1.12
minimatch@5.1.6:
dependencies:
brace-expansion: 2.0.2
minimatch@9.0.5:
dependencies:
brace-expansion: 2.0.2
minimist@1.2.8: {}
minipass@7.1.2: {}
mkdirp@0.5.6:
dependencies:
minimist: 1.2.8
@ -10980,8 +10854,6 @@ snapshots:
p-try@2.2.0: {}
package-json-from-dist@1.0.1: {}
parent-module@1.0.1:
dependencies:
callsites: 3.1.0
@ -11014,22 +10886,10 @@ snapshots:
path-exists@4.0.0: {}
path-is-absolute@1.0.1: {}
path-key@3.1.1: {}
path-parse@1.0.7: {}
path-scurry@1.11.1:
dependencies:
lru-cache: 10.4.3
minipass: 7.1.2
path-scurry@2.0.1:
dependencies:
lru-cache: 11.2.4
minipass: 7.1.2
path-to-regexp@8.2.0:
optional: true
@ -11721,12 +11581,6 @@ snapshots:
is-fullwidth-code-point: 3.0.0
strip-ansi: 6.0.1
string-width@5.1.2:
dependencies:
eastasianwidth: 0.2.0
emoji-regex: 9.2.2
strip-ansi: 7.1.2
string.prototype.includes@2.0.1:
dependencies:
call-bind: 1.0.8
@ -11785,10 +11639,6 @@ snapshots:
dependencies:
ansi-regex: 5.0.1
strip-ansi@7.1.2:
dependencies:
ansi-regex: 6.2.2
strip-bom@3.0.0: {}
strip-bom@4.0.0: {}
@ -11895,7 +11745,7 @@ snapshots:
test-exclude@6.0.0:
dependencies:
'@istanbuljs/schema': 0.1.3
glob: 7.2.3
glob: 8.1.0
minimatch: 3.1.2
text-decoder@1.2.3:
@ -12362,12 +12212,6 @@ snapshots:
string-width: 4.2.3
strip-ansi: 6.0.1
wrap-ansi@8.1.0:
dependencies:
ansi-styles: 6.2.3
string-width: 5.1.2
strip-ansi: 7.1.2
wrappy@1.0.2: {}
write-file-atomic@5.0.1:

View File

@ -2,4 +2,13 @@ packages:
- apps/*
- packages/*
onlyBuiltDependencies: '["@swc/core"]'
onlyBuiltDependencies:
- "@swc/core"
- "esbuild"
- "bcrypt"
- "ssh2"
- "cpu-features"
- "prisma"
- "@prisma/engines"
- "@prisma/client"
- "unrs-resolver"

View File

@ -1,54 +1,307 @@
#!/bin/bash
# =============================================================================
# 🐳 Plesk Docker Deployment Script
# Updated for organized Docker structure
# =============================================================================
# Deploys pre-built Docker images to a Plesk server
# For building images locally, use: pnpm plesk:images
# =============================================================================
set -e
set -euo pipefail
# =============================================================================
# Configuration
REPO_PATH="/var/www/vhosts/yourdomain.com/git/customer-portal"
# =============================================================================
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'
# Default paths (override via env vars)
REPO_PATH="${REPO_PATH:-/var/www/vhosts/yourdomain.com/git/customer-portal}"
COMPOSE_FILE="${COMPOSE_FILE:-$PROJECT_ROOT/docker/portainer/docker-compose.yml}"
ENV_FILE="${ENV_FILE:-$PROJECT_ROOT/.env}"
log() { echo -e "${GREEN}[PLESK] $1${NC}"; }
warn() { echo -e "${YELLOW}[PLESK] WARNING: $1${NC}"; }
error() { echo -e "${RED}[PLESK] ERROR: $1${NC}"; exit 1; }
# Image settings
IMAGE_FRONTEND="${IMAGE_FRONTEND:-portal-frontend}"
IMAGE_BACKEND="${IMAGE_BACKEND:-portal-backend}"
IMAGE_TAG="${IMAGE_TAG:-latest}"
# Navigate to repository
cd "$REPO_PATH"
# =============================================================================
# Colors and Logging
# =============================================================================
if [[ -t 1 ]]; then
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
BLUE='\033[0;34m'
NC='\033[0m'
else
GREEN='' YELLOW='' RED='' BLUE='' NC=''
fi
log "🚀 Starting Plesk Docker deployment..."
log() { echo -e "${GREEN}[PLESK]${NC} $*"; }
warn() { echo -e "${YELLOW}[PLESK]${NC} WARNING: $*"; }
error() { echo -e "${RED}[PLESK]${NC} ERROR: $*"; exit 1; }
info() { echo -e "${BLUE}[PLESK]${NC} $*"; }
# Check if Docker is available
if ! command -v docker &> /dev/null; then
# =============================================================================
# Usage
# =============================================================================
usage() {
    # Print CLI help and exit 0.  Heredoc delimiter is unquoted on purpose
    # so "$0" expands to the invoked script path in the examples.
    cat <<EOF
Deploy Docker images to Plesk server.

Usage: $0 [COMMAND] [OPTIONS]

Commands:
  deploy          Full deployment (load images, run migrations, start services)
  start           Start services
  stop            Stop services
  restart         Restart services
  status          Show service status
  logs            Show service logs
  load            Load Docker images from tarballs
  help            Show this help

Options:
  --env-file <path>   Path to environment file (default: .env)
  --compose <path>    Path to docker-compose file
  --tag <tag>         Image tag to use (default: latest)

Examples:
  $0 deploy          # Full deployment
  $0 load            # Load images from tarballs
  $0 status          # Check service status
  $0 logs backend    # Show backend logs
EOF
    exit 0
}
# =============================================================================
# Pre-flight Checks
# =============================================================================
preflight_checks() {
    # Validate the host before touching any container: Docker CLI + daemon,
    # Compose V2 plugin, env file, and compose file must all be present.
    # Fix: removed a stray duplicate "fi" after the first check that made
    # the script unparseable.
    log "🔍 Running pre-flight checks..."

    # Check Docker CLI
    if ! command -v docker &> /dev/null; then
        error "Docker is not installed. Please install Docker first."
    fi

    # Check Docker daemon is actually reachable (CLI alone is not enough)
    if ! docker info >/dev/null 2>&1; then
        error "Docker daemon is not running."
    fi

    # Check Compose V2 plugin ("docker compose", not legacy docker-compose)
    if ! docker compose version >/dev/null 2>&1; then
        error "Docker Compose V2 is required. Please upgrade Docker."
    fi

    # Environment file: bootstrap from the template if one exists, then
    # abort so the operator fills in real production values first.
    if [[ ! -f "$ENV_FILE" ]]; then
        if [[ -f "$PROJECT_ROOT/.env.production.example" ]]; then
            log "Creating environment file from template..."
            cp "$PROJECT_ROOT/.env.production.example" "$ENV_FILE"
            warn "Please edit $ENV_FILE with your production values!"
            error "Production environment not configured. Please set up .env"
        else
            error "Environment file not found: $ENV_FILE"
        fi
    fi

    # Check compose file
    if [[ ! -f "$COMPOSE_FILE" ]]; then
        error "Docker Compose file not found: $COMPOSE_FILE"
    fi

    log "✅ Pre-flight checks passed"
}
if ! command -v docker-compose &> /dev/null; then
error "Docker Compose is not installed. Please install Docker Compose."
fi
# =============================================================================
# Load Images from Tarballs
# =============================================================================
load_images() {
    # Load the frontend/backend images from tarballs found in a directory.
    #
    #   $1 - directory to search (default: $PROJECT_ROOT)
    #
    # For each image the first existing candidate wins, in the same
    # preference order as before: latest .tar.gz, tagged .tar.gz,
    # latest .tar, tagged .tar.  Aborts only if NO tarball was found at all.
    # Fix: collapsed four copy-pasted if/elif branches into one candidate
    # loop so the load logic exists in a single place.
    log "📦 Loading Docker images..."

    local search_dir="${1:-$PROJECT_ROOT}"
    local loaded=0
    local img tarball found

    for img in "$IMAGE_FRONTEND" "$IMAGE_BACKEND"; do
        found=0
        for tarball in \
            "$search_dir/${img}.latest.tar.gz" \
            "$search_dir/${img}.${IMAGE_TAG}.tar.gz" \
            "$search_dir/${img}.latest.tar" \
            "$search_dir/${img}.${IMAGE_TAG}.tar"; do
            [[ -f "$tarball" ]] || continue
            log "Loading $tarball..."
            if [[ "$tarball" == *.tar.gz ]]; then
                gunzip -c "$tarball" | docker load
            else
                docker load -i "$tarball"
            fi
            loaded=$((loaded + 1))
            found=1
            break
        done
        if [[ $found -eq 0 ]]; then
            warn "No tarball found for $img"
        fi
    done

    if [[ $loaded -eq 0 ]]; then
        error "No image tarballs found in $search_dir"
    fi

    log "✅ Loaded $loaded images"
}
# Check if production environment exists
ENV_FILE=".env"
if [ ! -f "$ENV_FILE" ]; then
log "Creating environment file from template..."
cp .env.production.example .env
warn "Please edit .env with your actual production values!"
error "Production environment not configured. Please set up .env"
fi
# =============================================================================
# Docker Compose Helpers
# =============================================================================
dc() {
    # Thin wrapper so every compose invocation uses the same compose file
    # and env file; all remaining args are forwarded verbatim.
    docker compose -f "$COMPOSE_FILE" --env-file "$ENV_FILE" "$@"
}
# Use the organized production management script
log "Running production deployment script..."
./scripts/prod/manage.sh deploy
# =============================================================================
# Deployment
# =============================================================================
deploy() {
    # Full deployment: ensure images are present (loading tarballs if
    # needed), bring up infrastructure, wait for Postgres, then start the
    # application containers and do a coarse health check.
    log "🚀 Starting Plesk Docker deployment..."
    preflight_checks

    # Check if images exist, if not try to load from tarballs.
    # "|| true": a failed load is not fatal here — the verification loop
    # below produces the definitive error message.
    if ! docker image inspect "${IMAGE_FRONTEND}:latest" >/dev/null 2>&1 || \
       ! docker image inspect "${IMAGE_BACKEND}:latest" >/dev/null 2>&1; then
        log "Images not found, attempting to load from tarballs..."
        load_images "$PROJECT_ROOT" || true
    fi

    # Verify images exist.  NOTE(review): always checks the ":latest" tag
    # even when --tag was given — confirm that is intentional.
    for img in "${IMAGE_FRONTEND}:latest" "${IMAGE_BACKEND}:latest"; do
        if ! docker image inspect "$img" >/dev/null 2>&1; then
            error "Required image not found: $img. Build images first with: pnpm plesk:images"
        fi
    done

    # Start infrastructure first so the apps have something to connect to.
    log "🗄️ Starting database and cache..."
    dc up -d database cache

    # Wait for database: poll pg_isready every 2s, up to 60s total.
    log "⏳ Waiting for database..."
    local timeout=60
    while [[ $timeout -gt 0 ]]; do
        if dc exec -T database pg_isready -U portal -d portal_prod 2>/dev/null; then
            log "✅ Database is ready"
            break
        fi
        sleep 2
        timeout=$((timeout - 2))
    done

    # timeout hits exactly 0 only when the loop never broke out above.
    if [[ $timeout -eq 0 ]]; then
        error "Database failed to start within 60 seconds"
    fi

    # Start application services
    log "🚀 Starting application services..."
    dc up -d frontend backend

    # Health check: fixed grace period, then scan compose status output
    # for any container reported "unhealthy".
    log "🏥 Waiting for services to be healthy..."
    sleep 15

    if dc ps | grep -q "unhealthy"; then
        warn "Some services may not be healthy - check logs with: $0 logs"
    else
        log "✅ All services healthy"
    fi

    log "🎉 Plesk Docker deployment completed!"
    echo ""
    info "📝 Next steps:"
    echo " 1. Configure Plesk reverse proxy to point to port 3000 (frontend)"
    echo " 2. Set up SSL certificates in Plesk"
    echo " 3. Test your application at your domain"
    echo ""
    info "📋 Useful commands:"
    echo " $0 status - Check service status"
    echo " $0 logs - View logs"
    echo " $0 restart - Restart services"
}
log "🎉 Plesk Docker deployment completed!"
log "📝 Don't forget to:"
echo "1. Configure Plesk reverse proxy to point to port 3000"
echo "2. Set up SSL certificates in Plesk"
echo "3. Test your application at your domain"
# =============================================================================
# Service Management
# =============================================================================
start_services() {
    # Validate the host, then start (or create) all compose services.
    preflight_checks
    log "▶️ Starting services..."
    dc up -d
    log "✅ Services started"
}
stop_services() {
    # Tear down the compose stack (no -v flag, so named volumes survive).
    log "⏹️ Stopping services..."
    dc down
    log "✅ Services stopped"
}
restart_services() {
    # Restart all containers in the stack in place.
    log "🔄 Restarting services..."
    dc restart
    log "✅ Services restarted"
}
show_status() {
    # Print compose service status twice: the default "ps" table, then a
    # narrowed name/status/ports view.
    # NOTE(review): health only shows here if compose embeds it in the
    # .Status column — confirm with the compose version in use.
    log "📊 Service Status:"
    dc ps
    echo ""
    log "🏥 Health Status:"
    dc ps --format "table {{.Name}}\t{{.Status}}\t{{.Ports}}"
}
show_logs() {
    # Follow logs: all services by default, or one service when named.
    local target="${1:-}"
    if [[ -z "$target" ]]; then
        dc logs -f
    else
        dc logs -f "$target"
    fi
}
# =============================================================================
# Argument Parsing
# =============================================================================
# First positional argument is the command; default to "help".
COMMAND="${1:-help}"
shift || true

# Flags may follow the command.  ${2:?...} aborts with a clear message when
# a flag is given without its value, instead of the opaque
# "unbound variable" error produced by set -u.
while [[ $# -gt 0 ]]; do
    case "$1" in
        --env-file) ENV_FILE="${2:?--env-file requires a path}"; shift 2 ;;
        --compose) COMPOSE_FILE="${2:?--compose requires a path}"; shift 2 ;;
        --tag) IMAGE_TAG="${2:?--tag requires a value}"; shift 2 ;;
        -h|--help) usage ;;
        *) break ;;  # first non-flag arg is passed through (e.g. "logs <service>")
    esac
done
# =============================================================================
# Main
# =============================================================================
# Dispatch the requested command.  Unknown commands now emit a warning
# before the usage text instead of being silently treated as "help".
case "$COMMAND" in
    deploy) deploy ;;
    start) start_services ;;
    stop) stop_services ;;
    restart) restart_services ;;
    status) show_status ;;
    logs) show_logs "$@" ;;  # optional: service name
    load) preflight_checks; load_images "${1:-$PROJECT_ROOT}" ;;
    help) usage ;;
    *) warn "Unknown command: $COMMAND"; usage ;;
esac

View File

@ -1,13 +1,23 @@
#!/usr/bin/env bash
# 🐳 Build production Docker images for Plesk deployment
# Features: Parallel builds, BuildKit, compressed tarballs, multi-platform support
# =============================================================================
# 🐳 Build Production Docker Images for Plesk Deployment
# =============================================================================
# Features:
# - Parallel builds with BuildKit
# - Multi-platform support (amd64/arm64)
# - Compressed tarballs with SHA256 checksums
# - Buildx builder for cross-platform builds
# - Intelligent layer caching
# =============================================================================
set -Eeuo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# =============================================================================
# Configuration (override via env vars or flags)
# =============================================================================
IMAGE_FRONTEND="${IMAGE_FRONTEND_NAME:-portal-frontend}"
IMAGE_BACKEND="${IMAGE_BACKEND_NAME:-portal-backend}"
IMAGE_TAG="${IMAGE_TAG:-}"
@ -15,22 +25,36 @@ OUTPUT_DIR="${OUTPUT_DIR:-$PROJECT_ROOT}"
PUSH_REMOTE="${PUSH_REMOTE:-}"
PARALLEL="${PARALLEL_BUILD:-1}"
COMPRESS="${COMPRESS:-1}"
USE_LATEST_FILENAME="${USE_LATEST_FILENAME:-1}" # Default: save as .latest.tar.gz
USE_LATEST_FILENAME="${USE_LATEST_FILENAME:-1}"
SAVE_TARS=1
PLATFORM="${PLATFORM:-linux/amd64}" # Override for ARM: linux/arm64
PROGRESS="${PROGRESS:-auto}" # "plain" for CI, "auto" for interactive
PLATFORM="${PLATFORM:-linux/amd64}"
PROGRESS="${PROGRESS:-auto}"
USE_BUILDX="${USE_BUILDX:-0}"
CLEAN_CACHE="${CLEAN_CACHE:-0}"
DRY_RUN="${DRY_RUN:-0}"
# =============================================================================
# Colors and Logging
# =============================================================================
if [[ -t 1 ]]; then
G='\033[0;32m' Y='\033[1;33m' R='\033[0;31m' B='\033[0;34m' C='\033[0;36m' M='\033[0;35m' N='\033[0m'
else
G='' Y='' R='' B='' C='' M='' N=''
fi
# Colors
G='\033[0;32m' Y='\033[1;33m' R='\033[0;31m' B='\033[0;34m' C='\033[0;36m' N='\033[0m'
log() { echo -e "${G}[BUILD]${N} $*"; }
info() { echo -e "${B}[INFO]${N} $*"; }
warn() { echo -e "${Y}[WARN]${N} $*"; }
fail() { echo -e "${R}[ERROR]${N} $*"; exit 1; }
step() { echo -e "${C}[STEP]${N} $*"; }
debug() { [[ "${DEBUG:-0}" -eq 1 ]] && echo -e "${M}[DEBUG]${N} $*" || true; }
# =============================================================================
# Usage
# =============================================================================
usage() {
cat <<EOF
Build Docker images and save tarballs for Plesk.
Build Docker images and save tarballs for Plesk deployment.
Usage: $0 [OPTIONS]
@ -42,22 +66,40 @@ Options:
--no-compress Save as .tar instead of .tar.gz
--versioned Name files with version tag (default: .latest.tar.gz)
--sequential Build one at a time (default: parallel)
--platform <p> Target platform (default: linux/amd64, use linux/arm64 for ARM)
--platform <p> Target platform (default: linux/amd64)
--buildx Use Docker Buildx for builds (better caching)
--clean-cache Clean Docker build cache before building
--dry-run Show what would be done without executing
--ci CI mode: plain progress output, no colors
--debug Enable debug output
-h, --help Show this help
Platform Options:
linux/amd64 Standard x86_64 servers (default)
linux/arm64 ARM64 servers (Apple Silicon, Graviton)
Examples:
$0 # Output: portal-frontend.latest.tar.gz (default)
$0 # Output: portal-frontend.latest.tar.gz
$0 --versioned # Output: portal-frontend.20251201-abc123.tar.gz
$0 --tag v1.2.3 --versioned # Output: portal-frontend.v1.2.3.tar.gz
$0 --sequential --no-save # Debug build
$0 --platform linux/arm64 # Build for ARM64 (Apple Silicon)
$0 --platform linux/arm64 # Build for ARM64
$0 --buildx --clean-cache # Fresh buildx build
$0 --ci # CI-friendly output
Environment Variables:
IMAGE_FRONTEND_NAME Override frontend image name (default: portal-frontend)
IMAGE_BACKEND_NAME Override backend image name (default: portal-backend)
PNPM_VERSION Override PNPM version (default: from package.json)
NEXT_PUBLIC_API_BASE Next.js API base path (default: /api)
DEBUG=1 Enable debug output
EOF
exit 0
}
# Parse arguments
# =============================================================================
# Argument Parsing
# =============================================================================
while [[ $# -gt 0 ]]; do
case "$1" in
--tag) IMAGE_TAG="${2:-}"; shift 2 ;;
@ -68,46 +110,117 @@ while [[ $# -gt 0 ]]; do
--versioned) USE_LATEST_FILENAME=0; shift ;;
--sequential) PARALLEL=0; shift ;;
--platform) PLATFORM="${2:-linux/amd64}"; shift 2 ;;
--ci) PROGRESS="plain"; G=''; Y=''; R=''; B=''; C=''; N=''; shift ;;
--buildx) USE_BUILDX=1; shift ;;
--clean-cache) CLEAN_CACHE=1; shift ;;
--dry-run) DRY_RUN=1; shift ;;
--ci) PROGRESS="plain"; G=''; Y=''; R=''; B=''; C=''; M=''; N=''; shift ;;
--debug) DEBUG=1; shift ;;
-h|--help) usage ;;
*) fail "Unknown option: $1" ;;
esac
done
# =============================================================================
# Validation
command -v docker >/dev/null 2>&1 || fail "Docker required"
# =============================================================================
command -v docker >/dev/null 2>&1 || fail "Docker is required but not installed"
cd "$PROJECT_ROOT"
[[ -f apps/portal/Dockerfile ]] || fail "Missing apps/portal/Dockerfile"
[[ -f apps/bff/Dockerfile ]] || fail "Missing apps/bff/Dockerfile"
[[ -f package.json ]] || fail "Missing package.json"
# Verify Docker daemon is running
docker info >/dev/null 2>&1 || fail "Docker daemon is not running"
# =============================================================================
# Setup
# =============================================================================
# Auto-generate tag if not provided
[[ -z "$IMAGE_TAG" ]] && IMAGE_TAG="$(date +%Y%m%d)-$(git rev-parse --short HEAD 2>/dev/null || echo 'local')"
if [[ -z "$IMAGE_TAG" ]]; then
GIT_SHA=$(git rev-parse --short HEAD 2>/dev/null || echo 'local')
IMAGE_TAG="$(date +%Y%m%d)-${GIT_SHA}"
fi
# Enable BuildKit
export DOCKER_BUILDKIT=1
# Extract PNPM version from package.json (packageManager field)
# Format: "pnpm@10.25.0+sha512..."
PNPM_VERSION_FROM_PKG=$(grep -oP '"packageManager":\s*"pnpm@\K[0-9.]+' package.json 2>/dev/null || echo "")
PNPM_VERSION="${PNPM_VERSION:-${PNPM_VERSION_FROM_PKG:-10.25.0}}"
# Build args (can be overridden via env vars)
# Build args
NEXT_PUBLIC_API_BASE="${NEXT_PUBLIC_API_BASE:-/api}"
NEXT_PUBLIC_APP_NAME="${NEXT_PUBLIC_APP_NAME:-Customer Portal}"
GIT_SOURCE="$(git config --get remote.origin.url 2>/dev/null || echo unknown)"
GIT_COMMIT="$(git rev-parse HEAD 2>/dev/null || echo unknown)"
BUILD_DATE="$(date -u +%Y-%m-%dT%H:%M:%SZ)"
log "🏷️ Tag: ${IMAGE_TAG}"
info "📦 PNPM: ${PNPM_VERSION} | Platform: ${PLATFORM}"
# Log directory
LOG_DIR="${OUTPUT_DIR}/.build-logs"
mkdir -p "$LOG_DIR"
# =============================================================================
# Buildx Setup
# =============================================================================
BUILDER_NAME="portal-builder"
setup_buildx() {
    # Prepare the named buildx builder when --buildx was requested.
    # Degrades gracefully: if the buildx plugin is missing, flip back to
    # the standard builder instead of failing the run.
    [[ "$USE_BUILDX" -eq 1 ]] || return 0

    step "Setting up Docker Buildx..."

    if ! docker buildx version >/dev/null 2>&1; then
        warn "Docker Buildx not available, falling back to standard build"
        USE_BUILDX=0
        return
    fi

    # Reuse the builder if it already exists; otherwise create and bootstrap it.
    if docker buildx inspect "$BUILDER_NAME" >/dev/null 2>&1; then
        docker buildx use "$BUILDER_NAME"
        debug "Using existing buildx builder: $BUILDER_NAME"
    else
        docker buildx create --name "$BUILDER_NAME" --driver docker-container --bootstrap
        info "Created buildx builder: $BUILDER_NAME"
    fi
}
# =============================================================================
# Clean Cache
# =============================================================================
clean_cache() {
    # Prune the Docker build cache when --clean-cache was requested.
    # Both prunes are best-effort ("|| true") so an older Docker that does
    # not support a filter cannot abort the build.
    if [[ "$CLEAN_CACHE" -ne 1 ]]; then
        return 0
    fi

    step "Cleaning Docker build cache..."
    docker builder prune -f --filter type=exec.cachemount 2>/dev/null || true
    docker builder prune -f --filter unused-for=24h 2>/dev/null || true
    log "✅ Build cache cleaned"
}
# =============================================================================
# Build Functions
# =============================================================================
# Build functions moved to build_frontend and build_backend for better argument handling
build_frontend() {
local logfile="$LOG_DIR/frontend.log"
step "Building frontend image..."
docker build -f apps/portal/Dockerfile \
if [[ "$DRY_RUN" -eq 1 ]]; then
info "[DRY-RUN] Would build frontend"
return 0
fi
local exit_code=0
docker build \
--load \
-f apps/portal/Dockerfile \
--platform "${PLATFORM}" \
--progress "${PROGRESS}" \
--build-arg "PNPM_VERSION=${PNPM_VERSION}" \
@ -120,22 +233,35 @@ build_frontend() {
--label "org.opencontainers.image.source=${GIT_SOURCE}" \
--label "org.opencontainers.image.revision=${GIT_COMMIT}" \
--label "org.opencontainers.image.created=${BUILD_DATE}" \
. > "$logfile" 2>&1
local exit_code=$?
. > "$logfile" 2>&1 || exit_code=$?
if [[ $exit_code -eq 0 ]]; then
local size=$(docker image inspect "${IMAGE_FRONTEND}:latest" --format='{{.Size}}' 2>/dev/null | numfmt --to=iec 2>/dev/null || echo "?")
local size
size=$(docker image inspect "${IMAGE_FRONTEND}:latest" --format='{{.Size}}' 2>/dev/null | numfmt --to=iec 2>/dev/null || echo "?")
log "✅ Frontend built (${size})"
return 0
else
warn "❌ Frontend FAILED - see $logfile"
tail -30 "$logfile"
tail -50 "$logfile" || true
return 1
fi
return $exit_code
}
build_backend() {
local logfile="$LOG_DIR/backend.log"
step "Building backend image..."
docker build -f apps/bff/Dockerfile \
if [[ "$DRY_RUN" -eq 1 ]]; then
info "[DRY-RUN] Would build backend"
return 0
fi
local exit_code=0
docker build \
--load \
-f apps/bff/Dockerfile \
--platform "${PLATFORM}" \
--progress "${PROGRESS}" \
--build-arg "PNPM_VERSION=${PNPM_VERSION}" \
@ -145,106 +271,91 @@ build_backend() {
--label "org.opencontainers.image.source=${GIT_SOURCE}" \
--label "org.opencontainers.image.revision=${GIT_COMMIT}" \
--label "org.opencontainers.image.created=${BUILD_DATE}" \
. > "$logfile" 2>&1
local exit_code=$?
. > "$logfile" 2>&1 || exit_code=$?
if [[ $exit_code -eq 0 ]]; then
local size=$(docker image inspect "${IMAGE_BACKEND}:latest" --format='{{.Size}}' 2>/dev/null | numfmt --to=iec 2>/dev/null || echo "?")
local size
size=$(docker image inspect "${IMAGE_BACKEND}:latest" --format='{{.Size}}' 2>/dev/null | numfmt --to=iec 2>/dev/null || echo "?")
log "✅ Backend built (${size})"
return 0
else
warn "❌ Backend FAILED - see $logfile"
tail -30 "$logfile"
tail -50 "$logfile" || true
return 1
fi
return $exit_code
}
# Build images
START=$(date +%s)
echo ""
log "🐳 Starting Docker builds..."
info "📁 Build logs: $LOG_DIR/"
echo ""
if [[ "$PARALLEL" -eq 1 ]]; then
log "🚀 Building frontend & backend in parallel..."
# =============================================================================
# Save Tarballs
# =============================================================================
save_tarballs() {
if [[ "$SAVE_TARS" -eq 0 ]] || [[ "$DRY_RUN" -eq 1 ]]; then
return 0
fi
build_frontend & FE_PID=$!
build_backend & BE_PID=$!
# Track progress
ELAPSED=0
while kill -0 $FE_PID 2>/dev/null || kill -0 $BE_PID 2>/dev/null; do
sleep 10
ELAPSED=$((ELAPSED + 10))
info "⏳ Building... (${ELAPSED}s elapsed)"
done
# Check results
FE_EXIT=0; BE_EXIT=0
wait $FE_PID || FE_EXIT=$?
wait $BE_PID || BE_EXIT=$?
[[ $FE_EXIT -ne 0 ]] && fail "Frontend build failed (exit $FE_EXIT) - check $LOG_DIR/frontend.log"
[[ $BE_EXIT -ne 0 ]] && fail "Backend build failed (exit $BE_EXIT) - check $LOG_DIR/backend.log"
else
log "🔧 Sequential build mode..."
build_frontend || fail "Frontend build failed - check $LOG_DIR/frontend.log"
build_backend || fail "Backend build failed - check $LOG_DIR/backend.log"
fi
BUILD_TIME=$(($(date +%s) - START))
echo ""
log "⏱️ Build completed in ${BUILD_TIME}s"
# Save tarballs
if [[ "$SAVE_TARS" -eq 1 ]]; then
mkdir -p "$OUTPUT_DIR"
SAVE_START=$(date +%s)
local save_start
save_start=$(date +%s)
# Determine filename suffix
local file_tag
if [[ "$USE_LATEST_FILENAME" -eq 1 ]]; then
FILE_TAG="latest"
file_tag="latest"
else
FILE_TAG="$IMAGE_TAG"
file_tag="$IMAGE_TAG"
fi
local fe_tar be_tar
if [[ "$COMPRESS" -eq 1 ]]; then
# Pick fastest available compressor: pigz (parallel) > gzip
local compressor comp_name
if command -v pigz >/dev/null 2>&1; then
COMPRESSOR="pigz -p $(nproc)" # Use all CPU cores
COMP_NAME="pigz"
compressor="pigz -p $(nproc)"
comp_name="pigz"
else
COMPRESSOR="gzip -1" # Fast mode if no pigz
COMP_NAME="gzip"
compressor="gzip -1"
comp_name="gzip"
fi
FE_TAR="$OUTPUT_DIR/${IMAGE_FRONTEND}.${FILE_TAG}.tar.gz"
BE_TAR="$OUTPUT_DIR/${IMAGE_BACKEND}.${FILE_TAG}.tar.gz"
log "💾 Compressing with $COMP_NAME..."
fe_tar="$OUTPUT_DIR/${IMAGE_FRONTEND}.${file_tag}.tar.gz"
be_tar="$OUTPUT_DIR/${IMAGE_BACKEND}.${file_tag}.tar.gz"
log "💾 Compressing with $comp_name..."
(docker save "${IMAGE_FRONTEND}:latest" | $COMPRESSOR > "$FE_TAR") &
(docker save "${IMAGE_BACKEND}:latest" | $COMPRESSOR > "$BE_TAR") &
(docker save "${IMAGE_FRONTEND}:latest" | $compressor > "$fe_tar") &
(docker save "${IMAGE_BACKEND}:latest" | $compressor > "$be_tar") &
wait
else
FE_TAR="$OUTPUT_DIR/${IMAGE_FRONTEND}.${FILE_TAG}.tar"
BE_TAR="$OUTPUT_DIR/${IMAGE_BACKEND}.${FILE_TAG}.tar"
fe_tar="$OUTPUT_DIR/${IMAGE_FRONTEND}.${file_tag}.tar"
be_tar="$OUTPUT_DIR/${IMAGE_BACKEND}.${file_tag}.tar"
log "💾 Saving uncompressed tarballs..."
docker save -o "$FE_TAR" "${IMAGE_FRONTEND}:latest" &
docker save -o "$BE_TAR" "${IMAGE_BACKEND}:latest" &
docker save -o "$fe_tar" "${IMAGE_FRONTEND}:latest" &
docker save -o "$be_tar" "${IMAGE_BACKEND}:latest" &
wait
fi
local save_time
save_time=$(($(date +%s) - save_start))
# Generate checksums
sha256sum "$fe_tar" > "${fe_tar}.sha256"
sha256sum "$be_tar" > "${be_tar}.sha256"
log "✅ Saved in ${save_time}s:"
printf " %-50s %s\n" "$fe_tar" "$(du -h "$fe_tar" | cut -f1)"
printf " %-50s %s\n" "$be_tar" "$(du -h "$be_tar" | cut -f1)"
}
SAVE_TIME=$(($(date +%s) - SAVE_START))
sha256sum "$FE_TAR" > "${FE_TAR}.sha256"
sha256sum "$BE_TAR" > "${BE_TAR}.sha256"
log "✅ Saved in ${SAVE_TIME}s:"
printf " %-50s %s\n" "$FE_TAR" "$(du -h "$FE_TAR" | cut -f1)"
printf " %-50s %s\n" "$BE_TAR" "$(du -h "$BE_TAR" | cut -f1)"
fi
# Push to registry
if [[ -n "$PUSH_REMOTE" ]]; then
# =============================================================================
# Push to Registry
# =============================================================================
push_images() {
if [[ -z "$PUSH_REMOTE" ]] || [[ "$DRY_RUN" -eq 1 ]]; then
return 0
fi
log "📤 Pushing to ${PUSH_REMOTE}..."
for img in "${IMAGE_FRONTEND}" "${IMAGE_BACKEND}"; do
for tag in "latest" "${IMAGE_TAG}"; do
docker tag "${img}:${tag}" "${PUSH_REMOTE}/${img}:${tag}"
@ -252,20 +363,105 @@ if [[ -n "$PUSH_REMOTE" ]]; then
done
done
wait
log "✅ Pushed"
fi
log "✅ Pushed to registry"
}
TOTAL_TIME=$(($(date +%s) - START))
log "🎉 Complete in ${TOTAL_TIME}s"
echo ""
info "Next: Upload to Plesk, then:"
if [[ "$COMPRESS" -eq 1 ]]; then
echo " gunzip -c ${IMAGE_FRONTEND}.${FILE_TAG}.tar.gz | docker load"
echo " gunzip -c ${IMAGE_BACKEND}.${FILE_TAG}.tar.gz | docker load"
else
echo " docker load -i ${IMAGE_FRONTEND}.${FILE_TAG}.tar"
echo " docker load -i ${IMAGE_BACKEND}.${FILE_TAG}.tar"
fi
if [[ "$USE_LATEST_FILENAME" -eq 0 ]]; then
echo " Update Portainer with tag: ${IMAGE_TAG}"
fi
# =============================================================================
# Main Execution
# =============================================================================
main() {
    # Orchestrates the whole build: banner, optional buildx/cache setup,
    # (parallel) image builds with progress reporting, tarball export,
    # registry push, and a copy-pasteable "next steps" summary.
    local start_time
    start_time=$(date +%s)

    echo ""
    log "🐳 Customer Portal Docker Build"
    log "================================"
    info "🏷️ Tag: ${IMAGE_TAG}"
    info "📦 PNPM: ${PNPM_VERSION} | Platform: ${PLATFORM}"
    info "📁 Build logs: $LOG_DIR/"
    # A false test on the left of && does not trip set -e.
    [[ "$DRY_RUN" -eq 1 ]] && warn "🔍 DRY-RUN MODE - no actual builds"
    echo ""

    # Setup (both are no-ops unless their flags were given)
    setup_buildx
    clean_cache

    # Build images
    log "🚀 Starting Docker builds..."
    if [[ "$PARALLEL" -eq 1 ]]; then
        log "Building frontend & backend in parallel..."

        build_frontend & FE_PID=$!
        build_backend & BE_PID=$!

        # Track progress: poll both background PIDs every 10s so the user
        # can see the build is still alive.
        local elapsed=0
        while kill -0 $FE_PID 2>/dev/null || kill -0 $BE_PID 2>/dev/null; do
            sleep 10
            elapsed=$((elapsed + 10))
            info "⏳ Building... (${elapsed}s elapsed)"
        done

        # Check results — "wait PID || var=$?" captures each exit code
        # without letting set -e abort before we can report which failed.
        local fe_exit=0 be_exit=0
        wait $FE_PID || fe_exit=$?
        wait $BE_PID || be_exit=$?

        [[ $fe_exit -ne 0 ]] && fail "Frontend build failed (exit $fe_exit) - check $LOG_DIR/frontend.log"
        [[ $be_exit -ne 0 ]] && fail "Backend build failed (exit $be_exit) - check $LOG_DIR/backend.log"
    else
        log "🔧 Sequential build mode..."
        build_frontend || fail "Frontend build failed - check $LOG_DIR/frontend.log"
        build_backend || fail "Backend build failed - check $LOG_DIR/backend.log"
    fi

    local build_time
    build_time=$(($(date +%s) - start_time))
    log "⏱️ Build completed in ${build_time}s"

    # Save and push (each is a no-op in dry-run / when not configured)
    save_tarballs
    push_images

    # Summary
    local total_time
    total_time=$(($(date +%s) - start_time))
    echo ""
    log "🎉 Complete in ${total_time}s"
    echo ""

    # Show next steps — only meaningful when tarballs were actually written.
    if [[ "$SAVE_TARS" -eq 1 ]] && [[ "$DRY_RUN" -eq 0 ]]; then
        # Mirror the filename suffix chosen by save_tarballs.
        local file_tag
        [[ "$USE_LATEST_FILENAME" -eq 1 ]] && file_tag="latest" || file_tag="$IMAGE_TAG"

        info "📋 Next steps for Plesk deployment:"
        echo ""
        echo " 1. Upload tarballs to your server:"
        echo " scp ${IMAGE_FRONTEND}.${file_tag}.tar.gz* ${IMAGE_BACKEND}.${file_tag}.tar.gz* user@server:/path/"
        echo ""
        echo " 2. Load images on the server:"
        if [[ "$COMPRESS" -eq 1 ]]; then
            echo " gunzip -c ${IMAGE_FRONTEND}.${file_tag}.tar.gz | docker load"
            echo " gunzip -c ${IMAGE_BACKEND}.${file_tag}.tar.gz | docker load"
        else
            echo " docker load -i ${IMAGE_FRONTEND}.${file_tag}.tar"
            echo " docker load -i ${IMAGE_BACKEND}.${file_tag}.tar"
        fi
        echo ""
        echo " 3. Verify checksums:"
        echo " sha256sum -c ${IMAGE_FRONTEND}.${file_tag}.tar.gz.sha256"
        echo " sha256sum -c ${IMAGE_BACKEND}.${file_tag}.tar.gz.sha256"
        echo ""
        if [[ "$USE_LATEST_FILENAME" -eq 0 ]]; then
            echo " 4. Update Portainer with tag: ${IMAGE_TAG}"
            echo ""
        fi
    fi
}
# Run main
main