Update pnpm-lock.yaml, Dockerfile, and error handling in BFF

- Enabled workspace package injection in pnpm-lock.yaml for improved dependency management.
- Removed outdated SHA256 files for backend and frontend tarballs.
- Refactored Dockerfile for BFF to streamline the build process and optimize production image size.
- Updated Prisma client configuration to specify binary targets for Alpine compatibility.
- Enhanced error handling in WhmcsLinkWorkflowService to use BadRequestException for clearer client feedback.
- Adjusted entrypoint script to ensure proper database migration execution.
This commit is contained in:
barsa 2025-12-02 10:05:11 +09:00
parent f4d4cb0ab0
commit 68561fdf1d
17 changed files with 354 additions and 320 deletions

4
.npmrc Normal file
View File

@ -0,0 +1,4 @@
# pnpm configuration
# Enable injected workspace packages for pnpm v10 deploy
inject-workspace-packages=true

View File

@ -1,145 +1,128 @@
# 🚀 Backend (BFF) Dockerfile - Plesk Optimized # 🚀 Backend (BFF) Dockerfile - Production Grade (pnpm v10)
# Multi-stage build for NestJS production deployment via Plesk # - Uses pnpm's injected workspace packages (no legacy flags)
# - pnpm deploy creates minimal production-only install
# - Prisma + bcrypt built only for Alpine
# - No redundant installs
# ===================================================== # =====================================================
# Dependencies Stage - Install all dependencies # Stage 1: Dependencies (Debian for native builds)
# ===================================================== # =====================================================
FROM node:22-bookworm-slim AS deps FROM node:22-bookworm-slim AS deps
# Install system dependencies for building RUN apt-get update && apt-get install -y dumb-init ca-certificates \
RUN apt-get update && apt-get install -y dumb-init ca-certificates && rm -rf /var/lib/apt/lists/* \ && rm -rf /var/lib/apt/lists/* \
&& corepack enable && corepack prepare pnpm@10.15.0 --activate && corepack enable && corepack prepare pnpm@10.15.0 --activate
WORKDIR /app WORKDIR /app
# Copy workspace configuration COPY .npmrc pnpm-workspace.yaml package.json pnpm-lock.yaml ./
COPY pnpm-workspace.yaml package.json pnpm-lock.yaml ./
# Copy package.json files for dependency resolution
COPY packages/domain/package.json ./packages/domain/ COPY packages/domain/package.json ./packages/domain/
COPY packages/logging/package.json ./packages/logging/ COPY packages/logging/package.json ./packages/logging/
COPY packages/validation/package.json ./packages/validation/ COPY packages/validation/package.json ./packages/validation/
COPY apps/bff/package.json ./apps/bff/ COPY apps/bff/package.json ./apps/bff/
# Install ALL dependencies (needed for build) RUN pnpm install --frozen-lockfile --prefer-offline --config.ignore-scripts=false
RUN pnpm install --frozen-lockfile --prefer-offline
# ===================================================== # =====================================================
# Builder Stage - Build the application # Stage 2: Builder (compile TypeScript)
# ===================================================== # =====================================================
FROM node:22-bookworm-slim AS builder FROM node:22-bookworm-slim AS builder
# Install pnpm RUN apt-get update && apt-get install -y ca-certificates \
RUN apt-get update && apt-get install -y ca-certificates && rm -rf /var/lib/apt/lists/* \ && rm -rf /var/lib/apt/lists/* \
&& corepack enable && corepack prepare pnpm@10.15.0 --activate && corepack enable && corepack prepare pnpm@10.15.0 --activate
WORKDIR /app WORKDIR /app
# Copy workspace configuration COPY .npmrc pnpm-workspace.yaml package.json pnpm-lock.yaml tsconfig.json tsconfig.base.json ./
COPY pnpm-workspace.yaml package.json pnpm-lock.yaml ./
# Copy source code
COPY packages/ ./packages/ COPY packages/ ./packages/
COPY apps/bff/ ./apps/bff/ COPY apps/bff/ ./apps/bff/
COPY tsconfig.json tsconfig.base.json ./
# Copy node_modules from deps stage
COPY --from=deps /app/node_modules ./node_modules COPY --from=deps /app/node_modules ./node_modules
WORKDIR /app # No second pnpm install — reuse deps layer
# Align workspace modules in builder (ensures proper symlinks and resolution)
RUN pnpm install --frozen-lockfile --prefer-offline # Build shared packages
# Build workspace packages so downstream apps can consume compiled artifacts RUN pnpm --filter @customer-portal/domain build \
RUN pnpm --filter @customer-portal/domain build && \ && pnpm --filter @customer-portal/logging build \
pnpm --filter @customer-portal/logging build && \ && pnpm --filter @customer-portal/validation build
pnpm --filter @customer-portal/validation build
# Build BFF (generate Prisma client then compile) # Build BFF (prisma types generated in dev, not needed here)
RUN pnpm --filter @customer-portal/bff exec prisma generate && \ RUN pnpm --filter @customer-portal/bff build
pnpm --filter @customer-portal/bff build
# ===================================================== # =====================================================
# Production Stage - Final optimized image for Plesk # Stage 3: Production Dependencies (Alpine, pnpm deploy)
# ===================================================== # =====================================================
FROM node:22-alpine AS production FROM node:22-alpine AS prod-deps
# Install runtime dependencies including dumb-init for proper signal handling
RUN apk add --no-cache \
wget \
curl \
dumb-init \
# Toolchain for native rebuilds (bcrypt) and Prisma (openssl), and nc for wait-for
python3 \
make \
g++ \
pkgconfig \
openssl \
netcat-openbsd \
&& rm -rf /var/cache/apk/*
# Install pnpm for production dependencies
RUN corepack enable && corepack prepare pnpm@10.15.0 --activate RUN corepack enable && corepack prepare pnpm@10.15.0 --activate
WORKDIR /app WORKDIR /app
# Copy workspace configuration # Minimal manifests for dependency graph
COPY pnpm-workspace.yaml package.json pnpm-lock.yaml ./ COPY .npmrc pnpm-workspace.yaml package.json pnpm-lock.yaml ./
# Copy package.json files for dependency resolution
COPY packages/domain/package.json ./packages/domain/ COPY packages/domain/package.json ./packages/domain/
COPY packages/logging/package.json ./packages/logging/ COPY packages/logging/package.json ./packages/logging/
COPY packages/validation/package.json ./packages/validation/ COPY packages/validation/package.json ./packages/validation/
COPY apps/bff/package.json ./apps/bff/ COPY apps/bff/package.json ./apps/bff/
COPY apps/bff/prisma ./apps/bff/prisma
# Install production dependencies only (clean approach)
ENV HUSKY=0 ENV HUSKY=0
RUN pnpm install --frozen-lockfile --prod --ignore-scripts
# Rebuild native modules for Alpine environment RUN apk add --no-cache --virtual .build-deps python3 make g++ pkgconfig openssl-dev \
RUN pnpm rebuild bcrypt # 1) Install full deps (needed for prisma CLI + bcrypt build)
&& pnpm install --frozen-lockfile --ignore-scripts \
# 2) Rebuild bcrypt for musl
&& pnpm rebuild bcrypt \
# 3) Generate Prisma client for Alpine (musl) — the only runtime client
&& cd apps/bff && pnpm exec prisma generate && cd ../.. \
# 4) Create production-only deployment for BFF
&& pnpm deploy --filter @customer-portal/bff --prod /app/deploy \
# 5) Remove build-time node_modules and cleanup
&& rm -rf /app/node_modules /app/pnpm-lock.yaml \
/root/.cache /root/.npm /tmp/* /var/cache/apk/* \
&& apk del .build-deps
# Copy built applications and shared package from builder # /app/deploy now contains: package.json + node_modules for BFF prod deps only
COPY --from=builder /app/packages/domain/dist ./packages/domain/dist
COPY --from=builder /app/packages/logging/dist ./packages/logging/dist
COPY --from=builder /app/packages/validation/dist ./packages/validation/dist
COPY --from=builder /app/apps/bff/dist ./apps/bff/dist
COPY --from=builder /app/apps/bff/prisma ./apps/bff/prisma
# Generate Prisma client in production environment # =====================================================
WORKDIR /app/apps/bff # Stage 4: Production Runtime (minimal)
RUN pnpm dlx prisma@6.14.0 generate # =====================================================
FROM node:22-alpine AS production
# Strip build toolchain to shrink image RUN addgroup --system --gid 1001 nodejs \
RUN apk del --no-cache python3 make g++ pkgconfig && rm -rf /root/.cache /var/cache/apk/* && adduser --system --uid 1001 nestjs
# Copy entrypoint script # Only tools needed at runtime
COPY apps/bff/scripts/docker-entrypoint.sh /app/docker-entrypoint.sh RUN apk add --no-cache wget dumb-init openssl netcat-openbsd \
RUN chmod +x /app/docker-entrypoint.sh && rm -rf /var/cache/apk/*
# Create non-root user for security [[memory:6689308]] WORKDIR /app
RUN addgroup --system --gid 1001 nodejs && \
adduser --system --uid 1001 nestjs
# Create necessary directories and set permissions # Deploy tree (prod deps for BFF only)
RUN mkdir -p /app/secrets /app/logs && \ COPY --from=prod-deps --chown=nestjs:nodejs /app/deploy ./
chown -R nestjs:nodejs /app
# Compiled code and prisma schema
COPY --from=builder --chown=nestjs:nodejs /app/packages/domain/dist ./packages/domain/dist
COPY --from=builder --chown=nestjs:nodejs /app/packages/logging/dist ./packages/logging/dist
COPY --from=builder --chown=nestjs:nodejs /app/packages/validation/dist ./packages/validation/dist
COPY --from=builder --chown=nestjs:nodejs /app/apps/bff/dist ./apps/bff/dist
COPY --from=builder --chown=nestjs:nodejs /app/apps/bff/prisma ./apps/bff/prisma
# Entrypoint and runtime dirs
COPY --chown=nestjs:nodejs apps/bff/scripts/docker-entrypoint.sh /app/docker-entrypoint.sh
RUN chmod +x /app/docker-entrypoint.sh \
&& mkdir -p /app/secrets /app/logs \
&& chown nestjs:nodejs /app/secrets /app/logs
# Switch to non-root user
USER nestjs USER nestjs
# Expose port
EXPOSE 4000 EXPOSE 4000
ENV NODE_ENV=production PORT=4000
# Environment variables
ENV NODE_ENV=production
ENV PORT=4000
# Set working directory for the app
WORKDIR /app/apps/bff WORKDIR /app/apps/bff
# Health check for container monitoring
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \ HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
CMD wget --no-verbose --tries=1 --spider http://localhost:4000/health || exit 1 CMD wget --no-verbose --tries=1 --spider http://localhost:4000/health || exit 1
# Use dumb-init for proper signal handling, then entrypoint script
ENTRYPOINT ["dumb-init", "--", "/app/docker-entrypoint.sh"] ENTRYPOINT ["dumb-init", "--", "/app/docker-entrypoint.sh"]
CMD ["node", "dist/main.js"] CMD ["node", "dist/main.js"]

View File

@ -1,5 +1,9 @@
generator client { generator client {
provider = "prisma-client-js" provider = "prisma-client-js"
// Only include engines we actually need:
// - native: for local development
// - linux-musl-openssl-3.0.x: for Alpine production
binaryTargets = ["native", "linux-musl-openssl-3.0.x"]
} }
datasource db { datasource db {

View File

@ -24,7 +24,7 @@ fi
# Run database migrations if enabled # Run database migrations if enabled
if [ "$RUN_MIGRATIONS" = "true" ] && [ -n "$DATABASE_URL" ]; then if [ "$RUN_MIGRATIONS" = "true" ] && [ -n "$DATABASE_URL" ]; then
echo "🗄️ Running database migrations..." echo "🗄️ Running database migrations..."
npx prisma migrate deploy --schema=/app/apps/bff/prisma/schema.prisma npx prisma@6.14.0 migrate deploy --schema=/app/apps/bff/prisma/schema.prisma
echo "✅ Migrations complete" echo "✅ Migrations complete"
fi fi

View File

@ -1,4 +1,12 @@
import "tsconfig-paths/register"; // tsconfig-paths only needed in development - production builds resolve paths at compile time
if (process.env.NODE_ENV !== "production") {
try {
require("tsconfig-paths/register");
} catch {
// Not available, paths already resolved
}
}
import { Logger, type INestApplication } from "@nestjs/common"; import { Logger, type INestApplication } from "@nestjs/common";
import { bootstrap } from "./app/bootstrap"; import { bootstrap } from "./app/bootstrap";

View File

@ -59,13 +59,15 @@ export class WhmcsLinkWorkflowService {
}); });
// Provide more specific error messages based on the error type // Provide more specific error messages based on the error type
// Use BadRequestException (400) instead of UnauthorizedException (401)
// to avoid triggering "session expired" logic in the frontend
if (error instanceof Error && error.message.includes("not found")) { if (error instanceof Error && error.message.includes("not found")) {
throw new UnauthorizedException( throw new BadRequestException(
"No billing account found with this email address. Please check your email or contact support." "No billing account found with this email address. Please check your email or contact support."
); );
} }
throw new UnauthorizedException("Unable to verify account. Please try again later."); throw new BadRequestException("Unable to verify account. Please try again later.");
} }
const clientNumericId = clientDetails.id; const clientNumericId = clientDetails.id;
@ -84,15 +86,17 @@ export class WhmcsLinkWorkflowService {
const validateResult = await this.whmcsService.validateLogin(email, password); const validateResult = await this.whmcsService.validateLogin(email, password);
this.logger.debug("WHMCS validation successful"); this.logger.debug("WHMCS validation successful");
if (!validateResult || !validateResult.userId) { if (!validateResult || !validateResult.userId) {
throw new UnauthorizedException("Invalid email or password. Please try again."); throw new BadRequestException("Invalid email or password. Please try again.");
} }
} catch (error) { } catch (error) {
if (error instanceof UnauthorizedException) throw error; // Re-throw BadRequestException from the validation above
if (error instanceof BadRequestException) throw error;
const errorMessage = getErrorMessage(error); const errorMessage = getErrorMessage(error);
this.logger.error("WHMCS credential validation failed", { error: errorMessage }); this.logger.error("WHMCS credential validation failed", { error: errorMessage });
// Check if this is a WHMCS authentication error and provide user-friendly message // Check if this is a WHMCS authentication error and provide user-friendly message
// Use BadRequestException (400) to avoid triggering "session expired" in frontend
const normalizedMessage = errorMessage.toLowerCase(); const normalizedMessage = errorMessage.toLowerCase();
const authErrorPhrases = [ const authErrorPhrases = [
"email or password invalid", "email or password invalid",
@ -101,13 +105,13 @@ export class WhmcsLinkWorkflowService {
"login failed", "login failed",
]; ];
if (authErrorPhrases.some(phrase => normalizedMessage.includes(phrase))) { if (authErrorPhrases.some(phrase => normalizedMessage.includes(phrase))) {
throw new UnauthorizedException( throw new BadRequestException(
"Invalid email or password. Please check your credentials and try again." "Invalid email or password. Please check your credentials and try again."
); );
} }
// For other errors, provide generic message to avoid exposing system details // For other errors, provide generic message to avoid exposing system details
throw new UnauthorizedException("Unable to verify credentials. Please try again later."); throw new BadRequestException("Unable to verify credentials. Please try again later.");
} }
const customerNumber = const customerNumber =
@ -190,13 +194,14 @@ export class WhmcsLinkWorkflowService {
throw error; throw error;
} }
// Treat missing WHMCS mappings/records as an auth-style failure rather than a system error // Treat missing WHMCS mappings/records as a validation failure
// Use BadRequestException (400) to avoid triggering "session expired" in frontend
if ( if (
error instanceof NotFoundException || error instanceof NotFoundException ||
/whmcs client mapping not found/i.test(message) || /whmcs client mapping not found/i.test(message) ||
/whmcs.*not found/i.test(message) /whmcs.*not found/i.test(message)
) { ) {
throw new UnauthorizedException( throw new BadRequestException(
"No billing account found with this email address. Please check your email or contact support." "No billing account found with this email address. Please check your email or contact support."
); );
} }

View File

@ -2,106 +2,93 @@
# Multi-stage build for Next.js production deployment via Plesk # Multi-stage build for Next.js production deployment via Plesk
# ===================================================== # =====================================================
# Dependencies Stage - Install all dependencies # Stage 1: Dependencies - Install all dependencies
# ===================================================== # =====================================================
FROM node:22-alpine AS deps FROM node:22-alpine AS deps
# Install system dependencies for building RUN apk add --no-cache libc6-compat dumb-init \
RUN apk add --no-cache libc6-compat dumb-init && corepack enable && corepack prepare pnpm@10.15.0 --activate
# Install pnpm
RUN corepack enable && corepack prepare pnpm@10.15.0 --activate
WORKDIR /app WORKDIR /app
# Copy workspace configuration # Copy workspace configuration
COPY pnpm-workspace.yaml package.json pnpm-lock.yaml ./ COPY .npmrc pnpm-workspace.yaml package.json pnpm-lock.yaml ./
# Copy package.json files for dependency resolution
COPY packages/domain/package.json ./packages/domain/ COPY packages/domain/package.json ./packages/domain/
COPY packages/validation/package.json ./packages/validation/ COPY packages/validation/package.json ./packages/validation/
COPY apps/portal/package.json ./apps/portal/ COPY apps/portal/package.json ./apps/portal/
# Install dependencies with frozen lockfile # Install all dependencies with scripts enabled (esbuild, sharp, etc.)
RUN pnpm install --frozen-lockfile --prefer-offline RUN pnpm install --frozen-lockfile --prefer-offline --config.ignore-scripts=false
# ===================================================== # =====================================================
# Builder Stage - Build the application # Stage 2: Builder - Compile and build Next.js
# ===================================================== # =====================================================
FROM node:22-alpine AS builder FROM node:22-alpine AS builder
# Install pnpm
RUN corepack enable && corepack prepare pnpm@10.15.0 --activate RUN corepack enable && corepack prepare pnpm@10.15.0 --activate
WORKDIR /app WORKDIR /app
# Copy workspace configuration # Copy workspace configuration and source
COPY pnpm-workspace.yaml package.json pnpm-lock.yaml ./ COPY .npmrc pnpm-workspace.yaml package.json pnpm-lock.yaml tsconfig.json tsconfig.base.json ./
# Copy source code
COPY packages/ ./packages/ COPY packages/ ./packages/
COPY apps/portal/ ./apps/portal/ COPY apps/portal/ ./apps/portal/
COPY tsconfig.json tsconfig.base.json ./
# Ensure public directory exists even if the repo doesn't have one # Ensure public directory exists
RUN mkdir -p /app/apps/portal/public RUN mkdir -p /app/apps/portal/public
# Copy node_modules from deps stage # Copy pre-installed node_modules from deps
COPY --from=deps /app/node_modules ./node_modules COPY --from=deps /app/node_modules ./node_modules
# Build shared workspace packages first # Build shared packages
RUN pnpm --filter @customer-portal/domain build && \ RUN pnpm --filter @customer-portal/domain build && \
pnpm --filter @customer-portal/validation build pnpm --filter @customer-portal/validation build
# Build portal with standalone output # Build-time environment variables (baked into Next.js client bundle)
ARG NEXT_PUBLIC_API_BASE=/api
ARG NEXT_PUBLIC_APP_NAME="Customer Portal"
ARG NEXT_PUBLIC_APP_VERSION=1.0.0
ENV NODE_ENV=production \
NEXT_PUBLIC_API_BASE=${NEXT_PUBLIC_API_BASE} \
NEXT_PUBLIC_APP_NAME=${NEXT_PUBLIC_APP_NAME} \
NEXT_PUBLIC_APP_VERSION=${NEXT_PUBLIC_APP_VERSION}
WORKDIR /app/apps/portal WORKDIR /app/apps/portal
ENV NODE_ENV=production
RUN pnpm build RUN pnpm build
# ===================================================== # =====================================================
# Production Stage - Final optimized image for Plesk # Stage 3: Production - Minimal Alpine runtime image
# ===================================================== # =====================================================
FROM node:22-alpine AS production FROM node:22-alpine AS production
# Install runtime dependencies including dumb-init for proper signal handling RUN apk add --no-cache wget curl dumb-init libc6-compat \
RUN apk add --no-cache \
wget \
curl \
dumb-init \
libc6-compat \
&& rm -rf /var/cache/apk/* && rm -rf /var/cache/apk/*
WORKDIR /app WORKDIR /app
# Create non-root user for security [[memory:6689308]] # Create non-root user
RUN addgroup --system --gid 1001 nodejs && \ RUN addgroup --system --gid 1001 nodejs && \
adduser --system --uid 1001 nextjs adduser --system --uid 1001 nextjs
# Copy the Next.js standalone build with proper ownership # Copy Next.js standalone build
COPY --from=builder --chown=nextjs:nodejs /app/apps/portal/.next/standalone ./ COPY --from=builder --chown=nextjs:nodejs /app/apps/portal/.next/standalone ./
COPY --from=builder --chown=nextjs:nodejs /app/apps/portal/.next/static ./apps/portal/.next/static COPY --from=builder --chown=nextjs:nodejs /app/apps/portal/.next/static ./apps/portal/.next/static
COPY --from=builder --chown=nextjs:nodejs /app/apps/portal/public ./apps/portal/public COPY --from=builder --chown=nextjs:nodejs /app/apps/portal/public ./apps/portal/public
# Create necessary directories and set permissions RUN mkdir -p /app/logs && chown -R nextjs:nodejs /app
RUN mkdir -p /app/logs && \
chown -R nextjs:nodejs /app
# Switch to non-root user
USER nextjs USER nextjs
# Expose port (required for Plesk port mapping)
EXPOSE 3000 EXPOSE 3000
# Environment variables ENV NODE_ENV=production \
ENV NODE_ENV=production NEXT_TELEMETRY_DISABLED=1 \
ENV NEXT_TELEMETRY_DISABLED=1 PORT=3000 \
ENV PORT=3000 HOSTNAME="0.0.0.0"
ENV HOSTNAME="0.0.0.0"
# Health check for container monitoring
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \ HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
CMD wget --no-verbose --tries=1 --spider http://localhost:3000/api/health || exit 1 CMD wget --no-verbose --tries=1 --spider http://localhost:3000/api/health || exit 1
# Use dumb-init for proper signal handling in containers
ENTRYPOINT ["dumb-init", "--"] ENTRYPOINT ["dumb-init", "--"]
CMD ["node", "apps/portal/server.js"] CMD ["node", "apps/portal/server.js"]

View File

@ -88,8 +88,9 @@ const nextConfig = {
"style-src 'self' 'unsafe-inline'", "style-src 'self' 'unsafe-inline'",
"img-src 'self' data: https:", "img-src 'self' data: https:",
"font-src 'self' data:", "font-src 'self' data:",
// Allow API connections // Allow API connections (include localhost for development)
"connect-src 'self' https:", // Allow localhost in development for API calls to BFF
`connect-src 'self' https: ${process.env.NODE_ENV === "development" ? "http://localhost:*" : ""}`,
"frame-ancestors 'none'", "frame-ancestors 'none'",
].join("; "), ].join("; "),
}, },

View File

@ -54,68 +54,35 @@ export interface ApiClient {
DELETE: ApiMethod; DELETE: ApiMethod;
} }
type EnvKey = /**
| "NEXT_PUBLIC_API_BASE" * Resolve API base URL:
| "NEXT_PUBLIC_API_URL" * - Production (browser): Use same origin (nginx proxies /api to backend)
| "API_BASE_URL" * - Development: Use localhost:4000 (direct to BFF)
| "API_BASE" * - SSR: Use NEXT_PUBLIC_API_BASE env var or localhost:4000
| "API_URL"; */
export const resolveBaseUrl = (explicitBase?: string): string => {
// 1. Explicit base URL provided
if (explicitBase?.trim()) {
return explicitBase.replace(/\/+$/, "");
}
const BASE_URL_ENV_KEYS: readonly EnvKey[] = [ // 2. Browser: use same origin (nginx proxies /api/* to backend)
"NEXT_PUBLIC_API_BASE",
"NEXT_PUBLIC_API_URL",
"API_BASE_URL",
"API_BASE",
"API_URL",
];
const DEFAULT_BASE_URL = "http://localhost:4000";
const resolveSameOriginBase = () => {
if (typeof window !== "undefined" && window.location?.origin) { if (typeof window !== "undefined" && window.location?.origin) {
return window.location.origin; return window.location.origin;
} }
const globalLocation = (globalThis as { location?: { origin?: string } } | undefined)?.location; // 3. Server-side or build time: check env vars
if (globalLocation?.origin) { const envBase = process.env.NEXT_PUBLIC_API_BASE;
return globalLocation.origin; if (envBase?.trim()) {
} // If relative path like "/api", we can't use it server-side without origin
// Just return it - will work in browser after hydration
return DEFAULT_BASE_URL; if (envBase.startsWith("http")) {
}; return envBase.replace(/\/+$/, "");
const normalizeBaseUrl = (value: string) => {
const trimmed = value.trim();
if (!trimmed) {
return DEFAULT_BASE_URL;
}
if (trimmed === "/") {
return resolveSameOriginBase();
}
return trimmed.replace(/\/+$/, "");
};
const resolveBaseUrlFromEnv = () => {
if (typeof process !== "undefined" && process.env) {
for (const key of BASE_URL_ENV_KEYS) {
const envValue = process.env[key];
if (typeof envValue === "string" && envValue.trim()) {
return normalizeBaseUrl(envValue);
}
} }
} }
return DEFAULT_BASE_URL; // 4. Fallback for development
}; return "http://localhost:4000";
export const resolveBaseUrl = (baseUrl?: string) => {
if (typeof baseUrl === "string" && baseUrl.trim()) {
return normalizeBaseUrl(baseUrl);
}
return resolveBaseUrlFromEnv();
}; };
const applyPathParams = (path: string, params?: PathParams): string => { const applyPathParams = (path: string, params?: PathParams): string => {

1
pnpm-lock.yaml generated
View File

@ -3,6 +3,7 @@ lockfileVersion: '9.0'
settings: settings:
autoInstallPeers: true autoInstallPeers: true
excludeLinksFromLockfile: false excludeLinksFromLockfile: false
injectWorkspacePackages: true
importers: importers:

View File

@ -1 +0,0 @@
d56f8408ed1de76e225abd6a8ddb741c32f96102f03b0caf8fef089a30de317b /home/barsa/projects/customer_portal/customer-portal/portal-backend.20251201-1dafa73.tar

View File

@ -0,0 +1 @@
735d984b4fc0c5de1404ee95991e6a0ab627e815a46fbb2e3002240a551146a2 /home/barsa/projects/customer_portal/customer-portal/portal-backend.latest.tar.gz

View File

@ -0,0 +1 @@
de99755961ca5a0d2b8713b1a57b6d818cb860d0eb87387c4ff508882d2f6984 /home/barsa/projects/customer_portal/customer-portal/portal-backend.latest.tar

View File

@ -1 +0,0 @@
4510c9159622868d3cbbf8212274e08bb374e541876406ba7d0f2d7d4d93983a /home/barsa/projects/customer_portal/customer-portal/portal-frontend.20251201-1dafa73.tar

View File

@ -0,0 +1 @@
2d1c7887410361baefcc3f2038dce9079ca6fa19d5afa29e8281c99a40d020c7 /home/barsa/projects/customer_portal/customer-portal/portal-frontend.latest.tar.gz

View File

@ -0,0 +1 @@
ea3c21988f94a9f8755e1024d45187afad435df399c79c17934e701ca7c4ad9b /home/barsa/projects/customer_portal/customer-portal/portal-frontend.latest.tar

View File

@ -1,160 +1,232 @@
#!/usr/bin/env bash #!/usr/bin/env bash
# 🐳 Build production Docker images for Plesk deployment
# 🐳 Build production images for Plesk and save as .tar # Features: Parallel builds, BuildKit, compressed tarballs
# - Builds apps/portal (frontend) and apps/bff (backend)
# - Tags both with :latest and optional version/sha tag
# - Saves tarballs in project root for easy Plesk upload
set -Eeuo pipefail set -Eeuo pipefail
IFS=$'\n\t'
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
IMAGE_FRONTEND_NAME="${IMAGE_FRONTEND_NAME:-portal-frontend}" # Configuration (override via env vars or flags)
IMAGE_BACKEND_NAME="${IMAGE_BACKEND_NAME:-portal-backend}" IMAGE_FRONTEND="${IMAGE_FRONTEND_NAME:-portal-frontend}"
IMAGE_BACKEND="${IMAGE_BACKEND_NAME:-portal-backend}"
# Optional explicit tag via env or flag; defaults to git short sha + date
IMAGE_TAG="${IMAGE_TAG:-}" IMAGE_TAG="${IMAGE_TAG:-}"
OUTPUT_DIR="${OUTPUT_DIR:-$PROJECT_ROOT}" OUTPUT_DIR="${OUTPUT_DIR:-$PROJECT_ROOT}"
PUSH_REMOTE="${PUSH_REMOTE:-}" # e.g. ghcr.io/<org> PUSH_REMOTE="${PUSH_REMOTE:-}"
PARALLEL="${PARALLEL_BUILD:-1}"
COMPRESS="${COMPRESS:-1}"
USE_LATEST_FILENAME="${USE_LATEST_FILENAME:-1}" # Default: save as .latest.tar.gz
SAVE_TARS=1
GREEN='\033[0;32m' # Colors
YELLOW='\033[1;33m' G='\033[0;32m' Y='\033[1;33m' R='\033[0;31m' B='\033[0;34m' N='\033[0m'
RED='\033[0;31m' log() { echo -e "${G}[BUILD]${N} $*"; }
NC='\033[0m' info() { echo -e "${B}[BUILD]${N} $*"; }
warn() { echo -e "${Y}[BUILD]${N} $*"; }
log() { echo -e "${GREEN}[PLESK-BUILD] $*${NC}"; } fail() { echo -e "${R}[BUILD] ERROR:${N} $*"; exit 1; }
warn() { echo -e "${YELLOW}[PLESK-BUILD] $*${NC}"; }
fail() { echo -e "${RED}[PLESK-BUILD] ERROR: $*${NC}"; exit 1; }
usage() { usage() {
cat <<EOF cat <<EOF
Build production Docker images and save tarballs for Plesk. Build Docker images and save tarballs for Plesk.
Usage: $0 [--tag <tag>] [--output <dir>] [--push <registry>] [--no-save] Usage: $0 [OPTIONS]
Options: Options:
--tag <tag> Tag to add in addition to 'latest' (e.g. v1.2.3 or abc123) --tag <tag> Version tag for image (default: YYYYMMDD-gitsha)
--output <dir> Directory to write tar files (default: project root) --output <dir> Output directory (default: project root)
--push <registry> Also tag and push to registry (e.g. ghcr.io/org or docker.io/user) --push <registry> Push to registry after build
--no-save Build and tag images but do not write tar files --no-save Build only, no tar files
--no-compress Save as .tar instead of .tar.gz
Env vars: --versioned Name files with version tag (default: .latest.tar.gz)
IMAGE_FRONTEND_NAME, IMAGE_BACKEND_NAME, IMAGE_TAG, OUTPUT_DIR, PUSH_REMOTE --sequential Build one at a time (default: parallel)
-h, --help Show this help
Examples: Examples:
$0 --tag $(date +%Y%m%d)-$(git -C "$PROJECT_ROOT" rev-parse --short HEAD) $0 # Output: portal-frontend.latest.tar.gz (default)
PUSH_REMOTE=ghcr.io/acme $0 --tag v1.0.0 $0 --versioned # Output: portal-frontend.20251201-abc123.tar.gz
$0 --tag v1.2.3 --versioned # Output: portal-frontend.v1.2.3.tar.gz
$0 --sequential --no-save # Debug build
EOF EOF
exit 0
} }
SAVE_TARS=1 # Parse arguments
while [[ $# -gt 0 ]]; do while [[ $# -gt 0 ]]; do
case "$1" in case "$1" in
--tag) --tag) IMAGE_TAG="${2:-}"; shift 2 ;;
IMAGE_TAG="${2:-}"; shift 2 ;; --output) OUTPUT_DIR="${2:-}"; shift 2 ;;
--output) --push) PUSH_REMOTE="${2:-}"; shift 2 ;;
OUTPUT_DIR="${2:-}"; shift 2 ;; --no-save) SAVE_TARS=0; shift ;;
--push) --no-compress) COMPRESS=0; shift ;;
PUSH_REMOTE="${2:-}"; shift 2 ;; --versioned) USE_LATEST_FILENAME=0; shift ;;
--no-save) --sequential) PARALLEL=0; shift ;;
SAVE_TARS=0; shift ;; -h|--help) usage ;;
-h|--help) *) fail "Unknown option: $1" ;;
usage; exit 0 ;;
*)
warn "Unknown option: $1"; usage; exit 1 ;;
esac esac
done done
command -v docker >/dev/null 2>&1 || fail "Docker is required." # Validation
command -v docker >/dev/null 2>&1 || fail "Docker required"
cd "$PROJECT_ROOT" cd "$PROJECT_ROOT"
[[ -f apps/portal/Dockerfile ]] || fail "Missing apps/portal/Dockerfile" [[ -f apps/portal/Dockerfile ]] || fail "Missing apps/portal/Dockerfile"
[[ -f apps/bff/Dockerfile ]] || fail "Missing apps/bff/Dockerfile" [[ -f apps/bff/Dockerfile ]] || fail "Missing apps/bff/Dockerfile"
if [[ -z "${IMAGE_TAG}" ]]; then # Auto-generate tag if not provided
if git -C "$PROJECT_ROOT" rev-parse --short HEAD >/dev/null 2>&1; then [[ -z "$IMAGE_TAG" ]] && IMAGE_TAG="$(date +%Y%m%d)-$(git rev-parse --short HEAD 2>/dev/null || echo 'local')"
GIT_SHA="$(git -C "$PROJECT_ROOT" rev-parse --short HEAD)"
IMAGE_TAG="$(date +%Y%m%d)-$GIT_SHA" # Enable BuildKit
export DOCKER_BUILDKIT=1
# Build args
NEXT_PUBLIC_API_BASE="${NEXT_PUBLIC_API_BASE:-/api}"
NEXT_PUBLIC_APP_NAME="${NEXT_PUBLIC_APP_NAME:-Customer Portal}"
GIT_SOURCE="$(git config --get remote.origin.url 2>/dev/null || echo unknown)"
log "🏷️ Tag: ${IMAGE_TAG}"
LOG_DIR="${OUTPUT_DIR}/.build-logs"
mkdir -p "$LOG_DIR"
build_frontend() {
local logfile="$LOG_DIR/frontend.log"
docker build -f apps/portal/Dockerfile \
--build-arg "NEXT_PUBLIC_API_BASE=${NEXT_PUBLIC_API_BASE}" \
--build-arg "NEXT_PUBLIC_APP_NAME=${NEXT_PUBLIC_APP_NAME}" \
--build-arg "NEXT_PUBLIC_APP_VERSION=${IMAGE_TAG}" \
-t "${IMAGE_FRONTEND}:latest" -t "${IMAGE_FRONTEND}:${IMAGE_TAG}" \
--label "org.opencontainers.image.version=${IMAGE_TAG}" \
--label "org.opencontainers.image.source=${GIT_SOURCE}" \
. > "$logfile" 2>&1
local exit_code=$?
if [[ $exit_code -eq 0 ]]; then
log "✅ Frontend done ($(tail -1 "$logfile" | grep -oP 'DONE \K[0-9.]+s' || echo 'complete'))"
else else
IMAGE_TAG="$(date +%Y%m%d)" warn "❌ Frontend FAILED - see $logfile"
tail -20 "$logfile"
fi fi
return $exit_code
}
build_backend() {
local logfile="$LOG_DIR/backend.log"
docker build -f apps/bff/Dockerfile \
-t "${IMAGE_BACKEND}:latest" -t "${IMAGE_BACKEND}:${IMAGE_TAG}" \
--label "org.opencontainers.image.version=${IMAGE_TAG}" \
--label "org.opencontainers.image.source=${GIT_SOURCE}" \
. > "$logfile" 2>&1
local exit_code=$?
if [[ $exit_code -eq 0 ]]; then
log "✅ Backend done ($(tail -1 "$logfile" | grep -oP 'DONE \K[0-9.]+s' || echo 'complete'))"
else
warn "❌ Backend FAILED - see $logfile"
tail -20 "$logfile"
fi
return $exit_code
}
# Build images
START=$(date +%s)
if [[ "$PARALLEL" -eq 1 ]]; then
log "🚀 Parallel build (logs: $LOG_DIR/)"
log "🔨 Building frontend..."
log "🔨 Building backend..."
build_frontend & FE_PID=$!
build_backend & BE_PID=$!
# Show progress dots while waiting
while kill -0 $FE_PID 2>/dev/null || kill -0 $BE_PID 2>/dev/null; do
printf "."
sleep 5
done
echo ""
# Check results
wait $FE_PID || fail "Frontend build failed - check $LOG_DIR/frontend.log"
wait $BE_PID || fail "Backend build failed - check $LOG_DIR/backend.log"
else
log "🔧 Sequential build..."
log "🔨 Building frontend..."
build_frontend || fail "Frontend build failed"
log "🔨 Building backend..."
build_backend || fail "Backend build failed"
fi fi
log "🔨 Building frontend image (${IMAGE_FRONTEND_NAME}:latest, ${IMAGE_FRONTEND_NAME}:${IMAGE_TAG})" BUILD_TIME=$(($(date +%s) - START))
docker build \ log "⏱️ Built in ${BUILD_TIME}s"
--file apps/portal/Dockerfile \
--tag "${IMAGE_FRONTEND_NAME}:latest" \
--tag "${IMAGE_FRONTEND_NAME}:${IMAGE_TAG}" \
--label "org.opencontainers.image.title=Customer Portal Frontend" \
--label "org.opencontainers.image.version=${IMAGE_TAG}" \
--label "org.opencontainers.image.source=$(git -C "$PROJECT_ROOT" config --get remote.origin.url 2>/dev/null || echo unknown)" \
.
log "🔨 Building backend image (${IMAGE_BACKEND_NAME}:latest, ${IMAGE_BACKEND_NAME}:${IMAGE_TAG})" # Save tarballs
docker build \ if [[ "$SAVE_TARS" -eq 1 ]]; then
--file apps/bff/Dockerfile \
--tag "${IMAGE_BACKEND_NAME}:latest" \
--tag "${IMAGE_BACKEND_NAME}:${IMAGE_TAG}" \
--label "org.opencontainers.image.title=Customer Portal Backend" \
--label "org.opencontainers.image.version=${IMAGE_TAG}" \
--label "org.opencontainers.image.source=$(git -C "$PROJECT_ROOT" config --get remote.origin.url 2>/dev/null || echo unknown)" \
.
if [[ "${SAVE_TARS}" -eq 1 ]]; then
mkdir -p "$OUTPUT_DIR" mkdir -p "$OUTPUT_DIR"
FRONT_TAR_LATEST="$OUTPUT_DIR/${IMAGE_FRONTEND_NAME}.latest.tar" SAVE_START=$(date +%s)
BACK_TAR_LATEST="$OUTPUT_DIR/${IMAGE_BACKEND_NAME}.latest.tar"
FRONT_TAR_TAGGED="$OUTPUT_DIR/${IMAGE_FRONTEND_NAME}.${IMAGE_TAG}.tar"
BACK_TAR_TAGGED="$OUTPUT_DIR/${IMAGE_BACKEND_NAME}.${IMAGE_TAG}.tar"
log "💾 Saving tarballs to $OUTPUT_DIR ..." # Determine filename suffix
docker save -o "$FRONT_TAR_LATEST" "${IMAGE_FRONTEND_NAME}:latest" if [[ "$USE_LATEST_FILENAME" -eq 1 ]]; then
docker save -o "$BACK_TAR_LATEST" "${IMAGE_BACKEND_NAME}:latest" FILE_TAG="latest"
docker save -o "$FRONT_TAR_TAGGED" "${IMAGE_FRONTEND_NAME}:${IMAGE_TAG}" else
docker save -o "$BACK_TAR_TAGGED" "${IMAGE_BACKEND_NAME}:${IMAGE_TAG}" FILE_TAG="$IMAGE_TAG"
fi
log "🔐 Generating checksums for integrity verification..." if [[ "$COMPRESS" -eq 1 ]]; then
sha256sum "$FRONT_TAR_TAGGED" > "${FRONT_TAR_TAGGED}.sha256" # Pick fastest available compressor: pigz (parallel) > gzip
sha256sum "$BACK_TAR_TAGGED" > "${BACK_TAR_TAGGED}.sha256" if command -v pigz >/dev/null 2>&1; then
COMPRESSOR="pigz -p $(nproc)" # Use all CPU cores
COMP_NAME="pigz"
else
COMPRESSOR="gzip -1" # Fast mode if no pigz
COMP_NAME="gzip"
fi
log "✅ Wrote:" FE_TAR="$OUTPUT_DIR/${IMAGE_FRONTEND}.${FILE_TAG}.tar.gz"
echo " - $FRONT_TAR_LATEST" BE_TAR="$OUTPUT_DIR/${IMAGE_BACKEND}.${FILE_TAG}.tar.gz"
echo " - $BACK_TAR_LATEST" log "💾 Compressing with $COMP_NAME..."
echo " - $FRONT_TAR_TAGGED"
echo " - $BACK_TAR_TAGGED" (docker save "${IMAGE_FRONTEND}:latest" | $COMPRESSOR > "$FE_TAR") &
echo " - ${FRONT_TAR_TAGGED}.sha256" (docker save "${IMAGE_BACKEND}:latest" | $COMPRESSOR > "$BE_TAR") &
echo " - ${BACK_TAR_TAGGED}.sha256" wait
else
FE_TAR="$OUTPUT_DIR/${IMAGE_FRONTEND}.${FILE_TAG}.tar"
BE_TAR="$OUTPUT_DIR/${IMAGE_BACKEND}.${FILE_TAG}.tar"
log "💾 Saving uncompressed tarballs..."
docker save -o "$FE_TAR" "${IMAGE_FRONTEND}:latest" &
docker save -o "$BE_TAR" "${IMAGE_BACKEND}:latest" &
wait
fi
SAVE_TIME=$(($(date +%s) - SAVE_START))
sha256sum "$FE_TAR" > "${FE_TAR}.sha256"
sha256sum "$BE_TAR" > "${BE_TAR}.sha256"
log "✅ Saved in ${SAVE_TIME}s:"
printf " %-50s %s\n" "$FE_TAR" "$(du -h "$FE_TAR" | cut -f1)"
printf " %-50s %s\n" "$BE_TAR" "$(du -h "$BE_TAR" | cut -f1)"
fi fi
if [[ -n "${PUSH_REMOTE}" ]]; then # Push to registry
FE_REMOTE_LATEST="${PUSH_REMOTE%/}/${IMAGE_FRONTEND_NAME}:latest" if [[ -n "$PUSH_REMOTE" ]]; then
FE_REMOTE_TAGGED="${PUSH_REMOTE%/}/${IMAGE_FRONTEND_NAME}:${IMAGE_TAG}" log "📤 Pushing to ${PUSH_REMOTE}..."
BE_REMOTE_LATEST="${PUSH_REMOTE%/}/${IMAGE_BACKEND_NAME}:latest" for img in "${IMAGE_FRONTEND}" "${IMAGE_BACKEND}"; do
BE_REMOTE_TAGGED="${PUSH_REMOTE%/}/${IMAGE_BACKEND_NAME}:${IMAGE_TAG}" for tag in "latest" "${IMAGE_TAG}"; do
docker tag "${img}:${tag}" "${PUSH_REMOTE}/${img}:${tag}"
log "📤 Tagging for remote: ${PUSH_REMOTE}" docker push "${PUSH_REMOTE}/${img}:${tag}" &
docker tag "${IMAGE_FRONTEND_NAME}:latest" "$FE_REMOTE_LATEST" done
docker tag "${IMAGE_FRONTEND_NAME}:${IMAGE_TAG}" "$FE_REMOTE_TAGGED" done
docker tag "${IMAGE_BACKEND_NAME}:latest" "$BE_REMOTE_LATEST" wait
docker tag "${IMAGE_BACKEND_NAME}:${IMAGE_TAG}" "$BE_REMOTE_TAGGED" log "✅ Pushed"
log "🚀 Pushing to remote registry (ensure you are logged in)"
docker push "$FE_REMOTE_LATEST"
docker push "$FE_REMOTE_TAGGED"
docker push "$BE_REMOTE_LATEST"
docker push "$BE_REMOTE_TAGGED"
fi fi
log "🎉 Done!" TOTAL_TIME=$(($(date +%s) - START))
log "" log "🎉 Complete in ${TOTAL_TIME}s"
log "Next steps:" echo ""
log " 1. Upload .tar files to your Plesk server" info "Next: Upload to Plesk, then:"
log " 2. Load images: docker load -i portal-frontend.${IMAGE_TAG}.tar" if [[ "$COMPRESS" -eq 1 ]]; then
log " 3. Verify checksums: sha256sum -c portal-frontend.${IMAGE_TAG}.tar.sha256" echo " gunzip -c ${IMAGE_FRONTEND}.${FILE_TAG}.tar.gz | docker load"
log " 4. Update Portainer stack with new image tag: ${IMAGE_TAG}" echo " gunzip -c ${IMAGE_BACKEND}.${FILE_TAG}.tar.gz | docker load"
log "" else
log "See docker/portainer/PORTAINER-GUIDE.md for detailed instructions." echo " docker load -i ${IMAGE_FRONTEND}.${FILE_TAG}.tar"
echo " docker load -i ${IMAGE_BACKEND}.${FILE_TAG}.tar"
fi
if [[ "$USE_LATEST_FILENAME" -eq 0 ]]; then
echo " Update Portainer with tag: ${IMAGE_TAG}"
fi