Update pnpm-lock.yaml, Dockerfile, and error handling in BFF
- Enabled workspace package injection in pnpm-lock.yaml for improved dependency management. - Removed outdated SHA256 files for backend and frontend tarballs. - Refactored Dockerfile for BFF to streamline the build process and optimize production image size. - Updated Prisma client configuration to specify binary targets for Alpine compatibility. - Enhanced error handling in WhmcsLinkWorkflowService to use BadRequestException for clearer client feedback. - Adjusted entrypoint script to ensure proper database migration execution.
This commit is contained in:
parent
f4d4cb0ab0
commit
68561fdf1d
4
.npmrc
Normal file
4
.npmrc
Normal file
@ -0,0 +1,4 @@
|
||||
# pnpm configuration
|
||||
# Enable injected workspace packages for pnpm v10 deploy
|
||||
inject-workspace-packages=true
|
||||
|
||||
@ -1,145 +1,128 @@
|
||||
# 🚀 Backend (BFF) Dockerfile - Plesk Optimized
|
||||
# Multi-stage build for NestJS production deployment via Plesk
|
||||
# 🚀 Backend (BFF) Dockerfile - Production Grade (pnpm v10)
|
||||
# - Uses pnpm's injected workspace packages (no legacy flags)
|
||||
# - pnpm deploy creates minimal production-only install
|
||||
# - Prisma + bcrypt built only for Alpine
|
||||
# - No redundant installs
|
||||
|
||||
# =====================================================
|
||||
# Dependencies Stage - Install all dependencies
|
||||
# Stage 1: Dependencies (Debian for native builds)
|
||||
# =====================================================
|
||||
FROM node:22-bookworm-slim AS deps
|
||||
|
||||
# Install system dependencies for building
|
||||
RUN apt-get update && apt-get install -y dumb-init ca-certificates && rm -rf /var/lib/apt/lists/* \
|
||||
RUN apt-get update && apt-get install -y dumb-init ca-certificates \
|
||||
&& rm -rf /var/lib/apt/lists/* \
|
||||
&& corepack enable && corepack prepare pnpm@10.15.0 --activate
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Copy workspace configuration
|
||||
COPY pnpm-workspace.yaml package.json pnpm-lock.yaml ./
|
||||
|
||||
# Copy package.json files for dependency resolution
|
||||
COPY .npmrc pnpm-workspace.yaml package.json pnpm-lock.yaml ./
|
||||
COPY packages/domain/package.json ./packages/domain/
|
||||
COPY packages/logging/package.json ./packages/logging/
|
||||
COPY packages/validation/package.json ./packages/validation/
|
||||
COPY apps/bff/package.json ./apps/bff/
|
||||
|
||||
# Install ALL dependencies (needed for build)
|
||||
RUN pnpm install --frozen-lockfile --prefer-offline
|
||||
RUN pnpm install --frozen-lockfile --prefer-offline --config.ignore-scripts=false
|
||||
|
||||
# =====================================================
|
||||
# Builder Stage - Build the application
|
||||
# Stage 2: Builder (compile TypeScript)
|
||||
# =====================================================
|
||||
FROM node:22-bookworm-slim AS builder
|
||||
|
||||
# Install pnpm
|
||||
RUN apt-get update && apt-get install -y ca-certificates && rm -rf /var/lib/apt/lists/* \
|
||||
RUN apt-get update && apt-get install -y ca-certificates \
|
||||
&& rm -rf /var/lib/apt/lists/* \
|
||||
&& corepack enable && corepack prepare pnpm@10.15.0 --activate
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Copy workspace configuration
|
||||
COPY pnpm-workspace.yaml package.json pnpm-lock.yaml ./
|
||||
|
||||
# Copy source code
|
||||
COPY .npmrc pnpm-workspace.yaml package.json pnpm-lock.yaml tsconfig.json tsconfig.base.json ./
|
||||
COPY packages/ ./packages/
|
||||
COPY apps/bff/ ./apps/bff/
|
||||
COPY tsconfig.json tsconfig.base.json ./
|
||||
|
||||
# Copy node_modules from deps stage
|
||||
COPY --from=deps /app/node_modules ./node_modules
|
||||
|
||||
WORKDIR /app
|
||||
# Align workspace modules in builder (ensures proper symlinks and resolution)
|
||||
RUN pnpm install --frozen-lockfile --prefer-offline
|
||||
# Build workspace packages so downstream apps can consume compiled artifacts
|
||||
RUN pnpm --filter @customer-portal/domain build && \
|
||||
pnpm --filter @customer-portal/logging build && \
|
||||
pnpm --filter @customer-portal/validation build
|
||||
# Build BFF (generate Prisma client then compile)
|
||||
RUN pnpm --filter @customer-portal/bff exec prisma generate && \
|
||||
pnpm --filter @customer-portal/bff build
|
||||
# No second pnpm install – reuse deps layer
|
||||
|
||||
# Build shared packages
|
||||
RUN pnpm --filter @customer-portal/domain build \
|
||||
&& pnpm --filter @customer-portal/logging build \
|
||||
&& pnpm --filter @customer-portal/validation build
|
||||
|
||||
# Build BFF (prisma types generated in dev, not needed here)
|
||||
RUN pnpm --filter @customer-portal/bff build
|
||||
|
||||
# =====================================================
|
||||
# Production Stage - Final optimized image for Plesk
|
||||
# Stage 3: Production Dependencies (Alpine, pnpm deploy)
|
||||
# =====================================================
|
||||
FROM node:22-alpine AS production
|
||||
FROM node:22-alpine AS prod-deps
|
||||
|
||||
# Install runtime dependencies including dumb-init for proper signal handling
|
||||
RUN apk add --no-cache \
|
||||
wget \
|
||||
curl \
|
||||
dumb-init \
|
||||
# Toolchain for native rebuilds (bcrypt) and Prisma (openssl), and nc for wait-for
|
||||
python3 \
|
||||
make \
|
||||
g++ \
|
||||
pkgconfig \
|
||||
openssl \
|
||||
netcat-openbsd \
|
||||
&& rm -rf /var/cache/apk/*
|
||||
|
||||
# Install pnpm for production dependencies
|
||||
RUN corepack enable && corepack prepare pnpm@10.15.0 --activate
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Copy workspace configuration
|
||||
COPY pnpm-workspace.yaml package.json pnpm-lock.yaml ./
|
||||
|
||||
# Copy package.json files for dependency resolution
|
||||
# Minimal manifests for dependency graph
|
||||
COPY .npmrc pnpm-workspace.yaml package.json pnpm-lock.yaml ./
|
||||
COPY packages/domain/package.json ./packages/domain/
|
||||
COPY packages/logging/package.json ./packages/logging/
|
||||
COPY packages/validation/package.json ./packages/validation/
|
||||
COPY apps/bff/package.json ./apps/bff/
|
||||
COPY apps/bff/prisma ./apps/bff/prisma
|
||||
|
||||
# Install production dependencies only (clean approach)
|
||||
ENV HUSKY=0
|
||||
RUN pnpm install --frozen-lockfile --prod --ignore-scripts
|
||||
|
||||
# Rebuild native modules for Alpine environment
|
||||
RUN pnpm rebuild bcrypt
|
||||
RUN apk add --no-cache --virtual .build-deps python3 make g++ pkgconfig openssl-dev \
|
||||
# 1) Install full deps (needed for prisma CLI + bcrypt build)
|
||||
&& pnpm install --frozen-lockfile --ignore-scripts \
|
||||
# 2) Rebuild bcrypt for musl
|
||||
&& pnpm rebuild bcrypt \
|
||||
# 3) Generate Prisma client for Alpine (musl) – the only runtime client
|
||||
&& cd apps/bff && pnpm exec prisma generate && cd ../.. \
|
||||
# 4) Create production-only deployment for BFF
|
||||
&& pnpm deploy --filter @customer-portal/bff --prod /app/deploy \
|
||||
# 5) Remove build-time node_modules and cleanup
|
||||
&& rm -rf /app/node_modules /app/pnpm-lock.yaml \
|
||||
/root/.cache /root/.npm /tmp/* /var/cache/apk/* \
|
||||
&& apk del .build-deps
|
||||
|
||||
# Copy built applications and shared package from builder
|
||||
COPY --from=builder /app/packages/domain/dist ./packages/domain/dist
|
||||
COPY --from=builder /app/packages/logging/dist ./packages/logging/dist
|
||||
COPY --from=builder /app/packages/validation/dist ./packages/validation/dist
|
||||
COPY --from=builder /app/apps/bff/dist ./apps/bff/dist
|
||||
COPY --from=builder /app/apps/bff/prisma ./apps/bff/prisma
|
||||
# /app/deploy now contains: package.json + node_modules for BFF prod deps only
|
||||
|
||||
# Generate Prisma client in production environment
|
||||
WORKDIR /app/apps/bff
|
||||
RUN pnpm dlx prisma@6.14.0 generate
|
||||
# =====================================================
|
||||
# Stage 4: Production Runtime (minimal)
|
||||
# =====================================================
|
||||
FROM node:22-alpine AS production
|
||||
|
||||
# Strip build toolchain to shrink image
|
||||
RUN apk del --no-cache python3 make g++ pkgconfig && rm -rf /root/.cache /var/cache/apk/*
|
||||
RUN addgroup --system --gid 1001 nodejs \
|
||||
&& adduser --system --uid 1001 nestjs
|
||||
|
||||
# Copy entrypoint script
|
||||
COPY apps/bff/scripts/docker-entrypoint.sh /app/docker-entrypoint.sh
|
||||
RUN chmod +x /app/docker-entrypoint.sh
|
||||
# Only tools needed at runtime
|
||||
RUN apk add --no-cache wget dumb-init openssl netcat-openbsd \
|
||||
&& rm -rf /var/cache/apk/*
|
||||
|
||||
# Create non-root user for security [[memory:6689308]]
|
||||
RUN addgroup --system --gid 1001 nodejs && \
|
||||
adduser --system --uid 1001 nestjs
|
||||
WORKDIR /app
|
||||
|
||||
# Create necessary directories and set permissions
|
||||
RUN mkdir -p /app/secrets /app/logs && \
|
||||
chown -R nestjs:nodejs /app
|
||||
# Deploy tree (prod deps for BFF only)
|
||||
COPY --from=prod-deps --chown=nestjs:nodejs /app/deploy ./
|
||||
|
||||
# Compiled code and prisma schema
|
||||
COPY --from=builder --chown=nestjs:nodejs /app/packages/domain/dist ./packages/domain/dist
|
||||
COPY --from=builder --chown=nestjs:nodejs /app/packages/logging/dist ./packages/logging/dist
|
||||
COPY --from=builder --chown=nestjs:nodejs /app/packages/validation/dist ./packages/validation/dist
|
||||
COPY --from=builder --chown=nestjs:nodejs /app/apps/bff/dist ./apps/bff/dist
|
||||
COPY --from=builder --chown=nestjs:nodejs /app/apps/bff/prisma ./apps/bff/prisma
|
||||
|
||||
# Entrypoint and runtime dirs
|
||||
COPY --chown=nestjs:nodejs apps/bff/scripts/docker-entrypoint.sh /app/docker-entrypoint.sh
|
||||
RUN chmod +x /app/docker-entrypoint.sh \
|
||||
&& mkdir -p /app/secrets /app/logs \
|
||||
&& chown nestjs:nodejs /app/secrets /app/logs
|
||||
|
||||
# Switch to non-root user
|
||||
USER nestjs
|
||||
|
||||
# Expose port
|
||||
EXPOSE 4000
|
||||
ENV NODE_ENV=production PORT=4000
|
||||
|
||||
# Environment variables
|
||||
ENV NODE_ENV=production
|
||||
ENV PORT=4000
|
||||
|
||||
# Set working directory for the app
|
||||
WORKDIR /app/apps/bff
|
||||
|
||||
# Health check for container monitoring
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
|
||||
CMD wget --no-verbose --tries=1 --spider http://localhost:4000/health || exit 1
|
||||
|
||||
# Use dumb-init for proper signal handling, then entrypoint script
|
||||
ENTRYPOINT ["dumb-init", "--", "/app/docker-entrypoint.sh"]
|
||||
CMD ["node", "dist/main.js"]
|
||||
|
||||
@ -1,5 +1,9 @@
|
||||
generator client {
|
||||
provider = "prisma-client-js"
|
||||
// Only include engines we actually need:
|
||||
// - native: for local development
|
||||
// - linux-musl-openssl-3.0.x: for Alpine production
|
||||
binaryTargets = ["native", "linux-musl-openssl-3.0.x"]
|
||||
}
|
||||
|
||||
datasource db {
|
||||
|
||||
@ -24,7 +24,7 @@ fi
|
||||
# Run database migrations if enabled
|
||||
if [ "$RUN_MIGRATIONS" = "true" ] && [ -n "$DATABASE_URL" ]; then
|
||||
echo "🗄️ Running database migrations..."
|
||||
npx prisma migrate deploy --schema=/app/apps/bff/prisma/schema.prisma
|
||||
npx prisma@6.14.0 migrate deploy --schema=/app/apps/bff/prisma/schema.prisma
|
||||
echo "✅ Migrations complete"
|
||||
fi
|
||||
|
||||
|
||||
@ -1,4 +1,12 @@
|
||||
import "tsconfig-paths/register";
|
||||
// tsconfig-paths only needed in development - production builds resolve paths at compile time
|
||||
if (process.env.NODE_ENV !== "production") {
|
||||
try {
|
||||
require("tsconfig-paths/register");
|
||||
} catch {
|
||||
// Not available, paths already resolved
|
||||
}
|
||||
}
|
||||
|
||||
import { Logger, type INestApplication } from "@nestjs/common";
|
||||
|
||||
import { bootstrap } from "./app/bootstrap";
|
||||
|
||||
@ -59,13 +59,15 @@ export class WhmcsLinkWorkflowService {
|
||||
});
|
||||
|
||||
// Provide more specific error messages based on the error type
|
||||
// Use BadRequestException (400) instead of UnauthorizedException (401)
|
||||
// to avoid triggering "session expired" logic in the frontend
|
||||
if (error instanceof Error && error.message.includes("not found")) {
|
||||
throw new UnauthorizedException(
|
||||
throw new BadRequestException(
|
||||
"No billing account found with this email address. Please check your email or contact support."
|
||||
);
|
||||
}
|
||||
|
||||
throw new UnauthorizedException("Unable to verify account. Please try again later.");
|
||||
throw new BadRequestException("Unable to verify account. Please try again later.");
|
||||
}
|
||||
|
||||
const clientNumericId = clientDetails.id;
|
||||
@ -84,15 +86,17 @@ export class WhmcsLinkWorkflowService {
|
||||
const validateResult = await this.whmcsService.validateLogin(email, password);
|
||||
this.logger.debug("WHMCS validation successful");
|
||||
if (!validateResult || !validateResult.userId) {
|
||||
throw new UnauthorizedException("Invalid email or password. Please try again.");
|
||||
throw new BadRequestException("Invalid email or password. Please try again.");
|
||||
}
|
||||
} catch (error) {
|
||||
if (error instanceof UnauthorizedException) throw error;
|
||||
// Re-throw BadRequestException from the validation above
|
||||
if (error instanceof BadRequestException) throw error;
|
||||
|
||||
const errorMessage = getErrorMessage(error);
|
||||
this.logger.error("WHMCS credential validation failed", { error: errorMessage });
|
||||
|
||||
// Check if this is a WHMCS authentication error and provide user-friendly message
|
||||
// Use BadRequestException (400) to avoid triggering "session expired" in frontend
|
||||
const normalizedMessage = errorMessage.toLowerCase();
|
||||
const authErrorPhrases = [
|
||||
"email or password invalid",
|
||||
@ -101,13 +105,13 @@ export class WhmcsLinkWorkflowService {
|
||||
"login failed",
|
||||
];
|
||||
if (authErrorPhrases.some(phrase => normalizedMessage.includes(phrase))) {
|
||||
throw new UnauthorizedException(
|
||||
throw new BadRequestException(
|
||||
"Invalid email or password. Please check your credentials and try again."
|
||||
);
|
||||
}
|
||||
|
||||
// For other errors, provide generic message to avoid exposing system details
|
||||
throw new UnauthorizedException("Unable to verify credentials. Please try again later.");
|
||||
throw new BadRequestException("Unable to verify credentials. Please try again later.");
|
||||
}
|
||||
|
||||
const customerNumber =
|
||||
@ -190,13 +194,14 @@ export class WhmcsLinkWorkflowService {
|
||||
throw error;
|
||||
}
|
||||
|
||||
// Treat missing WHMCS mappings/records as an auth-style failure rather than a system error
|
||||
// Treat missing WHMCS mappings/records as a validation failure
|
||||
// Use BadRequestException (400) to avoid triggering "session expired" in frontend
|
||||
if (
|
||||
error instanceof NotFoundException ||
|
||||
/whmcs client mapping not found/i.test(message) ||
|
||||
/whmcs.*not found/i.test(message)
|
||||
) {
|
||||
throw new UnauthorizedException(
|
||||
throw new BadRequestException(
|
||||
"No billing account found with this email address. Please check your email or contact support."
|
||||
);
|
||||
}
|
||||
|
||||
@ -2,106 +2,93 @@
|
||||
# Multi-stage build for Next.js production deployment via Plesk
|
||||
|
||||
# =====================================================
|
||||
# Dependencies Stage - Install all dependencies
|
||||
# Stage 1: Dependencies - Install all dependencies
|
||||
# =====================================================
|
||||
FROM node:22-alpine AS deps
|
||||
|
||||
# Install system dependencies for building
|
||||
RUN apk add --no-cache libc6-compat dumb-init
|
||||
|
||||
# Install pnpm
|
||||
RUN corepack enable && corepack prepare pnpm@10.15.0 --activate
|
||||
RUN apk add --no-cache libc6-compat dumb-init \
|
||||
&& corepack enable && corepack prepare pnpm@10.15.0 --activate
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Copy workspace configuration
|
||||
COPY pnpm-workspace.yaml package.json pnpm-lock.yaml ./
|
||||
|
||||
# Copy package.json files for dependency resolution
|
||||
COPY .npmrc pnpm-workspace.yaml package.json pnpm-lock.yaml ./
|
||||
COPY packages/domain/package.json ./packages/domain/
|
||||
COPY packages/validation/package.json ./packages/validation/
|
||||
COPY apps/portal/package.json ./apps/portal/
|
||||
|
||||
# Install dependencies with frozen lockfile
|
||||
RUN pnpm install --frozen-lockfile --prefer-offline
|
||||
# Install all dependencies with scripts enabled (esbuild, sharp, etc.)
|
||||
RUN pnpm install --frozen-lockfile --prefer-offline --config.ignore-scripts=false
|
||||
|
||||
# =====================================================
|
||||
# Builder Stage - Build the application
|
||||
# Stage 2: Builder - Compile and build Next.js
|
||||
# =====================================================
|
||||
FROM node:22-alpine AS builder
|
||||
|
||||
# Install pnpm
|
||||
RUN corepack enable && corepack prepare pnpm@10.15.0 --activate
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Copy workspace configuration
|
||||
COPY pnpm-workspace.yaml package.json pnpm-lock.yaml ./
|
||||
|
||||
# Copy source code
|
||||
# Copy workspace configuration and source
|
||||
COPY .npmrc pnpm-workspace.yaml package.json pnpm-lock.yaml tsconfig.json tsconfig.base.json ./
|
||||
COPY packages/ ./packages/
|
||||
COPY apps/portal/ ./apps/portal/
|
||||
COPY tsconfig.json tsconfig.base.json ./
|
||||
|
||||
# Ensure public directory exists even if the repo doesn't have one
|
||||
# Ensure public directory exists
|
||||
RUN mkdir -p /app/apps/portal/public
|
||||
|
||||
# Copy node_modules from deps stage
|
||||
# Copy pre-installed node_modules from deps
|
||||
COPY --from=deps /app/node_modules ./node_modules
|
||||
|
||||
# Build shared workspace packages first
|
||||
# Build shared packages
|
||||
RUN pnpm --filter @customer-portal/domain build && \
|
||||
pnpm --filter @customer-portal/validation build
|
||||
|
||||
# Build portal with standalone output
|
||||
# Build-time environment variables (baked into Next.js client bundle)
|
||||
ARG NEXT_PUBLIC_API_BASE=/api
|
||||
ARG NEXT_PUBLIC_APP_NAME="Customer Portal"
|
||||
ARG NEXT_PUBLIC_APP_VERSION=1.0.0
|
||||
|
||||
ENV NODE_ENV=production \
|
||||
NEXT_PUBLIC_API_BASE=${NEXT_PUBLIC_API_BASE} \
|
||||
NEXT_PUBLIC_APP_NAME=${NEXT_PUBLIC_APP_NAME} \
|
||||
NEXT_PUBLIC_APP_VERSION=${NEXT_PUBLIC_APP_VERSION}
|
||||
|
||||
WORKDIR /app/apps/portal
|
||||
ENV NODE_ENV=production
|
||||
RUN pnpm build
|
||||
|
||||
# =====================================================
|
||||
# Production Stage - Final optimized image for Plesk
|
||||
# Stage 3: Production - Minimal Alpine runtime image
|
||||
# =====================================================
|
||||
FROM node:22-alpine AS production
|
||||
|
||||
# Install runtime dependencies including dumb-init for proper signal handling
|
||||
RUN apk add --no-cache \
|
||||
wget \
|
||||
curl \
|
||||
dumb-init \
|
||||
libc6-compat \
|
||||
RUN apk add --no-cache wget curl dumb-init libc6-compat \
|
||||
&& rm -rf /var/cache/apk/*
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Create non-root user for security [[memory:6689308]]
|
||||
# Create non-root user
|
||||
RUN addgroup --system --gid 1001 nodejs && \
|
||||
adduser --system --uid 1001 nextjs
|
||||
|
||||
# Copy the Next.js standalone build with proper ownership
|
||||
# Copy Next.js standalone build
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/apps/portal/.next/standalone ./
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/apps/portal/.next/static ./apps/portal/.next/static
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/apps/portal/public ./apps/portal/public
|
||||
|
||||
# Create necessary directories and set permissions
|
||||
RUN mkdir -p /app/logs && \
|
||||
chown -R nextjs:nodejs /app
|
||||
RUN mkdir -p /app/logs && chown -R nextjs:nodejs /app
|
||||
|
||||
# Switch to non-root user
|
||||
USER nextjs
|
||||
|
||||
# Expose port (required for Plesk port mapping)
|
||||
EXPOSE 3000
|
||||
|
||||
# Environment variables
|
||||
ENV NODE_ENV=production
|
||||
ENV NEXT_TELEMETRY_DISABLED=1
|
||||
ENV PORT=3000
|
||||
ENV HOSTNAME="0.0.0.0"
|
||||
ENV NODE_ENV=production \
|
||||
NEXT_TELEMETRY_DISABLED=1 \
|
||||
PORT=3000 \
|
||||
HOSTNAME="0.0.0.0"
|
||||
|
||||
# Health check for container monitoring
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
|
||||
CMD wget --no-verbose --tries=1 --spider http://localhost:3000/api/health || exit 1
|
||||
|
||||
# Use dumb-init for proper signal handling in containers
|
||||
ENTRYPOINT ["dumb-init", "--"]
|
||||
CMD ["node", "apps/portal/server.js"]
|
||||
|
||||
@ -88,8 +88,9 @@ const nextConfig = {
|
||||
"style-src 'self' 'unsafe-inline'",
|
||||
"img-src 'self' data: https:",
|
||||
"font-src 'self' data:",
|
||||
// Allow API connections
|
||||
"connect-src 'self' https:",
|
||||
// Allow API connections (include localhost for development)
|
||||
// Allow localhost in development for API calls to BFF
|
||||
`connect-src 'self' https: ${process.env.NODE_ENV === "development" ? "http://localhost:*" : ""}`,
|
||||
"frame-ancestors 'none'",
|
||||
].join("; "),
|
||||
},
|
||||
|
||||
@ -54,68 +54,35 @@ export interface ApiClient {
|
||||
DELETE: ApiMethod;
|
||||
}
|
||||
|
||||
type EnvKey =
|
||||
| "NEXT_PUBLIC_API_BASE"
|
||||
| "NEXT_PUBLIC_API_URL"
|
||||
| "API_BASE_URL"
|
||||
| "API_BASE"
|
||||
| "API_URL";
|
||||
/**
|
||||
* Resolve API base URL:
|
||||
* - Production (browser): Use same origin (nginx proxies /api to backend)
|
||||
* - Development: Use localhost:4000 (direct to BFF)
|
||||
* - SSR: Use NEXT_PUBLIC_API_BASE env var or localhost:4000
|
||||
*/
|
||||
export const resolveBaseUrl = (explicitBase?: string): string => {
|
||||
// 1. Explicit base URL provided
|
||||
if (explicitBase?.trim()) {
|
||||
return explicitBase.replace(/\/+$/, "");
|
||||
}
|
||||
|
||||
const BASE_URL_ENV_KEYS: readonly EnvKey[] = [
|
||||
"NEXT_PUBLIC_API_BASE",
|
||||
"NEXT_PUBLIC_API_URL",
|
||||
"API_BASE_URL",
|
||||
"API_BASE",
|
||||
"API_URL",
|
||||
];
|
||||
|
||||
const DEFAULT_BASE_URL = "http://localhost:4000";
|
||||
|
||||
const resolveSameOriginBase = () => {
|
||||
// 2. Browser: use same origin (nginx proxies /api/* to backend)
|
||||
if (typeof window !== "undefined" && window.location?.origin) {
|
||||
return window.location.origin;
|
||||
}
|
||||
|
||||
const globalLocation = (globalThis as { location?: { origin?: string } } | undefined)?.location;
|
||||
if (globalLocation?.origin) {
|
||||
return globalLocation.origin;
|
||||
}
|
||||
|
||||
return DEFAULT_BASE_URL;
|
||||
};
|
||||
|
||||
const normalizeBaseUrl = (value: string) => {
|
||||
const trimmed = value.trim();
|
||||
if (!trimmed) {
|
||||
return DEFAULT_BASE_URL;
|
||||
}
|
||||
|
||||
if (trimmed === "/") {
|
||||
return resolveSameOriginBase();
|
||||
}
|
||||
|
||||
return trimmed.replace(/\/+$/, "");
|
||||
};
|
||||
|
||||
const resolveBaseUrlFromEnv = () => {
|
||||
if (typeof process !== "undefined" && process.env) {
|
||||
for (const key of BASE_URL_ENV_KEYS) {
|
||||
const envValue = process.env[key];
|
||||
if (typeof envValue === "string" && envValue.trim()) {
|
||||
return normalizeBaseUrl(envValue);
|
||||
}
|
||||
// 3. Server-side or build time: check env vars
|
||||
const envBase = process.env.NEXT_PUBLIC_API_BASE;
|
||||
if (envBase?.trim()) {
|
||||
// If relative path like "/api", we can't use it server-side without origin
|
||||
// Just return it - will work in browser after hydration
|
||||
if (envBase.startsWith("http")) {
|
||||
return envBase.replace(/\/+$/, "");
|
||||
}
|
||||
}
|
||||
|
||||
return DEFAULT_BASE_URL;
|
||||
};
|
||||
|
||||
export const resolveBaseUrl = (baseUrl?: string) => {
|
||||
if (typeof baseUrl === "string" && baseUrl.trim()) {
|
||||
return normalizeBaseUrl(baseUrl);
|
||||
}
|
||||
|
||||
return resolveBaseUrlFromEnv();
|
||||
// 4. Fallback for development
|
||||
return "http://localhost:4000";
|
||||
};
|
||||
|
||||
const applyPathParams = (path: string, params?: PathParams): string => {
|
||||
|
||||
1
pnpm-lock.yaml
generated
1
pnpm-lock.yaml
generated
@ -3,6 +3,7 @@ lockfileVersion: '9.0'
|
||||
settings:
|
||||
autoInstallPeers: true
|
||||
excludeLinksFromLockfile: false
|
||||
injectWorkspacePackages: true
|
||||
|
||||
importers:
|
||||
|
||||
|
||||
@ -1 +0,0 @@
|
||||
d56f8408ed1de76e225abd6a8ddb741c32f96102f03b0caf8fef089a30de317b /home/barsa/projects/customer_portal/customer-portal/portal-backend.20251201-1dafa73.tar
|
||||
1
portal-backend.latest.tar.gz.sha256
Normal file
1
portal-backend.latest.tar.gz.sha256
Normal file
@ -0,0 +1 @@
|
||||
735d984b4fc0c5de1404ee95991e6a0ab627e815a46fbb2e3002240a551146a2 /home/barsa/projects/customer_portal/customer-portal/portal-backend.latest.tar.gz
|
||||
1
portal-backend.latest.tar.sha256
Normal file
1
portal-backend.latest.tar.sha256
Normal file
@ -0,0 +1 @@
|
||||
de99755961ca5a0d2b8713b1a57b6d818cb860d0eb87387c4ff508882d2f6984 /home/barsa/projects/customer_portal/customer-portal/portal-backend.latest.tar
|
||||
@ -1 +0,0 @@
|
||||
4510c9159622868d3cbbf8212274e08bb374e541876406ba7d0f2d7d4d93983a /home/barsa/projects/customer_portal/customer-portal/portal-frontend.20251201-1dafa73.tar
|
||||
1
portal-frontend.latest.tar.gz.sha256
Normal file
1
portal-frontend.latest.tar.gz.sha256
Normal file
@ -0,0 +1 @@
|
||||
2d1c7887410361baefcc3f2038dce9079ca6fa19d5afa29e8281c99a40d020c7 /home/barsa/projects/customer_portal/customer-portal/portal-frontend.latest.tar.gz
|
||||
1
portal-frontend.latest.tar.sha256
Normal file
1
portal-frontend.latest.tar.sha256
Normal file
@ -0,0 +1 @@
|
||||
ea3c21988f94a9f8755e1024d45187afad435df399c79c17934e701ca7c4ad9b /home/barsa/projects/customer_portal/customer-portal/portal-frontend.latest.tar
|
||||
@ -1,160 +1,232 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# 🐳 Build production images for Plesk and save as .tar
|
||||
# - Builds apps/portal (frontend) and apps/bff (backend)
|
||||
# - Tags both with :latest and optional version/sha tag
|
||||
# - Saves tarballs in project root for easy Plesk upload
|
||||
# 🐳 Build production Docker images for Plesk deployment
|
||||
# Features: Parallel builds, BuildKit, compressed tarballs
|
||||
|
||||
set -Eeuo pipefail
|
||||
IFS=$'\n\t'
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
|
||||
|
||||
IMAGE_FRONTEND_NAME="${IMAGE_FRONTEND_NAME:-portal-frontend}"
|
||||
IMAGE_BACKEND_NAME="${IMAGE_BACKEND_NAME:-portal-backend}"
|
||||
|
||||
# Optional explicit tag via env or flag; defaults to git short sha + date
|
||||
# Configuration (override via env vars or flags)
|
||||
IMAGE_FRONTEND="${IMAGE_FRONTEND_NAME:-portal-frontend}"
|
||||
IMAGE_BACKEND="${IMAGE_BACKEND_NAME:-portal-backend}"
|
||||
IMAGE_TAG="${IMAGE_TAG:-}"
|
||||
OUTPUT_DIR="${OUTPUT_DIR:-$PROJECT_ROOT}"
|
||||
PUSH_REMOTE="${PUSH_REMOTE:-}" # e.g. ghcr.io/<org>
|
||||
PUSH_REMOTE="${PUSH_REMOTE:-}"
|
||||
PARALLEL="${PARALLEL_BUILD:-1}"
|
||||
COMPRESS="${COMPRESS:-1}"
|
||||
USE_LATEST_FILENAME="${USE_LATEST_FILENAME:-1}" # Default: save as .latest.tar.gz
|
||||
SAVE_TARS=1
|
||||
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
RED='\033[0;31m'
|
||||
NC='\033[0m'
|
||||
|
||||
log() { echo -e "${GREEN}[PLESK-BUILD] $*${NC}"; }
|
||||
warn() { echo -e "${YELLOW}[PLESK-BUILD] $*${NC}"; }
|
||||
fail() { echo -e "${RED}[PLESK-BUILD] ERROR: $*${NC}"; exit 1; }
|
||||
# Colors
|
||||
G='\033[0;32m' Y='\033[1;33m' R='\033[0;31m' B='\033[0;34m' N='\033[0m'
|
||||
log() { echo -e "${G}[BUILD]${N} $*"; }
|
||||
info() { echo -e "${B}[BUILD]${N} $*"; }
|
||||
warn() { echo -e "${Y}[BUILD]${N} $*"; }
|
||||
fail() { echo -e "${R}[BUILD] ERROR:${N} $*"; exit 1; }
|
||||
|
||||
usage() {
|
||||
cat <<EOF
|
||||
Build production Docker images and save tarballs for Plesk.
|
||||
Build Docker images and save tarballs for Plesk.
|
||||
|
||||
Usage: $0 [--tag <tag>] [--output <dir>] [--push <registry>] [--no-save]
|
||||
Usage: $0 [OPTIONS]
|
||||
|
||||
Options:
|
||||
--tag <tag> Tag to add in addition to 'latest' (e.g. v1.2.3 or abc123)
|
||||
--output <dir> Directory to write tar files (default: project root)
|
||||
--push <registry> Also tag and push to registry (e.g. ghcr.io/org or docker.io/user)
|
||||
--no-save Build and tag images but do not write tar files
|
||||
|
||||
Env vars:
|
||||
IMAGE_FRONTEND_NAME, IMAGE_BACKEND_NAME, IMAGE_TAG, OUTPUT_DIR, PUSH_REMOTE
|
||||
--tag <tag> Version tag for image (default: YYYYMMDD-gitsha)
|
||||
--output <dir> Output directory (default: project root)
|
||||
--push <registry> Push to registry after build
|
||||
--no-save Build only, no tar files
|
||||
--no-compress Save as .tar instead of .tar.gz
|
||||
--versioned Name files with version tag (default: .latest.tar.gz)
|
||||
--sequential Build one at a time (default: parallel)
|
||||
-h, --help Show this help
|
||||
|
||||
Examples:
|
||||
$0 --tag $(date +%Y%m%d)-$(git -C "$PROJECT_ROOT" rev-parse --short HEAD)
|
||||
PUSH_REMOTE=ghcr.io/acme $0 --tag v1.0.0
|
||||
$0 # Output: portal-frontend.latest.tar.gz (default)
|
||||
$0 --versioned # Output: portal-frontend.20251201-abc123.tar.gz
|
||||
$0 --tag v1.2.3 --versioned # Output: portal-frontend.v1.2.3.tar.gz
|
||||
$0 --sequential --no-save # Debug build
|
||||
EOF
|
||||
exit 0
|
||||
}
|
||||
|
||||
SAVE_TARS=1
|
||||
# Parse arguments
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case "$1" in
|
||||
--tag)
|
||||
IMAGE_TAG="${2:-}"; shift 2 ;;
|
||||
--output)
|
||||
OUTPUT_DIR="${2:-}"; shift 2 ;;
|
||||
--push)
|
||||
PUSH_REMOTE="${2:-}"; shift 2 ;;
|
||||
--no-save)
|
||||
SAVE_TARS=0; shift ;;
|
||||
-h|--help)
|
||||
usage; exit 0 ;;
|
||||
*)
|
||||
warn "Unknown option: $1"; usage; exit 1 ;;
|
||||
--tag) IMAGE_TAG="${2:-}"; shift 2 ;;
|
||||
--output) OUTPUT_DIR="${2:-}"; shift 2 ;;
|
||||
--push) PUSH_REMOTE="${2:-}"; shift 2 ;;
|
||||
--no-save) SAVE_TARS=0; shift ;;
|
||||
--no-compress) COMPRESS=0; shift ;;
|
||||
--versioned) USE_LATEST_FILENAME=0; shift ;;
|
||||
--sequential) PARALLEL=0; shift ;;
|
||||
-h|--help) usage ;;
|
||||
*) fail "Unknown option: $1" ;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Validation
command -v docker >/dev/null 2>&1 || fail "Docker required"
cd "$PROJECT_ROOT"

[[ -f apps/portal/Dockerfile ]] || fail "Missing apps/portal/Dockerfile"
[[ -f apps/bff/Dockerfile ]] || fail "Missing apps/bff/Dockerfile"

# Auto-generate tag if not provided: YYYYMMDD-<short sha>, falling back to
# "local" when not inside a git checkout.
# (Replaces an unterminated if/if merge-residue block whose else branch had
# been displaced elsewhere in the file.)
[[ -z "$IMAGE_TAG" ]] && IMAGE_TAG="$(date +%Y%m%d)-$(git rev-parse --short HEAD 2>/dev/null || echo 'local')"

# Enable BuildKit
export DOCKER_BUILDKIT=1

# Build args (overridable via environment)
NEXT_PUBLIC_API_BASE="${NEXT_PUBLIC_API_BASE:-/api}"
NEXT_PUBLIC_APP_NAME="${NEXT_PUBLIC_APP_NAME:-Customer Portal}"
GIT_SOURCE="$(git config --get remote.origin.url 2>/dev/null || echo unknown)"

log "🏷️ Tag: ${IMAGE_TAG}"

# Per-build logs are written here so parallel builds don't interleave output.
LOG_DIR="${OUTPUT_DIR}/.build-logs"
mkdir -p "$LOG_DIR"
|
||||
|
||||
# Build the frontend image (tags :latest and :$IMAGE_TAG).
# All docker output is captured in $LOG_DIR/frontend.log; on failure the last
# 20 log lines are echoed. Returns docker build's exit status.
build_frontend() {
  local logfile="$LOG_DIR/frontend.log"
  docker build -f apps/portal/Dockerfile \
    --build-arg "NEXT_PUBLIC_API_BASE=${NEXT_PUBLIC_API_BASE}" \
    --build-arg "NEXT_PUBLIC_APP_NAME=${NEXT_PUBLIC_APP_NAME}" \
    --build-arg "NEXT_PUBLIC_APP_VERSION=${IMAGE_TAG}" \
    -t "${IMAGE_FRONTEND}:latest" -t "${IMAGE_FRONTEND}:${IMAGE_TAG}" \
    --label "org.opencontainers.image.version=${IMAGE_TAG}" \
    --label "org.opencontainers.image.source=${GIT_SOURCE}" \
    . > "$logfile" 2>&1
  local exit_code=$?
  if [[ $exit_code -eq 0 ]]; then
    # Best-effort: extract BuildKit's "DONE <secs>" from the last log line.
    log "✅ Frontend done ($(tail -1 "$logfile" | grep -oP 'DONE \K[0-9.]+s' || echo 'complete'))"
  else
    # FIX: removed a stray `IMAGE_TAG="$(date +%Y%m%d)"` (displaced merge
    # residue) that silently clobbered the image tag on build failure.
    warn "❌ Frontend FAILED - see $logfile"
    tail -20 "$logfile"
  fi
  return $exit_code
}
|
||||
|
||||
# Build the backend image (tags :latest and :$IMAGE_TAG).
# Docker output goes to $LOG_DIR/backend.log; on failure the last 20 log
# lines are echoed. Returns docker build's exit status.
build_backend() {
  local log_path="$LOG_DIR/backend.log"
  local rc=0
  docker build -f apps/bff/Dockerfile \
    -t "${IMAGE_BACKEND}:latest" -t "${IMAGE_BACKEND}:${IMAGE_TAG}" \
    --label "org.opencontainers.image.version=${IMAGE_TAG}" \
    --label "org.opencontainers.image.source=${GIT_SOURCE}" \
    . > "$log_path" 2>&1 || rc=$?
  if [[ $rc -eq 0 ]]; then
    # Best-effort: pull BuildKit's "DONE <secs>" out of the final log line.
    log "✅ Backend done ($(tail -1 "$log_path" | grep -oP 'DONE \K[0-9.]+s' || echo 'complete'))"
  else
    warn "❌ Backend FAILED - see $log_path"
    tail -20 "$log_path"
  fi
  return $rc
}
|
||||
|
||||
# Build images — sequentially (--sequential) or in parallel (default).
START=$(date +%s)

if [[ "$PARALLEL" -ne 1 ]]; then
  log "🔧 Sequential build..."
  log "🔨 Building frontend..."
  build_frontend || fail "Frontend build failed"
  log "🔨 Building backend..."
  build_backend || fail "Backend build failed"
else
  log "🚀 Parallel build (logs: $LOG_DIR/)"
  log "🔨 Building frontend..."
  log "🔨 Building backend..."

  build_frontend & frontend_pid=$!
  build_backend & backend_pid=$!

  # Emit a progress dot every 5s while either build is still running.
  while kill -0 "$frontend_pid" 2>/dev/null || kill -0 "$backend_pid" 2>/dev/null; do
    printf "."
    sleep 5
  done
  echo ""

  # Collect exit statuses; `wait <pid>` on an already-finished job still
  # returns its exit status.
  wait "$frontend_pid" || fail "Frontend build failed - check $LOG_DIR/frontend.log"
  wait "$backend_pid" || fail "Backend build failed - check $LOG_DIR/backend.log"
fi
|
||||
|
||||
# FIX: removed legacy merge residue that re-built both images here using
# ${IMAGE_FRONTEND_NAME}/${IMAGE_BACKEND_NAME} — variables the current script
# never defines (it uses IMAGE_FRONTEND/IMAGE_BACKEND), which would have
# produced invalid empty-name tags like ":latest". Both images were already
# built above via build_frontend/build_backend; only the timing log remains.
BUILD_TIME=$(($(date +%s) - START))
log "⏱️ Built in ${BUILD_TIME}s"
|
||||
|
||||
# Save tarballs (skipped with --no-save).
# FIX: merge residue had two nested `if SAVE_TARS` guards with unbalanced
# `fi`s, plus a legacy path that saved four tars under undefined
# ${IMAGE_*_NAME} variables and duplicated checksum generation. Reconstructed
# as one coherent save path.
if [[ "$SAVE_TARS" -eq 1 ]]; then
  mkdir -p "$OUTPUT_DIR"
  SAVE_START=$(date +%s)

  # Determine filename suffix: "latest" by default, the image tag with
  # --versioned.
  if [[ "$USE_LATEST_FILENAME" -eq 1 ]]; then
    FILE_TAG="latest"
  else
    FILE_TAG="$IMAGE_TAG"
  fi

  if [[ "$COMPRESS" -eq 1 ]]; then
    # Pick fastest available compressor: pigz (parallel) > gzip
    if command -v pigz >/dev/null 2>&1; then
      COMPRESSOR="pigz -p $(nproc)"  # Use all CPU cores
      COMP_NAME="pigz"
    else
      COMPRESSOR="gzip -1"  # Fast mode if no pigz
      COMP_NAME="gzip"
    fi

    FE_TAR="$OUTPUT_DIR/${IMAGE_FRONTEND}.${FILE_TAG}.tar.gz"
    BE_TAR="$OUTPUT_DIR/${IMAGE_BACKEND}.${FILE_TAG}.tar.gz"
    log "💾 Compressing with $COMP_NAME..."

    # Stream docker save through the compressor; both images in parallel.
    (docker save "${IMAGE_FRONTEND}:latest" | $COMPRESSOR > "$FE_TAR") &
    (docker save "${IMAGE_BACKEND}:latest" | $COMPRESSOR > "$BE_TAR") &
    wait
  else
    FE_TAR="$OUTPUT_DIR/${IMAGE_FRONTEND}.${FILE_TAG}.tar"
    BE_TAR="$OUTPUT_DIR/${IMAGE_BACKEND}.${FILE_TAG}.tar"
    log "💾 Saving uncompressed tarballs..."
    docker save -o "$FE_TAR" "${IMAGE_FRONTEND}:latest" &
    docker save -o "$BE_TAR" "${IMAGE_BACKEND}:latest" &
    wait
  fi

  SAVE_TIME=$(($(date +%s) - SAVE_START))
  # Checksums for integrity verification on the target host.
  sha256sum "$FE_TAR" > "${FE_TAR}.sha256"
  sha256sum "$BE_TAR" > "${BE_TAR}.sha256"

  log "✅ Saved in ${SAVE_TIME}s:"
  printf " %-50s %s\n" "$FE_TAR" "$(du -h "$FE_TAR" | cut -f1)"
  printf " %-50s %s\n" "$BE_TAR" "$(du -h "$BE_TAR" | cut -f1)"
fi
|
||||
|
||||
# Push to registry (only when --push / PUSH_REMOTE is set).
# FIX: merge residue left an unterminated legacy push block duplicating this
# loop; reconciled into the loop form, keeping the legacy code's trailing
# slash strip (${PUSH_REMOTE%/}) so "ghcr.io/acme/" doesn't yield a double
# slash in the remote reference.
if [[ -n "$PUSH_REMOTE" ]]; then
  log "📤 Pushing to ${PUSH_REMOTE}..."
  for img in "${IMAGE_FRONTEND}" "${IMAGE_BACKEND}"; do
    for tag in "latest" "${IMAGE_TAG}"; do
      docker tag "${img}:${tag}" "${PUSH_REMOTE%/}/${img}:${tag}"
      # Pushes run in the background; layers are deduped server-side.
      docker push "${PUSH_REMOTE%/}/${img}:${tag}" &
    done
  done
  wait
  log "✅ Pushed"
fi
|
||||
|
||||
# Final summary.
# FIX: removed a stale legacy "Next steps" block that referenced tar
# filenames this script no longer produces; also guard the load instructions
# behind SAVE_TARS so --no-save runs don't print instructions that use an
# unset FILE_TAG and point at files that were never written.
TOTAL_TIME=$(($(date +%s) - START))
log "🎉 Complete in ${TOTAL_TIME}s"
echo ""
if [[ "$SAVE_TARS" -eq 1 ]]; then
  info "Next: Upload to Plesk, then:"
  if [[ "$COMPRESS" -eq 1 ]]; then
    echo "  gunzip -c ${IMAGE_FRONTEND}.${FILE_TAG}.tar.gz | docker load"
    echo "  gunzip -c ${IMAGE_BACKEND}.${FILE_TAG}.tar.gz | docker load"
  else
    echo "  docker load -i ${IMAGE_FRONTEND}.${FILE_TAG}.tar"
    echo "  docker load -i ${IMAGE_BACKEND}.${FILE_TAG}.tar"
  fi
  if [[ "$USE_LATEST_FILENAME" -eq 0 ]]; then
    echo "  Update Portainer with tag: ${IMAGE_TAG}"
  fi
fi
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user