Dockerfile best practices, multi-stage builds, and Docker Compose
# Docker Containers for Development with Google Antigravity
Master Docker containerization in your Google Antigravity projects for consistent development environments and production-ready deployments. This guide covers Dockerfile optimization, multi-stage builds, and Docker Compose orchestration.
## Optimized Dockerfile for Next.js
Create a production-ready multi-stage Dockerfile:
```dockerfile
# Dockerfile
# Stage 1: Dependencies
FROM node:20-alpine AS deps
# libc6-compat: glibc shim some prebuilt native Node modules need on musl/Alpine
RUN apk add --no-cache libc6-compat
WORKDIR /app
# Install dependencies based on the preferred package manager
COPY package.json yarn.lock* package-lock.json* pnpm-lock.yaml* ./
RUN \
  if [ -f yarn.lock ]; then yarn --frozen-lockfile; \
  elif [ -f package-lock.json ]; then npm ci; \
  elif [ -f pnpm-lock.yaml ]; then corepack enable pnpm && pnpm i --frozen-lockfile; \
  else echo "Lockfile not found." && exit 1; \
  fi

# Stage 2: Builder
FROM node:20-alpine AS builder
WORKDIR /app
COPY --from=deps /app/node_modules ./node_modules
COPY . .
# Disable telemetry during build
ENV NEXT_TELEMETRY_DISABLED=1
# Build arguments for environment variables.
# WARNING: build args are persisted in image history (`docker history`);
# treat DATABASE_URL here as non-secret, or use BuildKit secret mounts
# (`RUN --mount=type=secret,...`) for real credentials.
ARG DATABASE_URL
ARG NEXT_PUBLIC_APP_URL
ENV DATABASE_URL=${DATABASE_URL}
ENV NEXT_PUBLIC_APP_URL=${NEXT_PUBLIC_APP_URL}
RUN \
  if [ -f yarn.lock ]; then yarn build; \
  elif [ -f package-lock.json ]; then npm run build; \
  elif [ -f pnpm-lock.yaml ]; then corepack enable pnpm && pnpm run build; \
  fi

# Stage 3: Runner
# NOTE: the standalone copy below requires `output: "standalone"` in
# next.config — confirm the project sets it, or .next/standalone won't exist.
FROM node:20-alpine AS runner
WORKDIR /app
ENV NODE_ENV=production
ENV NEXT_TELEMETRY_DISABLED=1
# Create non-root user for security (single layer)
RUN addgroup --system --gid 1001 nodejs \
  && adduser --system --uid 1001 nextjs
# Copy necessary files with ownership set at copy time.
# A separate `RUN chown -R` would re-write every copied file into an
# extra layer, bloating the image; `COPY --chown` avoids that entirely.
COPY --from=builder --chown=nextjs:nodejs /app/public ./public
COPY --from=builder --chown=nextjs:nodejs /app/.next/standalone ./
COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static
USER nextjs
EXPOSE 3000
ENV PORT=3000
ENV HOSTNAME="0.0.0.0"
CMD ["node", "server.js"]
```
## Docker Compose for Development
Set up a complete development environment:
```yaml
# docker-compose.yml
# NOTE: the top-level `version` key is obsolete in the Compose Specification —
# Compose v2 ignores it and prints a warning — so it is omitted.
services:
  app:
    build:
      context: .
      dockerfile: Dockerfile.dev
    volumes:
      # Bind-mount source for hot reload; the anonymous volumes keep the
      # container-built node_modules and .next from being shadowed by the host.
      - .:/app
      - /app/node_modules
      - /app/.next
    ports:
      - "3000:3000"
    environment:
      - NODE_ENV=development
      # Dev-only credentials — never reuse these values outside local compose.
      - DATABASE_URL=postgresql://postgres:password@db:5432/myapp_dev
      - REDIS_URL=redis://redis:6379
    depends_on:
      db:
        condition: service_healthy
      # Gate on the redis healthcheck below instead of mere container start,
      # so the app never races a not-yet-ready Redis.
      redis:
        condition: service_healthy
    command: npm run dev

  db:
    image: postgres:16-alpine
    volumes:
      - postgres_data:/var/lib/postgresql/data
      # Seed script runs only on first initialization of the data volume.
      - ./scripts/init-db.sql:/docker-entrypoint-initdb.d/init.sql:ro
    environment:
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: password
      POSTGRES_DB: myapp_dev
    ports:
      - "5432:5432"
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 5s
      timeout: 5s
      retries: 5

  redis:
    image: redis:7-alpine
    volumes:
      - redis_data:/data
    command: redis-server --appendonly yes
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      timeout: 3s
      retries: 5

  mailhog:
    image: mailhog/mailhog
    ports:
      - "1025:1025"  # SMTP ingest
      - "8025:8025"  # Web UI

volumes:
  postgres_data:
  redis_data:
```
## Development Dockerfile
Create a development-specific Dockerfile:
```dockerfile
# Dockerfile.dev
# Development image: installs ALL dependencies (including devDependencies)
# and runs the hot-reloading dev server; source is bind-mounted by compose.
FROM node:20-alpine
WORKDIR /app
# git is needed by some install scripts / tooling inside the container
RUN apk add --no-cache git
# Enable pnpm via corepack (ships with Node 20)
RUN corepack enable pnpm
# Copy package files first so the install layer is cached until they change
COPY package.json pnpm-lock.yaml ./
# --frozen-lockfile: fail fast if package.json and the lockfile disagree,
# instead of silently rewriting pnpm-lock.yaml inside the image.
RUN pnpm install --frozen-lockfile
# Copy source code (superseded at runtime by the compose bind mount)
COPY . .
# Expose port
EXPOSE 3000
# Start development server
CMD ["pnpm", "dev"]
```
## Docker Compose Production
Configure production deployment:
```yaml
# docker-compose.prod.yml
# NOTE: the obsolete top-level `version` key is omitted (Compose v2 ignores it).
services:
  app:
    build:
      context: .
      dockerfile: Dockerfile
      args:
        # WARNING: build args are recorded in image history; keep secrets out
        # of DATABASE_URL here or use BuildKit secret mounts instead.
        - DATABASE_URL=${DATABASE_URL}
        - NEXT_PUBLIC_APP_URL=${NEXT_PUBLIC_APP_URL}
    restart: always
    # No host port binding: with replicas > 1 a fixed "3000:3000" mapping
    # cannot start — both replicas would contend for host port 3000.
    # nginx reaches the app over the compose network instead.
    expose:
      - "3000"
    environment:
      - NODE_ENV=production
      - DATABASE_URL=${DATABASE_URL}
      - REDIS_URL=redis://redis:6379
    depends_on:
      - redis
    deploy:
      # NOTE: `deploy` is fully honored by Swarm; plain `docker compose up`
      # applies the resource limits but needs `--scale app=2` for replicas.
      replicas: 2
      resources:
        limits:
          cpus: "1"
          memory: 1G
        reservations:
          cpus: "0.5"
          memory: 512M
    healthcheck:
      test: ["CMD", "wget", "-qO-", "http://localhost:3000/api/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      # Grace period so slow cold starts aren't counted as failures
      start_period: 40s

  redis:
    image: redis:7-alpine
    restart: always
    volumes:
      - redis_data:/data
    # AOF persistence plus an LRU cap so Redis degrades as a cache under pressure
    command: redis-server --appendonly yes --maxmemory 256mb --maxmemory-policy allkeys-lru
    deploy:
      resources:
        limits:
          memory: 512M

  nginx:
    image: nginx:alpine
    restart: always
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./nginx.conf:/etc/nginx/nginx.conf:ro
      - ./certs:/etc/nginx/certs:ro
    depends_on:
      - app

volumes:
  redis_data:
```
## Docker Build Scripts
Automate Docker operations:
```typescript
// scripts/docker-build.ts
// Builds the application image, tagging both a date-stamped tag and
// `latest`, and pushes both when a registry is configured.
import { execSync } from "child_process";

// Image coordinates; TAG defaults to today's date (YYYY-MM-DD).
const IMAGE_NAME = process.env.IMAGE_NAME || "myapp";
const TAG = process.env.TAG || new Date().toISOString().slice(0, 10);
const REGISTRY = process.env.REGISTRY || "";

/** Echo a shell command, then run it, streaming output to this terminal. */
function run(command: string) {
  console.log(`Running: ${command}`);
  execSync(command, { stdio: "inherit" });
}

/** Prefix a local image ref with the registry when one is configured. */
function qualify(ref: string): string {
  return REGISTRY ? `${REGISTRY}/${ref}` : ref;
}

async function build() {
  const fullTag = qualify(`${IMAGE_NAME}:${TAG}`);
  // FIX: `latest` must carry the registry prefix too — previously the script
  // tagged/pushed bare `myapp:latest`, which targets Docker Hub instead of
  // ${REGISTRY} and fails (or pushes to the wrong place).
  const latestTag = qualify(`${IMAGE_NAME}:latest`);
  console.log(`Building Docker image: ${fullTag}`);
  // Build with build args; default missing env vars to "" rather than
  // interpolating the literal string "undefined" into the image.
  run(`docker build \
  --build-arg DATABASE_URL="${process.env.DATABASE_URL ?? ""}" \
  --build-arg NEXT_PUBLIC_APP_URL="${process.env.NEXT_PUBLIC_APP_URL ?? ""}" \
  -t ${fullTag} \
  -t ${latestTag} \
  .`);
  // Push to registry if configured
  if (REGISTRY) {
    console.log("Pushing to registry...");
    run(`docker push ${fullTag}`);
    run(`docker push ${latestTag}`);
  }
  console.log("Build complete!");
}

// FIX: exit non-zero on failure so CI pipelines see the error;
// `.catch(console.error)` alone would exit 0 after a failed build.
build().catch((err) => {
  console.error(err);
  process.exit(1);
});
```
## Health Check Endpoint
Implement container health checks:
```typescript
// src/app/api/health/route.ts
import { NextResponse } from "next/server";
import { db } from "@/lib/database";
import { redis } from "@/lib/redis";

/**
 * Container health endpoint. Probes the database and Redis sequentially,
 * recording per-dependency status and latency (ms). Responds 200 while all
 * probes succeed; 503 with status "degraded" once any dependency fails.
 */
export async function GET() {
  const report = {
    status: "healthy",
    timestamp: new Date().toISOString(),
    uptime: process.uptime(),
    checks: {} as Record<string, { status: string; latency?: number }>,
  };

  // Run a single dependency probe: record latency on success, or mark the
  // whole report degraded on failure without throwing.
  const probe = async (name: string, ping: () => Promise<unknown>) => {
    const startedAt = Date.now();
    try {
      await ping();
      report.checks[name] = {
        status: "healthy",
        latency: Date.now() - startedAt,
      };
    } catch {
      report.checks[name] = { status: "unhealthy" };
      report.status = "degraded";
    }
  };

  await probe("database", () => db.$queryRaw`SELECT 1`);
  await probe("redis", () => redis.ping());

  const statusCode = report.status === "healthy" ? 200 : 503;
  return NextResponse.json(report, { status: statusCode });
}
```
Google Antigravity generates production-ready Docker configurations with multi-stage builds, security best practices, and orchestration patterns for scalable deployments. This Docker prompt is ideal for developers working on containerized web applications, multi-service development environments, and CI/CD deployment pipelines.
By using this prompt, you can save hours of manual coding and ensure best practices are followed from the start. It's particularly valuable for teams looking to maintain consistency across their docker implementations.
Yes! All prompts on Antigravity AI Directory are free to use for both personal and commercial projects. No attribution required, though it's always appreciated.
This prompt works excellently with Claude, ChatGPT, Cursor, GitHub Copilot, and other modern AI coding assistants. For best results, use models with large context windows.
You can modify the prompt by adding specific requirements, constraints, or preferences. For Docker projects, consider mentioning your framework version, coding style, and any specific libraries you're using.