Skip to content

Deployment

Deploying TypeORM Applications to Production

Section titled “Deploying TypeORM Applications to Production”

Deploying TypeORM applications requires careful consideration of database migrations, environment configuration, and scalability.

Deployment Pipeline
================================================================================
+------------------+ +------------------+ +------------------+
| Build | --> | Test | --> | Package |
| Application | | Suite | | Container |
+------------------+ +------------------+ +------------------+
|
v
+------------------+ +------------------+ +------------------+
| Production | <-- | Staging | <-- | Registry |
| Deployment | | Deployment | | Push |
+------------------+ +------------------+ +------------------+
|
v
+------------------+
| Run Migrations |
| Health Checks |
| Monitoring |
+------------------+
================================================================================

# Dockerfile
# Build stage — needs devDependencies (TypeScript compiler etc.) to run the build.
FROM node:20-alpine AS builder
WORKDIR /app
# Copy package files first so the dependency layer is cached across code changes
COPY package*.json ./
# Install ALL dependencies: `npm run build` requires devDependencies.
# (The original `npm ci --only=production` made the build step fail for lack of tsc.)
RUN npm ci
# Copy source
COPY . .
# Build TypeScript
RUN npm run build
# Drop devDependencies so only runtime deps are carried into the final image
RUN npm prune --omit=dev
# Production stage
FROM node:20-alpine AS production
WORKDIR /app
# Copy built files and pruned, production-only dependencies
COPY --from=builder /app/dist ./dist
COPY --from=builder /app/node_modules ./node_modules
COPY --from=builder /app/package*.json ./
# Create non-root user (single RUN keeps the layer count down)
RUN addgroup -g 1001 -S nodejs \
 && adduser -S nestjs -u 1001 \
 && chown -R nestjs:nodejs /app
USER nestjs
# Environment
ENV NODE_ENV=production
ENV PORT=3000
EXPOSE 3000
# Health check — container is flagged unhealthy after 3 consecutive failures
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
  CMD wget --no-verbose --tries=1 --spider http://localhost:3000/health || exit 1
CMD ["node", "dist/main.js"]
docker-compose.yml
# NOTE(review): the `version` key is obsolete under the Compose Specification
# (Compose v2 ignores it with a warning); kept for legacy docker-compose v1.
version: '3.8'

services:
  app:
    build:
      context: .
      target: production
    ports:
      - "3000:3000"  # quoted so YAML never parses a port map as sexagesimal
    environment:
      - NODE_ENV=production
      - DB_HOST=postgres
      - DB_PORT=5432
      - DB_USERNAME=${DB_USERNAME}
      - DB_PASSWORD=${DB_PASSWORD}
      - DB_DATABASE=${DB_DATABASE}
    # Wait for both backing services to be healthy, not merely started
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    networks:
      - app-network
    restart: unless-stopped

  postgres:
    image: postgres:15-alpine
    environment:
      - POSTGRES_USER=${DB_USERNAME}
      - POSTGRES_PASSWORD=${DB_PASSWORD}
      - POSTGRES_DB=${DB_DATABASE}
    volumes:
      - postgres-data:/var/lib/postgresql/data
    healthcheck:
      # $$ defers expansion to the container shell, so the probe uses the
      # POSTGRES_USER actually set inside the container rather than relying
      # on compose-time substitution being available.
      test: ["CMD-SHELL", "pg_isready -U $${POSTGRES_USER}"]
      interval: 10s
      timeout: 5s
      retries: 5
    networks:
      - app-network
    restart: unless-stopped

  redis:
    image: redis:7-alpine
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5
    networks:
      - app-network
    restart: unless-stopped

volumes:
  postgres-data:

networks:
  app-network:
    driver: bridge

k8s/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp
  labels:
    app: myapp
spec:
  replicas: 3
  selector:
    matchLabels:
      app: myapp
  template:
    metadata:
      labels:
        app: myapp
    spec:
      containers:
        - name: myapp
          # NOTE(review): prefer an immutable tag (e.g. the git SHA) over
          # :latest so `kubectl set image` and rollbacks are deterministic.
          image: myapp:latest
          ports:
            - containerPort: 3000
          env:
            - name: NODE_ENV
              value: production
            - name: DB_HOST
              valueFrom:
                secretKeyRef:
                  name: db-secret
                  key: host
            - name: DB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: db-secret
                  key: password
          resources:
            requests:
              memory: "256Mi"
              cpu: "250m"
            limits:
              memory: "512Mi"
              cpu: "500m"
          livenessProbe:
            httpGet:
              path: /health
              port: 3000
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /health/ready
              port: 3000
            initialDelaySeconds: 5
            periodSeconds: 5
      initContainers:
        - name: run-migrations
          image: myapp:latest
          command: ['node', 'dist/scripts/migrate.js']
          # The migration script reads host AND password from the environment;
          # the original manifest supplied only DB_HOST, so the init container
          # could never authenticate against the database.
          env:
            - name: DB_HOST
              valueFrom:
                secretKeyRef:
                  name: db-secret
                  key: host
            - name: DB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: db-secret
                  key: password
---
apiVersion: v1
kind: Service
metadata:
  name: myapp
spec:
  selector:
    app: myapp
  ports:
    - port: 80
      targetPort: 3000
  type: LoadBalancer
---
apiVersion: v1
kind: Secret
metadata:
  name: db-secret
type: Opaque
stringData:
  host: postgres.default.svc.cluster.local
  # NOTE(review): never commit a real credential to VCS — inject this value
  # from a secret manager (external-secrets, sealed-secrets, CI) instead.
  password: your-password
k8s/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: app-config
data:
  # ConfigMap values must be strings — numeric-looking values stay quoted so
  # the YAML parser never retypes them.
  NODE_ENV: "production"
  DB_PORT: "5432"
  DB_DATABASE: "myapp"
  DB_POOL_SIZE: "20"
  REDIS_HOST: "redis.default.svc.cluster.local"
  REDIS_PORT: "6379"
  LOG_LEVEL: "info"

.github/workflows/deploy.yml
name: Deploy

on:
  push:
    branches: [main]

jobs:
  test:
    runs-on: ubuntu-latest
    # The e2e step connects to localhost:5432, so a Postgres service
    # container is required — the original workflow defined none and the
    # e2e tests could never reach a database.
    services:
      postgres:
        image: postgres:15-alpine
        env:
          POSTGRES_USER: postgres
          POSTGRES_PASSWORD: postgres
          POSTGRES_DB: test
        ports:
          - 5432:5432
        options: >-
          --health-cmd "pg_isready -U postgres"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
    steps:
      - uses: actions/checkout@v4
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20'
          cache: 'npm'
      - name: Install dependencies
        run: npm ci
      - name: Run tests
        run: npm test
      - name: Run e2e tests
        run: npm run test:e2e
        env:
          DB_HOST: localhost
          DB_PORT: 5432
          DB_USERNAME: postgres
          DB_PASSWORD: postgres
          DB_DATABASE: test

  build:
    needs: test
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Build Docker image
        # Tag with the registry namespace — a bare `myapp:<sha>` cannot be
        # pushed to Docker Hub.
        run: docker build -t ${{ secrets.DOCKER_USERNAME }}/myapp:${{ github.sha }} .
      - name: Push to registry
        run: |
          echo ${{ secrets.DOCKER_PASSWORD }} | docker login -u ${{ secrets.DOCKER_USERNAME }} --password-stdin
          docker push ${{ secrets.DOCKER_USERNAME }}/myapp:${{ github.sha }}

  deploy-staging:
    needs: build
    runs-on: ubuntu-latest
    environment: staging
    steps:
      - uses: actions/checkout@v4
      - name: Deploy to staging
        run: |
          kubectl set image deployment/myapp myapp=${{ secrets.DOCKER_USERNAME }}/myapp:${{ github.sha }}
          kubectl rollout status deployment/myapp
        env:
          KUBECONFIG: ${{ secrets.KUBECONFIG_STAGING }}

  deploy-production:
    needs: deploy-staging
    runs-on: ubuntu-latest
    environment: production
    steps:
      - uses: actions/checkout@v4
      - name: Run migrations
        # `kubectl run` creates a Pod, not a Job — the original waited on a
        # nonexistent job/migration resource and always timed out. Wait on the
        # pod phase instead, and clear any leftover pod from a prior run.
        run: |
          kubectl delete pod migration --ignore-not-found
          kubectl run migration --image=${{ secrets.DOCKER_USERNAME }}/myapp:${{ github.sha }} --restart=Never --command -- node dist/scripts/migrate.js
          kubectl wait --for=jsonpath='{.status.phase}'=Succeeded pod/migration --timeout=300s
        env:
          KUBECONFIG: ${{ secrets.KUBECONFIG_PRODUCTION }}
      - name: Deploy to production
        run: |
          kubectl set image deployment/myapp myapp=${{ secrets.DOCKER_USERNAME }}/myapp:${{ github.sha }}
          kubectl rollout status deployment/myapp
        env:
          KUBECONFIG: ${{ secrets.KUBECONFIG_PRODUCTION }}

src/scripts/migrate.ts
import { DataSource } from 'typeorm';
import * as dotenv from 'dotenv';

dotenv.config();

/**
 * Connects using the DB_* environment variables, runs all pending TypeORM
 * migrations, and exits the process: 0 on success, 1 on failure.
 */
async function runMigrations(): Promise<void> {
  const dataSource = new DataSource({
    type: 'postgres',
    host: process.env.DB_HOST,
    // Default the port so a missing DB_PORT yields 5432 instead of NaN
    port: parseInt(process.env.DB_PORT ?? '5432', 10),
    username: process.env.DB_USERNAME,
    password: process.env.DB_PASSWORD,
    database: process.env.DB_DATABASE,
    // Plain globs — the original single-element `{.js}` brace pattern is not
    // expanded by every glob implementation.
    entities: ['dist/**/*.entity.js'],
    migrations: ['dist/migrations/*.js'],
    synchronize: false, // never auto-sync the schema in production
  });

  let exitCode = 0;
  try {
    console.log('Connecting to database...');
    await dataSource.initialize();
    console.log('Running migrations...');
    const migrations = await dataSource.runMigrations();
    console.log(`Executed ${migrations.length} migrations:`);
    // Log the migration's name — logging the Migration object itself prints
    // "[object Object]".
    migrations.forEach(m => console.log(`  - ${m.name}`));
  } catch (error) {
    console.error('Migration failed:', error);
    exitCode = 1;
  } finally {
    // Close the pool before exiting — the original called process.exit()
    // inside the try block, which terminates immediately and skips finally.
    if (dataSource.isInitialized) {
      await dataSource.destroy();
    }
  }
  process.exit(exitCode);
}

runMigrations();
Zero-Downtime Migration Strategy
================================================================================
For schema changes that could cause downtime:
1. Additive Changes (Safe)
- Add new nullable column
- Add new table
- Add new index (CONCURRENTLY)
2. Destructive Changes (Requires Planning)
- Remove column
- Remove table
- Remove index
Safe Migration Pattern:
Step 1: Add new column (nullable)
+------------------+ +------------------+
| users | --> | users |
+------------------+ +------------------+
| id | | id |
| name | | name |
| email | | email |
| | | phone (nullable) |
+------------------+ +------------------+
Step 2: Deploy code that writes to new column
Step 3: Backfill existing data
Step 4: Make column non-null (if needed)
Step 5: Remove old column (if replacing)
================================================================================
src/scripts/migrate-rollback.ts
import { DataSource } from 'typeorm';
import * as dotenv from 'dotenv';

dotenv.config();

/**
 * Connects using the DB_* environment variables and reverts the most recent
 * executed migration. Exits 0 on success, 1 on failure.
 */
async function rollbackMigration(): Promise<void> {
  const dataSource = new DataSource({
    type: 'postgres',
    host: process.env.DB_HOST,
    // Default the port so a missing DB_PORT yields 5432 instead of NaN
    port: parseInt(process.env.DB_PORT ?? '5432', 10),
    username: process.env.DB_USERNAME,
    password: process.env.DB_PASSWORD,
    database: process.env.DB_DATABASE,
    // Plain globs — a single-element `{.js}` brace pattern is not expanded
    // by every glob implementation.
    entities: ['dist/**/*.entity.js'],
    migrations: ['dist/migrations/*.js'],
    synchronize: false, // never auto-sync the schema in production
  });

  let exitCode = 0;
  try {
    console.log('Connecting to database...');
    await dataSource.initialize();
    console.log('Rolling back last migration...');
    await dataSource.undoLastMigration();
    console.log('Rollback successful');
  } catch (error) {
    console.error('Rollback failed:', error);
    exitCode = 1;
  } finally {
    // Close the pool before exiting — process.exit() inside the try block
    // would terminate immediately and skip this cleanup.
    if (dataSource.isInitialized) {
      await dataSource.destroy();
    }
  }
  process.exit(exitCode);
}

rollbackMigration();

src/health/health.controller.ts
import { Controller, Get } from '@nestjs/common';
import {
  HealthCheckService,
  HealthCheck,
  TypeOrmHealthIndicator,
  MemoryHealthIndicator,
  DiskHealthIndicator,
} from '@nestjs/terminus';

/**
 * Health endpoints consumed by the container HEALTHCHECK (/health) and the
 * Kubernetes probes (/health/ready, /health/live).
 */
@Controller('health')
export class HealthController {
  constructor(
    private health: HealthCheckService,
    private db: TypeOrmHealthIndicator,
    private memory: MemoryHealthIndicator,
    private disk: DiskHealthIndicator,
  ) {}

  /** Full health report: DB ping, heap/RSS ceilings (150 MiB), disk usage. */
  @Get()
  @HealthCheck()
  check() {
    return this.health.check([
      () => this.db.pingCheck('database', { timeout: 300 }),
      () => this.memory.checkHeap('memory_heap', 150 * 1024 * 1024),
      () => this.memory.checkRSS('memory_rss', 150 * 1024 * 1024),
      // `path` is a required option of checkStorage — the original omitted it,
      // which makes terminus fail at runtime.
      () => this.disk.checkStorage('storage', { path: '/', thresholdPercent: 0.9 }),
    ]);
  }

  /** Readiness: only the database must answer before traffic is routed. */
  @Get('ready')
  @HealthCheck()
  ready() {
    return this.health.check([
      () => this.db.pingCheck('database'),
    ]);
  }

  /** Liveness: the process is up; no external dependencies are checked. */
  @Get('live')
  live() {
    return { status: 'ok' };
  }
}

Blue-Green Deployment
================================================================================
Current State (Blue):
+------------------+
| Blue Env | <-- Active, receiving traffic
| v1.0.0 |
+------------------+
|
v
+------------------+
| Load Balancer|
+------------------+
Deployment Process:
Step 1: Deploy to Green
+------------------+ +------------------+
| Blue Env | | Green Env |
| v1.0.0 | | v1.1.0 |
+------------------+ +------------------+
| |
v v
+------------------------------------------+
| Load Balancer |
| (points to Blue) |
+------------------------------------------+
Step 2: Run migrations on Green
Step 3: Test Green environment
Step 4: Switch traffic to Green
+------------------------------------------+
| Load Balancer |
| (points to Green) |
+------------------------------------------+
Step 5: Blue becomes standby for rollback
================================================================================

Deployment Checklist
================================================================================
Pre-Deployment:
[ ] All tests passing
[ ] Code reviewed and approved
[ ] Environment variables configured
[ ] Database backup created
[ ] Migrations tested on staging
Deployment:
[ ] Deploy new version
[ ] Run database migrations
[ ] Health checks passing
[ ] Monitor error rates
Post-Deployment:
[ ] Verify functionality
[ ] Check logs for errors
[ ] Monitor performance metrics
[ ] Update documentation
Rollback Plan:
[ ] Document rollback procedure
[ ] Test rollback on staging
[ ] Keep previous version available
================================================================================

Deployment Quick Reference
+------------------------------------------------------------------+
| |
| Strategy | Use Case |
| -------------------|------------------------------------------|
| Rolling Update | Gradual replacement of instances |
| Blue-Green | Zero-downtime with instant rollback |
| Canary | Gradual traffic shift |
| |
| Components | Description |
| -------------------|------------------------------------------|
| Docker | Container packaging |
| Kubernetes | Container orchestration |
| CI/CD | Automated deployment pipeline |
| Health Checks | Application monitoring |
| |
| Best Practices | Description |
| -------------------|------------------------------------------|
| Immutable infra | Don't modify running containers |
| Run migrations | Before deploying new code |
| Health checks | Implement liveness and readiness |
| Rollback plan | Always have a rollback strategy |
| |
+------------------------------------------------------------------+

Chapter 40: Scaling Strategies


Last Updated: February 2026