Implement PostgreSQL upgrade and rollback scripts, update docker-compose for PostgreSQL version 17.5, and enhance .gitignore to include backup files. Add README for upgrade instructions.

This commit is contained in:
Yuri-Lima
2025-08-31 12:11:50 +02:00
parent 9b841b21d9
commit 16c737c816
7 changed files with 160601 additions and 1 deletions

1
.gitignore vendored
View File

@@ -4,5 +4,6 @@ logs
database
assets
database_bkp
database_backup*
pgadmin
redis

View File

@@ -0,0 +1,16 @@
## Upgrade Modes
> [`Cloud Servers`](https://phx-erp.youtrack.cloud/articles/INT-A-105/PostgreSQL-Docker-Upgrade-Rollback-Guide-Any-Version-Any-Version?edit=true)
> [`Self Hosted`](https://phx-erp.youtrack.cloud/articles/INT-A-106/PostgreSQL-Upgrade-Rollback-Self-Hosted)
# Quick Start
## Upgrade PostgreSQL
```bash
docker compose --profile postgres-upgrade run --rm postgres-auto-upgrade
```
## Rollback PostgreSQL (if needed)
```bash
docker compose --profile postgres-rollback run --rm postgres-auto-rollback
```

File diff suppressed because one or more lines are too long

View File

@@ -46,7 +46,7 @@ services:
network_mode: bridge
postgres:
restart: always
image: "postgres:15.1-alpine"
image: "postgres:17.5-alpine"
container_name: phoenixDB # Hostname
# logging:
# driver: loki

618
docker-compose.yaml.bak Normal file
View File

@@ -0,0 +1,618 @@
---
services:
postgres-auto-upgrade:
profiles:
- postgres-upgrade # 🟢 This isolates the service
image: alpine:3.19
container_name: postgres_auto_upgrade
working_dir: /opt/phx
environment:
TZ: Europe/Berlin
volumes:
- .:/opt/phx:rw
- /var/run/docker.sock:/var/run/docker.sock
entrypoint: >
sh -c "
apk add --no-cache bash coreutils grep sed findutils curl docker-cli dos2unix &&
mkdir -p ~/.docker/cli-plugins &&
curl -SL https://github.com/docker/compose/releases/download/v2.27.0/docker-compose-linux-x86_64 -o ~/.docker/cli-plugins/docker-compose &&
chmod +x ~/.docker/cli-plugins/docker-compose &&
chmod +x ./postgres_upgrade.sh &&
./postgres_upgrade.sh"
restart: 'no'
depends_on: []
network_mode: bridge
postgres-auto-rollback:
profiles:
- postgres-rollback # 🟢 This isolates the service
image: alpine:3.19
container_name: postgres_rollback
working_dir: /opt/phx
environment:
TZ: Europe/Berlin
volumes:
- .:/opt/phx:rw
- /var/run/docker.sock:/var/run/docker.sock
entrypoint: >
sh -c "
apk add --no-cache bash coreutils grep sed findutils curl docker-cli dos2unix &&
mkdir -p ~/.docker/cli-plugins &&
curl -SL https://github.com/docker/compose/releases/download/v2.27.0/docker-compose-linux-x86_64 -o ~/.docker/cli-plugins/docker-compose &&
chmod +x ~/.docker/cli-plugins/docker-compose &&
chmod +x ./rollback_postgres_upgrade.sh &&
./rollback_postgres_upgrade.sh"
restart: 'no'
depends_on: []
network_mode: bridge
postgres:
restart: always
image: "postgres:17.5-alpine"
container_name: phoenixDB # Hostname
# logging:
# driver: loki
# options:
# loki-url: "${LOKI_URL}"
# loki-retries: "${LOKI_RETRIES}"
# loki-batch-size: "${LOKI_BATCH_SIZE}"
# loki-external-labels: "service=phx-postgres,env=prod"
networks:
- backend
security_opt:
- no-new-privileges:true
environment:
# Compose environment values must be strings: an unquoted `true` is a YAML
# boolean and fails schema validation on current docker compose releases.
DEBUG: "true"
POSTGRES_DB: ${DB_NAME}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
TZ: Europe/Berlin
volumes:
- "./database:/var/lib/postgresql/data"
# deploy:
# restart_policy: # Define how the service should restart when it fails
# condition: on-failure # Only restart if the container exits with a non-zero code
# delay: 5s # Wait 5 seconds before attempting to restart
# max_attempts: 5 # Maximum number of restart attempts before giving up
# window: 120s # Time window to evaluate restart attempts (resets counter after this period)
# resources: # Resource allocation and limits for the container
# limits: # Maximum resources the container can use
# cpus: "0.75" # Maximum CPU cores (75% of one core)
# memory: 768M # Maximum memory usage (768 megabytes)
# reservations: # Guaranteed minimum resources for the container
# cpus: "0.25" # Reserved CPU cores (25% of one core)
# memory: 256M # Reserved memory (256 megabytes)
healthcheck:
test: [ "CMD-SHELL", "pg_isready -U postgres" ]
interval: 5s # Time between each health check
timeout: 2s # Max time to wait for each check
retries: 5 # Number of failures before marking as unhealthy
pgadmin:
restart: always
image: dpage/pgadmin4:9.6.0
container_name: pgadmin4-ui
user: "5050:5050"
# logging:
# driver: loki
# options:
# loki-url: "${LOKI_URL}"
# loki-retries: "${LOKI_RETRIES}"
# loki-batch-size: "${LOKI_BATCH_SIZE}"
# loki-external-labels: "service=phx-pgadmin,env=prod"
networks:
- backend
- frontend
environment:
PGADMIN_DEFAULT_EMAIL: ${PGADMIN_DEFAULT_EMAIL}
PGADMIN_DEFAULT_PASSWORD: ${SUPER_ADMIN_USER_PASSWORD}
PGADMIN_CONFIG_SERVER_MODE: 'True'
PGADMIN_CONFIG_WSGI_SCRIPT_NAME: "'/pgadmin4'"
PGADMIN_CONFIG_PROXY_X_PROTO_COUNT: 1
PGADMIN_SERVER_JSON_FILE: '/var/lib/pgadmin/servers.json'
PGADMIN_REPLACE_SERVERS_ON_STARTUP: 'True'
PGADMIN_CONFIG_DATA_DIR: "'/var/lib/pgadmin'"
PGADMIN_CONFIG_MASTER_PASSWORD_REQUIRED: 'False'
# pgpass dynamic vars
PGPASSFILE: /var/lib/pgadmin/pgpass
PGPASS_HOST: ${DB_HOST}
PGPASS_PORT: ${DB_PORT}
PGPASS_DB: ${DB_NAME}
PGPASS_USER: ${DB_USERNAME}
PGPASS_PASSWORD: ${POSTGRES_PASSWORD}
# Other config
ALLOW_SAVE_PASSWORD: 'False'
MFA_ENABLED: 'True'
MFA_FORCE_REGISTRATION: 'False'
MFA_SUPPORTED_METHODS: 'email'
MFA_EMAIL_SUBJECT: 'Your MFA code by PHX-ERP'
MAX_LOGIN_ATTEMPTS: 5
ENHANCED_COOKIE_PROTECTION: 'True'
SHOW_GRAVATAR_IMAGE: 'True'
SECURITY_EMAIL_SENDER: ${SECURITY_EMAIL_SENDER}
MAIL_SERVER: ${MAIL_SERVER}
MAIL_PORT: ${MAIL_PORT}
MAIL_USE_SSL: 'False'
MAIL_USE_TLS: 'False'
MAIL_USERNAME: ${MAIL_USERNAME}
MAIL_PASSWORD: ${MAIL_PASSWORD}
MAIL_DEBUG: 'False'
TZ: Europe/Berlin
volumes:
- ./pgadmin/data:/var/lib/pgadmin
- ./pgadmin/pgadmin-entrypoint.sh:/docker-entrypoint.sh:ro
mem_limit: 512M
memswap_limit: 512M
deploy:
restart_policy: # Define how the service should restart when it fails
condition: on-failure # Only restart if the container exits with a non-zero code
delay: 5s # Wait 5 seconds before attempting to restart
max_attempts: 5 # Maximum number of restart attempts before giving up
window: 120s # Time window to evaluate restart attempts (resets counter after this period)
resources: # Resource allocation and limits for the container
limits: # Maximum resources the container can use
cpus: "1.0" # Maximum CPU cores (100% of one core)
memory: 512M # Maximum memory usage (512 megabytes)
reservations: # Guaranteed minimum resources for the container
cpus: "0.15" # Reserved CPU cores (15% of one core)
memory: 250M # Reserved memory (250 megabytes)
entrypoint: ["/bin/sh", "/docker-entrypoint.sh"]
depends_on:
postgres:
condition: service_healthy
healthcheck:
test: ["CMD", "wget", "-O", "-", "http://localhost:80/misc/ping"]
interval: 15s
timeout: 10s
retries: 5
start_period: 120s
phoenix-app:
restart: always
image: "phxerp/phoenix-app:alpha"
container_name: phoenix-app
ports:
- "3000:3000" # Restrict to only allow access from Grafana Server IP
# logging:
# driver: loki
# options:
# loki-url: "${LOKI_URL}"
# loki-retries: "${LOKI_RETRIES}"
# loki-batch-size: "${LOKI_BATCH_SIZE}"
# loki-external-labels: "service=phx-app,env=prod,project=phoenix"
environment:
TZ: Europe/Berlin
volumes:
- ./app_custom:/usr/share/nginx/html/assets/custom
- nginx-logs:/var/log/nginx # this is part of the fail2ban to avoid rotation logs cleaning setup. This will be done by docker volume rm nginx-logs
# - ./phoenix-app/logs:/var/log/nginx # this is part of the fail2ban config to make analysis of the logs easier
# - ./nginx/nginx.conf:/etc/nginx/nginx.conf # Uncomment this if you want to use override the default nginx.conf
# - ./nginx/includes:/etc/nginx/includes:ro # Uncomment this if you want to use override the default includes
networks:
- backend
- frontend
# deploy:
# restart_policy: # Define how the service should restart when it fails
# condition: on-failure # Only restart if the container exits with a non-zero code
# delay: 5s # Wait 5 seconds before attempting to restart
# max_attempts: 5 # Maximum number of restart attempts before giving up
# window: 120s # Time window to evaluate restart attempts (resets counter after this period)
# resources: # Resource allocation and limits for the container
# limits: # Maximum resources the container can use
# cpus: "0.35" # Maximum CPU cores (35% of one core)
# memory: 384M # Maximum memory usage (384 megabytes)
# reservations: # Guaranteed minimum resources for the container
# cpus: "0.10" # Reserved CPU cores (10% of one core)
# memory: 128M # Reserved memory (128 megabytes)
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "http://phoenix-app/login"] # localhost checks that the NGINX server inside the container is serving something at the root
interval: 10s # check every 10 seconds
timeout: 5s # allow 5 seconds per check
retries: 5 # mark as unhealthy after 5 failures
start_period: 15s # wait 15 seconds after container start before checking
phoenix-system:
restart: always
image: "phxerp/phoenix-system:alpha"
# logging:
# driver: loki
# options:
# loki-url: "${LOKI_URL}"
# loki-retries: "${LOKI_RETRIES}"
# loki-batch-size: "${LOKI_BATCH_SIZE}"
# loki-external-labels: "service=phoenix-system,env=prod"
environment:
- "DB_HOST=${DB_HOST}"
- "DB_NAME=${DB_NAME}"
- "DB_PASSWORD=${POSTGRES_PASSWORD}"
- "DB_USERNAME=${DB_USERNAME}"
- "SUPER_ADMIN_USER_PASSWORD=${SUPER_ADMIN_USER_PASSWORD}"
- "REDIS_PASSWORD=${REDIS_PASSWORD}"
- NODE_ENV=${NODE_ENV}
- PHX_HOST_NAME=${PHX_HOST_NAME}
- PERFORMANCE_STRUCTURED_LOGGING=${PERFORMANCE_STRUCTURED_LOGGING}
- PERFORMANCE_WARNING_THRESHOLD=${PERFORMANCE_WARNING_THRESHOLD}
- PERFORMANCE_DETAILED_MEMORY=${PERFORMANCE_DETAILED_MEMORY}
- TZ=Europe/Berlin
command: ["npm", "run", "start:server"]
deploy:
replicas: ${PHOENIX_SYSTEM_REPLICAS} #change here if u want to have more replicas. Cant find a way to set via variable right now
# restart_policy: # Define how the service should restart when it fails
# condition: on-failure # Only restart if the container exits with a non-zero code
# delay: 5s # Wait 5 seconds before attempting to restart
# max_attempts: 5 # Maximum number of restart attempts before giving up
# window: 120s # Time window to evaluate restart attempts (resets counter after this period)
# resources: # Resource allocation and limits for the container
# limits: # Maximum resources the container can use
# cpus: "1.50" # Maximum CPU cores (150% of one core)
# memory: 1600M # Maximum memory usage (1600 megabytes)
# reservations: # Guaranteed minimum resources for the container
# cpus: "0.50" # Reserved CPU cores (50% of one core)
# memory: 768M # Reserved memory (768 megabytes)
networks:
backend:
aliases:
- phoenix-system
depends_on:
postgres:
condition: service_healthy
phoenix-redis:
condition: service_healthy
healthcheck:
test: ["CMD-SHELL", "curl -s http://phoenix-system:3000/health | grep -q '\"admin-api\":{\"status\":\"up\"}' && curl -s http://phoenix-system:3000/health | grep -q '\"database\":{\"status\":\"up\"}'"] # Checks both admin-api and database status
interval: 10s # Time between each health check
timeout: 10s # Max time to wait for each check
retries: 20 # Number of failures before marking as unhealthy
start_period: 60s # Grace period before health checks start
volumes:
- "./assets:/usr/src/app/packages/dev-server/assets"
# - "./logs:/usr/src/app/packages/dev-server/logs"
# Background job worker — runs from the same image as phoenix-system but with
# the `start:worker` command instead of the server entrypoint.
phoenix-worker:
restart: always
image: "phxerp/phoenix-system:alpha"
container_name: "phoenix-worker"
ports:
- "3001:3001" # Restrict to only allow access from Grafana Server IP
# logging:
# driver: loki
# options:
# loki-url: "${LOKI_URL}"
# loki-retries: "${LOKI_RETRIES}"
# loki-batch-size: "${LOKI_BATCH_SIZE}"
# loki-external-labels: "service=phx-worker,env=prod"
networks:
- backend
environment:
- TZ=Europe/Berlin
- "DB_HOST=${DB_HOST}"
- "DB_NAME=${DB_NAME}"
- "DB_PASSWORD=${POSTGRES_PASSWORD}"
- "DB_USERNAME=${DB_USERNAME}"
- "SUPER_ADMIN_USER_PASSWORD=${SUPER_ADMIN_USER_PASSWORD}"
- REDIS_PASSWORD=${REDIS_PASSWORD}
- NODE_ENV=${NODE_ENV}
- PHX_HOST_NAME=${PHX_HOST_NAME}
- PERFORMANCE_STRUCTURED_LOGGING=${PERFORMANCE_STRUCTURED_LOGGING}
- PERFORMANCE_WARNING_THRESHOLD=${PERFORMANCE_WARNING_THRESHOLD}
- PERFORMANCE_DETAILED_MEMORY=${PERFORMANCE_DETAILED_MEMORY}
command: ['npm', 'run', 'start:worker']
# deploy:
# restart_policy: # Define how the service should restart when it fails
# condition: on-failure # Only restart if the container exits with a non-zero code
# delay: 5s # Wait 5 seconds before attempting to restart
# max_attempts: 5 # Maximum number of restart attempts before giving up
# window: 120s # Time window to evaluate restart attempts (resets counter after this period)
# resources: # Resource allocation and limits for the container
# limits: # Maximum resources the container can use
# cpus: '2.0' # Maximum CPU cores (200% of one core)
# memory: 2G # Maximum memory usage (2 gigabytes)
# reservations: # Guaranteed minimum resources for the container
# cpus: '0.5' # Reserved CPU cores (50% of one core)
# memory: 512M # Reserved memory (512 megabytes)
depends_on:
phoenix-system:
condition: service_healthy
postgres:
condition: service_healthy
healthcheck:
test: [ "CMD-SHELL", "curl -s http://phoenix-worker:3001/health | grep -q '\"status\":\"ok\"'" ] # Check if worker responds with status ok
interval: 10s # Time between each health check
timeout: 6s # Max time to wait for each check
retries: 20 # Number of failures before marking as unhealthy
start_period: 30s # Grace period before health checks start
volumes:
- "./assets:/usr/src/app/packages/dev-server/assets"
# - "./logs:/usr/src/app/packages/dev-server/logs"
phoenix-redis:
image: 'bitnami/redis:latest'
container_name: redis
command: /opt/bitnami/scripts/redis/run.sh # Not good, but as agreed. At some point i can start using this: --maxmemory + add eviction policy
user: root
# logging:
# driver: loki
# options:
# loki-url: "${LOKI_URL}"
# loki-retries: "${LOKI_RETRIES}"
# loki-batch-size: "${LOKI_BATCH_SIZE}"
# loki-external-labels: "service=phx-redis,env=prod"
networks:
- backend
restart: always
environment:
ALLOW_EMPTY_PASSWORD: "no"
REDIS_PASSWORD: ${REDIS_PASSWORD}
TZ: Europe/Berlin
# deploy:
# restart_policy: # Define how the service should restart when it fails
# condition: on-failure # Only restart if the container exits with a non-zero code
# delay: 5s # Wait 5 seconds before attempting to restart
# max_attempts: 5 # Maximum number of restart attempts before giving up
# window: 120s # Time window to evaluate restart attempts (resets counter after this period)
# resources: # Resource allocation and limits for the container
# limits: # Maximum resources the container can use
# cpus: "0.25" # Maximum CPU cores (25% of one core)
# memory: 100M # Maximum memory usage (100 megabytes)
# reservations: # Guaranteed minimum resources for the container
# cpus: "0.05" # Reserved CPU cores (5% of one core)
# memory: 32M # Reserved memory (32 megabytes)
healthcheck:
test: [
"CMD-SHELL",
"redis-cli --no-auth-warning -a ${REDIS_PASSWORD} ping | grep PONG && test -w /bitnami/redis/data"
]
interval: 5s
retries: 10
timeout: 5s
depends_on:
postgres:
condition: service_healthy
volumes:
- "./redis/data:/bitnami/redis/data"
phoenix-health-exporter:
image: phxerp/phoenix-health-exporter:alpha
container_name: health_exporter
restart: unless-stopped
# logging:
# driver: loki
# options:
# loki-url: "${LOKI_URL}"
# loki-retries: "${LOKI_RETRIES}"
# loki-batch-size: "${LOKI_BATCH_SIZE}"
# loki-external-labels: "service=phx-health-exporter,env=prod"
ports:
- "9800:9800"
environment:
DB_HOST: ${DB_HOST}
DB_NAME: ${DB_NAME}
DB_PASSWORD: ${POSTGRES_PASSWORD}
DB_USERNAME: ${DB_USERNAME}
REDIS_PASSWORD: ${REDIS_PASSWORD}
TZ: Europe/Berlin
networks:
- frontend
- backend
volumes:
- /etc/hostname:/etc/host_hostname:ro # This ensures the container always uses the real machine hostname, even if restarted or recreated.
security_opt:
- no-new-privileges:true
memswap_limit: 512M
deploy:
restart_policy: # Define how the service should restart when it fails
condition: on-failure # Only restart if the container exits with a non-zero code
delay: 5s # Wait 5 seconds before attempting to restart
max_attempts: 5 # Maximum number of restart attempts before giving up
window: 120s # Time window to evaluate restart attempts (resets counter after this period)
resources: # Resource allocation and limits for the container
limits: # Maximum resources the container can use
cpus: "0.5" # Maximum CPU cores (50% of one core)
memory: 256M # Maximum memory usage (256 megabytes)
reservations: # Guaranteed minimum resources for the container
cpus: "0.1" # Reserved CPU cores (10% of one core)
memory: 64M # Reserved memory (64 megabytes)
depends_on:
phoenix-system:
condition: service_healthy
phoenix-worker:
condition: service_healthy
postgres:
condition: service_healthy
healthcheck:
test: ["CMD-SHELL", "curl -sf http://localhost:9800/healthz || exit 1"]
interval: 1m
timeout: 5s
retries: 3
start_period: 15s
node-exporter:
image: quay.io/prometheus/node-exporter:latest
container_name: node_exporter
# logging:
# driver: loki
# options:
# loki-url: "${LOKI_URL}"
# loki-retries: "${LOKI_RETRIES}"
# loki-batch-size: "${LOKI_BATCH_SIZE}"
# loki-external-labels: "service=phx-node-exporter,env=prod"
networks:
- metrics
- frontend
restart: unless-stopped
environment:
TZ: Europe/Berlin
ports:
- "9100:9100" # Restrict to only allow access from Grafana Server IP
command:
- "--path.procfs=/host/proc"
- "--path.sysfs=/host/sys"
- "--path.rootfs=/host"
- "--collector.filesystem.ignored-mount-points=^/(sys|proc|dev)($$|/)"
volumes:
- "/proc:/host/proc:ro"
- "/sys:/host/sys:ro"
- "/:/host:ro,rslave"
security_opt:
- no-new-privileges:true
memswap_limit: 512M
deploy:
restart_policy: # Define how the service should restart when it fails
condition: on-failure # Only restart if the container exits with a non-zero code
delay: 5s # Wait 5 seconds before attempting to restart
max_attempts: 5 # Maximum number of restart attempts before giving up
window: 120s # Time window to evaluate restart attempts (resets counter after this period)
resources: # Resource allocation and limits for the container
limits: # Maximum resources the container can use
cpus: "0.25" # Maximum CPU cores (25% of one core)
memory: 128M # Maximum memory usage (128 megabytes)
reservations: # Guaranteed minimum resources for the container
cpus: "0.05" # Reserved CPU cores (5% of one core)
memory: 32M # Reserved memory (32 megabytes)
depends_on:
phoenix-worker: # This is to avoid alocation of resources to the node-exporter if the phoenix-worker is not healthy yet.
condition: service_healthy
healthcheck:
test: ["CMD", "wget", "-qO-", "http://localhost:9100/metrics"]
interval: 15s
timeout: 5s
retries: 3
start_period: 20s
# nginx-exporter:
# image: nginx/nginx-prometheus-exporter:1.4.2
# container_name: nginx_exporter
# restart: unless-stopped
# # logging:
# # driver: loki
# # options:
# # loki-url: "${LOKI_URL}"
# # loki-retries: "${LOKI_RETRIES}"
# # loki-batch-size: "${LOKI_BATCH_SIZE}"
# # loki-external-labels: "service=phx-nginx-exporter,env=prod"
# ports:
# - "9113:9113" # Restrict to only allow access from Grafana Server IP
# command:
# - '--nginx.scrape-uri=http://phoenix-app/stub_status'
# security_opt:
# - no-new-privileges:true
# deploy:
# resources:
# limits:
# cpus: '0.25'
# memory: 128M
# depends_on:
# phoenix-app:
# condition: service_healthy
# networks:
# - frontend
# - metrics
# healthcheck:
# test: ["CMD", "wget", "-qO-", "http://localhost:9113/metrics"] # Not working as expected
# interval: 15s
# timeout: 5s
# retries: 3
# start_period: 10s
https_portal:
container_name: https_portal
image: "steveltn/https-portal:1.21"
restart: unless-stopped
# logging:
# driver: loki
# options:
# loki-url: "${LOKI_URL}"
# loki-retries: "${LOKI_RETRIES}"
# loki-batch-size: "${LOKI_BATCH_SIZE}"
# loki-external-labels: "service=phx-https-portal,env=prod"
networks:
- frontend # [ PgAdmin, Phoenix-App ]
ports:
- "80:80"
- "443:443"
# - host:container
environment:
STAGE: "production" # Use Let's Encrypt production server
WEBSOCKET: "true" # Enable websocket support
DEBUG: "true"
RENEW_MARGIN_DAYS: 30
CLIENT_MAX_BODY_SIZE: 0
SERVER_NAMES_HASH_BUCKET_SIZE: 128 # Increase hash bucket size for server names - good for bigger domains names, if not set correctly, it will throw an error, break the container.
# FORCE_RENEW: 'true'
DOMAINS: "${HTTPS_PORTAL_DOMAINS}"
TZ: Europe/Berlin
volumes:
- ./https_portal/data:/var/lib/https-portal # ssl_certs, vhost.d, htdocs
- ./https_portal/log:/var/log/nginx # nginx logs
# - ./https_portal/config/custom_nginx.conf:/opt/custom_nginx.conf:ro # ✅ Mount file in a safe path
memswap_limit: 512M
deploy:
restart_policy: # Define how the service should restart when it fails
condition: on-failure # Only restart if the container exits with a non-zero code
delay: 5s # Wait 5 seconds before attempting to restart
max_attempts: 5 # Maximum number of restart attempts before giving up
window: 120s # Time window to evaluate restart attempts (resets counter after this period)
resources: # Resource allocation and limits for the container
limits: # Maximum resources the container can use
cpus: "0.5" # Maximum CPU cores (50% of one core)
memory: 256M # Maximum memory usage (256 megabytes)
reservations: # Guaranteed minimum resources for the container
cpus: "0.1" # Reserved CPU cores (10% of one core)
memory: 64M # Reserved memory (64 megabytes)
depends_on:
# pgadmin:
# condition: service_healthy
postgres:
condition: service_healthy
# Host-level intrusion prevention; needs host networking and NET_ADMIN/NET_RAW
# to insert firewall bans based on log parsing.
fail2ban:
restart: always
image: crazymax/fail2ban:latest
container_name: fail2ban
network_mode: host # important: act on host network
cap_add:
- NET_ADMIN # needed to manage firewall
- NET_RAW
environment:
TZ: Europe/Berlin
volumes:
- ./fail2ban/data:/data
- ./fail2ban/jail.d:/etc/fail2ban/jail.d
- ./fail2ban/filter.d:/data/filter.d
- /var/log:/var/log:ro # Parse host logs to the sshd
- nginx-logs:/data/nginx-logs:ro
# - ./phoenix-app/logs:/logs/phoenix-app:ro # not needed anymore, but keep here for manual/testing purposes.
memswap_limit: 512M
deploy:
restart_policy: # Define how the service should restart when it fails
condition: on-failure # Only restart if the container exits with a non-zero code
delay: 5s # Wait 5 seconds before attempting to restart
max_attempts: 5 # Maximum number of restart attempts before giving up
window: 120s # Time window to evaluate restart attempts (resets counter after this period)
resources: # Resource allocation and limits for the container
limits: # Maximum resources the container can use
cpus: "0.5" # Maximum CPU cores (50% of one core)
memory: 100M # Maximum memory usage (100 megabytes)
reservations: # Guaranteed minimum resources for the container
cpus: "0.1" # Reserved CPU cores (10% of one core)
memory: 35M # Reserved memory (35 megabytes)
depends_on:
phoenix-worker: # This is to avoid alocation of resources to the fail2ban if the phoenix-worker is not healthy yet.
condition: service_healthy
volumes:
nginx-logs:
name: nginx-logs
networks:
backend:
driver: bridge
external: false
ipam:
config:
- subnet: 172.19.0.0/16
frontend:
driver: bridge
external: false
ipam:
config:
- subnet: 172.20.0.0/16
metrics:
driver: bridge
external: false
ipam:
config:
- subnet: 172.22.0.0/16

259
postgres_upgrade.sh Executable file
View File

@@ -0,0 +1,259 @@
#!/usr/bin/env bash
# postgres_upgrade.sh — in-place major-version upgrade of the dockerised
# PostgreSQL cluster driven by docker-compose.
#
# High-level flow:
#   1. Detect the running major version from $DATA_DIR/PG_VERSION.
#   2. Detect the target version from the `image:` tag in docker-compose.yaml.
#   3. Back up the data directory (cp -a) and take a full pg_dumpall.
#   4. Run tianon/postgres-upgrade (pg_upgrade) into a scratch directory.
#   5. Swap the upgraded cluster into place and restart the service.
#   6. Reindex and refresh collation versions (Alpine/musl quirk).
#   7. Prune older backups/dumps and restart the whole stack.
set -euo pipefail
trap 'echo "⚠️ An error occurred. Consider running rollback or checking backups."' ERR

COMPOSE=./docker-compose.yaml
SERVICE=postgres
DATA_DIR=./database
PG_VERSION_FILE="$DATA_DIR/PG_VERSION"

echo "🧪 Validating docker-compose config..."
docker compose -f "$COMPOSE" config > /dev/null || {
  echo "❌ docker-compose config failed. Restore aborted."
  exit 1
}

if [ ! -d "$DATA_DIR" ]; then
  echo "❌ Expected data directory '${DATA_DIR}' does not exist. Aborting."
  exit 1
fi

# echo "🔍 Checking if Postgres service is already running..."
# if ! docker compose ps --services --filter "status=running" | grep -q "^${SERVICE}$"; then
# echo "⚠️ '${SERVICE}' service is not running. Skipping auto-upgrade step."
# echo "🔄 Attempting to start '${SERVICE}' service to detect version..."
# docker compose up -d $SERVICE
# echo "⏳ Waiting for PostgreSQL to become ready..."
# for i in $(seq 1 60); do
# if docker compose exec -T $SERVICE pg_isready -U postgres > /dev/null 2>&1; then
# break
# fi
# echo "⏳ Still waiting... (${i}s)"
# sleep 1
# done
# if ! docker compose exec -T $SERVICE pg_isready -U postgres > /dev/null 2>&1; then
# echo "❌ PostgreSQL did not become ready in time. Aborting."
# echo "💡 Postgres is not running. Revert to the old version in your docker-compose.yaml file and start the service!"
# echo "1. Run: docker compose up -d --force-recreate $SERVICE"
# echo "2. Run: docker compose --profile postgres-rollback run --rm postgres-auto-rollback"
# exit 1
# fi
# fi
# echo "⏳ Waiting for PostgreSQL to become ready before dumping SQL..."
# for i in $(seq 1 120); do
# if docker compose exec -T $SERVICE pg_isready -U postgres > /dev/null 2>&1; then
# break
# fi
# echo "⏳ Still waiting... (${i}s)"
# sleep 1
# done
# if ! docker compose exec -T $SERVICE pg_isready -U postgres > /dev/null 2>&1; then
# echo "❌ PostgreSQL did not become ready in time. Aborting."
# exit 1
# fi

echo "📡 Detecting running PostgreSQL version..."
# PG_VERSION holds the cluster's major version (written by initdb), e.g. "15".
OLD_VERSION=$(cat "$PG_VERSION_FILE")
echo "🔍 Detected running PostgreSQL version: $OLD_VERSION"
OLD_MAJOR=$(echo "$OLD_VERSION" | cut -d. -f1)
echo "🔍 Detected running PostgreSQL major version: $OLD_MAJOR"
OLD_IMG="${OLD_VERSION}-alpine"

echo "🆕 Detecting target version from docker-compose.yaml..."
# NOTE(review): grep "$SERVICE" matches the literal text "postgres" inside the
# image value, not the service name — this works only while the postgres image
# is the sole `image:` line mentioning "postgres"; verify if more are added.
NEW_IMG=$(docker compose -f $COMPOSE config | grep "image:" | grep "$SERVICE" | awk '{print $2}')
# Ensure NEW_IMG was detected
if [[ -z "$NEW_IMG" ]]; then
  echo "❌ Failed to detect target Postgres image from $COMPOSE. Aborting."
  exit 1
fi
NEW_VERSION=$(echo "$NEW_IMG" | sed -E 's/^postgres://; s/-alpine.*$//')
NEW_MAJOR=$(echo "$NEW_VERSION" | cut -d. -f1)
echo "🔁 From $OLD_VERSION (major $OLD_MAJOR) → $NEW_VERSION (major $NEW_MAJOR)"

# Refuse to upgrade onto a pre-release or non-alpine variant tag.
if [[ "$NEW_VERSION" == *beta* ]] || [[ "$NEW_VERSION" == *rc* ]] || [[ "$NEW_VERSION" == *bookworm* ]]; then
  echo "❌ Target version $NEW_VERSION appears to be a pre-release (beta/rc/bookworm). Skipping upgrade."
  echo "💡 Please upgrade to a stable version of Postgres."
  exit 1
fi

# Early exit if no upgrade needed
if [ "$OLD_MAJOR" -eq "$NEW_MAJOR" ]; then
  echo "✅ Already running target major version. Skipping upgrade."
  exit 0
fi

# Paths (e.g. ./database_backup_15-alpine_20250831_121150)
BACKUP_DIR=${DATA_DIR}_backup_${OLD_IMG}_$(date +%Y%m%d_%H%M%S)
UPGRADE_DIR=./database_tmp_upgrade

# 1. Stop services
echo "🛑 Stopping services..."
docker compose -f $COMPOSE down

# 2. Backup database directory (cp -a preserves ownership/permissions)
echo "🔐 Creating backup at ${BACKUP_DIR}..."
cp -a "$DATA_DIR" "$BACKUP_DIR"

# Take a logical dump as a second safety net, using a throwaway container of
# the OLD version mounted over the live data directory.
echo "📦 Dumping full SQL backup using temporary PostgreSQL container..."
DUMP_FILE="backup_dump_${OLD_IMG}_$(date +%Y%m%d_%H%M%S).sql"
TMP_CONTAINER_NAME="pg-dump-${OLD_MAJOR}"
# Run temporary postgres container with existing data dir
docker run -d --rm \
  --name "$TMP_CONTAINER_NAME" \
  -v "$DATA_DIR:/var/lib/postgresql/data" \
  -e POSTGRES_USER=postgres \
  postgres:${OLD_IMG}
echo "⏳ Waiting for pg_dump container to become ready..."
for i in $(seq 1 30); do
  if docker exec "$TMP_CONTAINER_NAME" pg_isready -U postgres > /dev/null 2>&1; then
    break
  fi
  echo "⏳ Still waiting... (${i}s)"
  sleep 1
done
if ! docker exec "$TMP_CONTAINER_NAME" pg_isready -U postgres > /dev/null 2>&1; then
  echo "❌ Temporary container for SQL dump did not become ready. Aborting."
  docker rm -f "$TMP_CONTAINER_NAME" > /dev/null 2>&1 || true
  exit 1
fi
docker exec "$TMP_CONTAINER_NAME" pg_dumpall -U postgres > "$DUMP_FILE"

# Keep only the newest SQL dump to bound disk usage.
echo "🧹 Cleaning up older SQL dump files..."
ALL_DUMPS=( $(ls -t backup_dump_*.sql 2>/dev/null || true) )
if [ "${#ALL_DUMPS[@]}" -gt 1 ]; then
  LATEST_DUMP="${ALL_DUMPS[0]}"
  TO_DELETE=( "${ALL_DUMPS[@]:1}" )
  for dump in "${TO_DELETE[@]}"; do
    echo "🗑️ Removing old dump: $dump"
    rm -f "$dump"
  done
  echo "✅ Only latest dump '${LATEST_DUMP}' preserved."
else
  echo " Only one dump file found. No cleanup needed."
fi
docker rm -f "$TMP_CONTAINER_NAME" > /dev/null 2>&1 || true

# 3. Create upgrade target folder
echo "📁 Creating upgrade workspace ${UPGRADE_DIR}..."
mkdir -p "$UPGRADE_DIR"

# 4. Perform pg_upgrade (reads the backup copy, writes the new cluster)
echo "🔧 Running pg_upgrade via tianon image..."
docker run --rm \
  -v "${BACKUP_DIR}:/var/lib/postgresql/${OLD_MAJOR}/data" \
  -v "${UPGRADE_DIR}:/var/lib/postgresql/${NEW_MAJOR}/data" \
  tianon/postgres-upgrade:${OLD_MAJOR}-to-${NEW_MAJOR} --copy

# 5. Promote new data (the pre-upgrade cluster survives in $BACKUP_DIR)
echo "🔁 Swapping data directories..."
rm -rf "$DATA_DIR"
mv "$UPGRADE_DIR" "$DATA_DIR"

# 6. Restore pg_hba.conf before startup (pg_upgrade writes a default one)
echo "🔄 Restoring pg_hba.conf if it existed..."
cp "${BACKUP_DIR}/pg_hba.conf" "${DATA_DIR}/pg_hba.conf" || echo "✅ No custom pg_hba.conf to restore."

# 7. Update image in docker-compose.yaml
# NOTE(review): the compose file already references ${NEW_IMG} (that is where
# it was detected), so this sed is normally a no-op kept as a safeguard.
echo "📝 Updating docker-compose to use image ${NEW_IMG}..."
sed -i.bak -E "s#postgres:[^ ]*${OLD_MAJOR}[^ ]*#postgres:${NEW_IMG}#" "$COMPOSE"

# 8. Start container
echo "🚀 Starting upgraded container..."
docker compose -f $COMPOSE up -d $SERVICE

# 9. Wait until DB is accepting connections
echo "⏳ Waiting for PostgreSQL to become ready..."
until docker compose exec -T $SERVICE pg_isready -U postgres; do
  sleep 1
done

# 10. Collation and Reindexing — required after a major upgrade, especially on
# Alpine where libc (musl) collation versions differ from glibc images.
echo "🔧 Reindexing and refreshing collation versions..."
docker compose exec $SERVICE bash -c '
set -e
DBS=$(psql -U postgres -tAc "SELECT datname FROM pg_database WHERE datallowconn")
for db in $DBS; do
echo "➡️ Reindexing $db..."
psql -U postgres -d "$db" -c "REINDEX DATABASE \"$db\";" || true
psql -U postgres -d "$db" -c "REINDEX SYSTEM \"$db\";" || true
echo "➡️ Refreshing collation version for $db..."
if ! psql -U postgres -d "$db" -c "ALTER DATABASE \"$db\" REFRESH COLLATION VERSION;" 2>/dev/null; then
echo "⚠️ Collation refresh failed. Forcing reset..."
psql -U postgres -d postgres -c "UPDATE pg_database SET datcollversion = NULL WHERE datname = '\''$db'\'';" || true
psql -U postgres -d "$db" -c "ALTER DATABASE \"$db\" REFRESH COLLATION VERSION;" || \
echo "❌ Still failed for $db. Review manually."
fi
echo "➡️ Refreshing system collations in $db..."
for coll in $(psql -U postgres -d "$db" -tAc "SELECT nspname || '\''.'\'' || quote_ident(collname) FROM pg_collation JOIN pg_namespace ON collnamespace = pg_namespace.oid WHERE collprovider = '\''c'\'';"); do
echo " 🌀 ALTER COLLATION $coll REFRESH VERSION;"
psql -U postgres -d "$db" -c "ALTER COLLATION $coll REFRESH VERSION;" || \
echo " ⚠️ Skipped $coll due to version mismatch (likely Alpine)."
done
done
'

# 11. Suppress collation warnings on musl (Alpine)
if docker compose exec $SERVICE ldd --version 2>&1 | grep -qi 'musl'; then
  echo "🧼 Detected musl libc (Alpine). Resetting all datcollversion values..."
  docker compose exec -T $SERVICE psql -U postgres -d postgres -c \
    "UPDATE pg_database SET datcollversion = NULL WHERE datcollversion IS NOT NULL;"
fi

# 12. Make delete_old_cluster.sh executable (generated by pg_upgrade)
DELETE_SCRIPT="./delete_old_cluster.sh"
if [[ -f "$DELETE_SCRIPT" ]]; then
  chmod +x "$DELETE_SCRIPT"
fi

# 13. Make rollback script executable
ROLLBACK_SCRIPT="./rollback_postgres_upgrade.sh"
if [[ -f "$ROLLBACK_SCRIPT" ]]; then
  chmod +x "$ROLLBACK_SCRIPT"
fi

# 14. Final message
echo "✅ Upgrade complete!"
echo "🎉 Postgres is now running ${NEW_IMG} with data in '${DATA_DIR}'."
echo "🧰 Old version is saved in '${BACKUP_DIR}'."
echo "💡 Next steps:"
echo " - ✅ Run smoke tests"
echo " - 🧹 If all OK - PLEASE MAKE SURE ON YOUR WEBSITE, YOU HAVE ALL THE DATA YOU NEED AFTER THE UPGRADE, run:"
echo "       rm -rf ./database_backup_* ./database_upgraded_*"

# Keep only the backup taken by this run; older ones are pruned.
echo "🧹 Cleaning up older backups..."
find . -maxdepth 1 -type d -name "database_backup_*" ! -path "./${BACKUP_DIR##*/}" -exec rm -rf {} +
echo "✅ Only latest backup '${BACKUP_DIR}' preserved."

# Step 15: Restart full application
echo "🔄 Pulling latest images..."
if ! docker compose pull; then
  echo "❌ Failed to pull images. Aborting."
  exit 1
fi
echo "🔄 Starting full application stack..."
if ! docker compose up -d --force-recreate; then
  echo "❌ Failed to start application stack. Aborting."
  exit 1
fi
echo "✅ Deployment completed successfully."

115
rollback_postgres_upgrade.sh Executable file
View File

@@ -0,0 +1,115 @@
#!/usr/bin/env bash
# rollback_postgres_upgrade.sh — restore the PostgreSQL data directory and the
# compose image tag from a ./database_backup_* folder created by
# postgres_upgrade.sh, then restart the full stack.
set -euo pipefail
COMPOSE=./docker-compose.yaml
SERVICE=postgres
DATA_DIR=./database
ROLLBACK_TIMESTAMP=$(date +%Y%m%d_%H%M%S)
echo "🧪 Validating docker-compose config..."
docker compose -f "$COMPOSE" config > /dev/null || {
echo "❌ docker-compose config failed. Restore aborted."
exit 1
}
# Extract current Postgres image
# NOTE(review): grep "$SERVICE" matches the literal text "postgres" inside the
# image value, not the service name — works only while no other service image
# contains "postgres"; verify if more images are added.
CURRENT_IMG=$(docker compose -f "$COMPOSE" config | grep "image:" | grep "$SERVICE" | awk '{print $2}' || true)
if [[ -z "$CURRENT_IMG" ]]; then
echo "❌ Could not detect current image for service '$SERVICE'."
exit 1
fi
CURRENT_TAG=$(basename "$CURRENT_IMG")
CURRENT_VERSION=$(echo "$CURRENT_TAG" | cut -d'-' -f1) # e.g., 17.5
# Detect appropriate backup folder (newest first via ls -td)
BACKUP_CANDIDATES=($(ls -td ./database_backup_* 2>/dev/null || true))
if [[ ${#BACKUP_CANDIDATES[@]} -eq 0 ]]; then
echo "❌ No backup directory found. Cannot determine previous version."
echo " Available folders:"
ls -1d ./database_backup_* || true
exit 1
elif [[ ${#BACKUP_CANDIDATES[@]} -eq 1 ]]; then
SELECTED_BACKUP="${BACKUP_CANDIDATES[0]}"
echo " Only one backup found. Using: ${SELECTED_BACKUP}"
else
# NOTE(review): with several backups this skips the newest and restores the
# SECOND newest — confirm this is intended; the upgrade script normally
# prunes all but the latest backup, so this branch should be rare.
SELECTED_BACKUP="${BACKUP_CANDIDATES[1]}"
echo " Multiple backups found. Using second latest: ${SELECTED_BACKUP}"
fi
# Extract version from selected backup folder
# Folder names look like ./database_backup_15-alpine_YYYYmmdd_HHMMSS.
OLD_TAG=$(basename "$SELECTED_BACKUP" | sed -E 's/database_backup_(([^_]+)-alpine).*/\1/')
OLD_IMG="postgres:${OLD_TAG}"
DELETED_UPGRADE_DIR=./database_upgraded_${CURRENT_VERSION}_${ROLLBACK_TIMESTAMP}
echo "⏪ Initiating rollback from Postgres ${CURRENT_TAG} to ${OLD_IMG}..."
# Step 1: Confirm backup exists
if [ ! -d "$SELECTED_BACKUP" ]; then
echo "❌ Backup folder '${SELECTED_BACKUP}' not found. Aborting."
exit 1
fi
# Step 2: Stop services
echo "🛑 Stopping running services..."
docker compose -f "$COMPOSE" down
# Step 3: Archive current (possibly broken) database
echo "📦 Archiving current database directory as '${DELETED_UPGRADE_DIR}'..."
mv "$DATA_DIR" "$DELETED_UPGRADE_DIR"
# Step 4: Restore previous version (cp -a preserves ownership/permissions)
echo "♻️ Restoring from backup folder '${SELECTED_BACKUP}'..."
cp -a "$SELECTED_BACKUP" "$DATA_DIR"
# Step 5: Restore image tag in docker-compose.yaml
echo "🔁 Reverting docker-compose image tag to Postgres ${OLD_IMG}..."
# Rewrites only the image line inside the given service stanza, so other
# services' image tags are left untouched.
update_image_tag() {
local svc="$1"
local file="$2"
local target_tag="$3"
echo "🔁 Reverting docker-compose image tag for service '$svc' to Postgres: ${target_tag}..."
# Use awk to scope updates within the service definition only
awk -v service="$svc" -v new_tag="$target_tag" '
BEGIN { in_service = 0 }
/^[ ]{2}[a-zA-Z0-9_-]+:/ {
in_service = ($1 == service ":") ? 1 : 0
}
in_service && /^\s*image:/ {
sub(/postgres:[^"'"'"']+/, "postgres:" new_tag)
}
{ print }
' "$file" > "${file}.tmp" && mv "${file}.tmp" "$file"
}
update_image_tag "$SERVICE" "$COMPOSE" "$OLD_TAG"
# Step 6: Restart Postgres
echo "🚀 Starting Postgres service with restored image..."
docker compose -f "$COMPOSE" up -d "$SERVICE"
# Step 7: Final messages
echo "✅ Rollback complete!"
echo "🗃️ PostgreSQL downgraded to '${OLD_IMG}' and data restored from '${SELECTED_BACKUP}'."
echo "📦 The faulty upgrade has been archived in '${DELETED_UPGRADE_DIR}'."
echo " - To clean: rm -rf ${DELETED_UPGRADE_DIR}"
echo " - To verify: docker compose logs -f $SERVICE"
# Step 8: Restart full application
echo "🔄 Pulling latest images..."
if ! docker compose pull; then
echo "❌ Failed to pull images. Aborting."
exit 1
fi
echo "🔄 Starting full application stack..."
if ! docker compose up -d --force-recreate; then
echo "❌ Failed to start application stack. Please check logs."
exit 1
fi
echo "✅ Deployment completed successfully."