Add self-hosted project folder
This commit is contained in:
32
.env
Normal file
32
.env
Normal file
@@ -0,0 +1,32 @@
|
||||
# ===== Environment Stage ======
|
||||
NODE_ENV=production
|
||||
# ====== Database Configuration ======
|
||||
POSTGRES_PASSWORD=
|
||||
PGADMIN_DEFAULT_PASSWORD=
|
||||
DB_NAME="phoenix"
|
||||
DB_HOST="phoenixDB"
|
||||
DB_PORT=5432
|
||||
DB_USERNAME="postgres"
|
||||
PHX_SYSTEM_CONNECTION_POOL_MAX=5
|
||||
PHX_WORKER_CONNECTION_POOL_MAX=2
|
||||
# ===== PGADMIN CONFIGURATION =====
|
||||
PGADMIN_DEFAULT_EMAIL="info@phx-erp.de"
|
||||
MAIL_SERVER="mail.phx-erp.de"
|
||||
MAIL_PORT=465
|
||||
MAIL_USERNAME="internal@phx-erp.de"
|
||||
MAIL_PASSWORD="8Kb2p4!o1"
|
||||
SECURITY_EMAIL_SENDER="'No Reply PHX <no-reply@phx-erp.de>'"
|
||||
# ====== Phoenix Super Admin Configuration ======
|
||||
SUPER_ADMIN_USER_PASSWORD=123
|
||||
# ====== Redis Configuration ======
|
||||
REDIS_PASSWORD=
|
||||
# ===== Metrics Configuration ======
|
||||
# Loki API URL -> The IP 5.75.153.161 is the Grafana server, which has a firewall rule to allow the connection. If you change it here, it also needs to be changed in NGINX.
|
||||
LOKI_URL=http://grafana.phx-erp.de:3100/loki/api/v1/push
|
||||
LOKI_RETRIES=5
|
||||
LOKI_BATCH_SIZE=500
|
||||
# ===== HTTPS-PORTAL Configuration ======
|
||||
HTTPS_PORTAL_DOMAINS=
|
||||
# ====== PHX-SYSTEM Configuration ======
|
||||
PHOENIX_SYSTEM_REPLICAS=1
|
||||
PHX_HOST_NAME=
|
||||
15
.gitignore
vendored
Normal file
15
.gitignore
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
|
||||
# Backup files
|
||||
*.bak
|
||||
*.backup
|
||||
*.sql
|
||||
*.dump
|
||||
backup/
|
||||
backups/
|
||||
*.tar.gz
|
||||
*.zip
|
||||
*.7z
|
||||
*_backup
|
||||
*_backup_*
|
||||
backup_*
|
||||
*.old
|
||||
16
README-Postgres-Upgrade.md
Normal file
16
README-Postgres-Upgrade.md
Normal file
@@ -0,0 +1,16 @@
|
||||
|
||||
## Upgrade Modes
|
||||
> [`Cloud Servers`](https://phx-erp.youtrack.cloud/articles/INT-A-105/PostgreSQL-Docker-Upgrade-Rollback-Guide-Any-Version-Any-Version?edit=true)
|
||||
> [`Self Hosted`](https://phx-erp.youtrack.cloud/articles/INT-A-106/PostgreSQL-Upgrade-Rollback-Self-Hosted)
|
||||
|
||||
# Quick Move
|
||||
|
||||
## Upgrade PostgreSQL
|
||||
```bash
|
||||
docker compose --profile postgres-upgrade run --rm postgres-auto-upgrade
|
||||
```
|
||||
|
||||
## Rollback PostgreSQL (if needed)
|
||||
```bash
|
||||
docker compose --profile postgres-rollback run --rm postgres-auto-rollback
|
||||
```
|
||||
3
app_custom/custom-style.css
Normal file
3
app_custom/custom-style.css
Normal file
@@ -0,0 +1,3 @@
|
||||
/* .login-logo-img {
|
||||
background-image: url("/assets/custom/loginscreen-logo.png") !important;
|
||||
} */
|
||||
87
crash_diagnose.sh
Normal file
87
crash_diagnose.sh
Normal file
@@ -0,0 +1,87 @@
|
||||
#!/bin/bash
# crash_diagnose.sh — post-mortem crash diagnostics for a Linux + Docker host.
#
# Inspects the PREVIOUS boot's journal (-b -1) for OOM kills, oomd events,
# CPU pressure and high-priority errors, then (if Docker is available) reports
# OOM-killed / restarting containers, top memory/CPU consumers, memory limits
# and recent logs of PHX-related containers. Finally, if sysstat's `sar` is
# installed, it summarizes historical memory/CPU usage.
#
# Run after a crash, or schedule via cron for proactive monitoring.
set -euo pipefail

YELLOW='\033[1;33m'
RED='\033[1;31m'
NC='\033[0m' # No color

echo -e "📦 ${YELLOW}PHX Crash Diagnostic Tool (Docker + Linux Server)${NC}"
echo "🔍 Boot Timeline:"
journalctl --list-boots | head -3

# Every `journalctl | grep` below uses `|| echo` so an empty result does not
# kill the script under `set -e -o pipefail`.
echo -e "\n⚠️ OOM Kills:"
journalctl -b -1 | grep -i 'killed process' || echo "None found."

echo -e "\n⚠️ Out of Memory Events:"
journalctl -b -1 | grep -i 'out of memory' || echo "None found."

echo -e "\n⚠️ systemd-oomd Events:"
journalctl -b -1 | grep systemd-oomd || echo "None found."

echo -e "\n🔥 CPU/Load Pressure (dmesg/syslog):"
journalctl -b -1 | grep -Ei 'cpu|load average|soft lockup|hung task' || echo "None found."

echo -e "\n🚨 System Errors (priority 0–3):"
# FIX: journalctl priority ranges are FROM..TO with the more severe (lower)
# number first; the previous '3..0' was inverted.
journalctl -b -1 -p 0..3 || echo "None found."

if command -v docker &> /dev/null && docker info >/dev/null 2>&1; then
    echo -e "\n🐳 Docker detected and running."

    CONTAINERS=$(docker ps -aq)
    if [[ -z "$CONTAINERS" ]]; then
        echo -e "\n⚠️ No containers found. Skipping container-specific diagnostics."
    else
        echo -e "\n🐳 Docker OOM-Killed Containers:"
        # $CONTAINERS is intentionally unquoted: one word per container ID.
        docker inspect $CONTAINERS 2>/dev/null | grep -B10 '"OOMKilled": true' || echo "No containers were OOMKilled."

        echo -e "\n🔁 Recently Restarted Containers:"
        # FIX: docker reports the status as "Restarting (...)"; the previous
        # pattern 'restarted' could never match.
        docker ps -a --format '{{.Names}}\t{{.Status}}' | grep -i 'restarting' || echo "No recent restarts."

        # head -n 6 = 5 containers + the table header emitted by --format.
        echo -e "\n📉 Top 5 Containers by Memory Usage (now):"
        docker stats --no-stream --format "table {{.Name}}\t{{.MemUsage}}" | sort -k2 -hr | head -n 6

        echo -e "\n📈 Top 5 Containers by CPU Usage (now):"
        docker stats --no-stream --format "table {{.Name}}\t{{.CPUPerc}}" | sort -k2 -hr | head -n 6

        echo -e "\n📋 Docker Container Memory Limits:"
        # HostConfig.Memory is 0 when no limit is set, so ': 0' filters those out.
        docker inspect $CONTAINERS --format '{{ .Name }}: {{ .HostConfig.Memory }} bytes' | grep -v ': 0' || echo "None set"

        echo -e "\n📋 Containers With No Memory Limit:"
        docker inspect $CONTAINERS --format '{{ .Name }}: {{ .HostConfig.Memory }}' | awk '$2 == 0 {print $1}'

        echo -e "\n📝 Last 100 Log Lines from PHX Containers:"
        for name in $(docker ps -a --format '{{.Names}}' | grep -i 'phoenix\|pgadmin\|postgres'); do
            echo -e "\n--- Logs for $name ---"
            docker logs --tail=100 "$name" 2>/dev/null || echo "No logs for $name"
        done
    fi
else
    echo -e "\n🐳 ${RED}Docker is not installed or not running.${NC}"
fi

# Historical CPU/memory usage with 'sar'
if command -v sar &> /dev/null; then
    echo -e "\n📊 Analyzing Memory and CPU Usage via sar (last 60 mins if possible)..."

    # NOTE(review): the awk field positions ($4 = %memused, $8 = %commit below;
    # $3/$5/$8 for CPU) depend on the local sysstat version's column layout —
    # verify against `sar -r` / `sar -u` output on the target host.
    echo -e "\n🔍 Memory Usage (High Usage if >90%):"
    sar -r | awk '
      BEGIN { OFS="\t"; print "Time", "%memused", "%commit", "Status" }
      /^[0-9]/ {
        memused = $4; commit = $8;
        status = (memused+0 > 90 || commit+0 > 95) ? "⚠️ HIGH" : "OK";
        printf "%s\t%s%%\t%s%%\t%s\n", $1, memused, commit, status;
      }'

    echo -e "\n🔍 CPU Usage (High if %idle < 10 or %system > 90):"
    sar -u | awk '
      BEGIN { OFS="\t"; print "Time", "%user", "%system", "%idle", "Status" }
      /^[0-9]/ {
        user = $3; sys = $5; idle = $8;
        status = (idle+0 < 10 || sys+0 > 90) ? "⚠️ HIGH" : "OK";
        printf "%s\t%s%%\t%s%%\t%s%%\t%s\n", $1, user, sys, idle, status;
      }'
else
    echo -e "\nℹ️ 'sar' (sysstat) is not installed. Skipping historical CPU/memory analysis."
fi

echo -e "\n✅ ${YELLOW}Done. Use this script after crashes or schedule it in cron for proactive monitoring.${NC}"
|
||||
509
docker-compose.yaml
Normal file
509
docker-compose.yaml
Normal file
@@ -0,0 +1,509 @@
|
||||
---
|
||||
services:
|
||||
postgres-auto-upgrade:
|
||||
profiles:
|
||||
- postgres-upgrade # 🟢 This isolates the service
|
||||
image: alpine:3.19
|
||||
container_name: postgres_auto_upgrade
|
||||
working_dir: /opt/phx
|
||||
volumes:
|
||||
- .:/opt/phx:rw
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
entrypoint: >
|
||||
sh -c "
|
||||
apk add --no-cache bash coreutils grep sed findutils curl docker-cli dos2unix &&
|
||||
mkdir -p ~/.docker/cli-plugins &&
|
||||
curl -SL https://github.com/docker/compose/releases/download/v2.27.0/docker-compose-linux-x86_64 -o ~/.docker/cli-plugins/docker-compose &&
|
||||
chmod +x ~/.docker/cli-plugins/docker-compose &&
|
||||
chmod +x ./postgres_upgrade.sh &&
|
||||
./postgres_upgrade.sh"
|
||||
restart: 'no'
|
||||
depends_on: []
|
||||
network_mode: bridge
|
||||
postgres-auto-rollback:
|
||||
profiles:
|
||||
- postgres-rollback # 🟢 This isolates the service
|
||||
image: alpine:3.19
|
||||
container_name: postgres_rollback
|
||||
working_dir: /opt/phx
|
||||
volumes:
|
||||
- .:/opt/phx:rw
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
entrypoint: >
|
||||
sh -c "
|
||||
apk add --no-cache bash coreutils grep sed findutils curl docker-cli dos2unix &&
|
||||
mkdir -p ~/.docker/cli-plugins &&
|
||||
curl -SL https://github.com/docker/compose/releases/download/v2.27.0/docker-compose-linux-x86_64 -o ~/.docker/cli-plugins/docker-compose &&
|
||||
chmod +x ~/.docker/cli-plugins/docker-compose &&
|
||||
chmod +x ./rollback_postgres_upgrade.sh &&
|
||||
./rollback_postgres_upgrade.sh"
|
||||
restart: 'no'
|
||||
depends_on: []
|
||||
network_mode: bridge
|
||||
postgres:
|
||||
restart: always
|
||||
image: "postgres:15.1-alpine"
|
||||
container_name: phoenixDB # Hostname
|
||||
# logging:
|
||||
# driver: loki
|
||||
# options:
|
||||
# loki-url: "${LOKI_URL}"
|
||||
# loki-retries: "${LOKI_RETRIES}"
|
||||
# loki-batch-size: "${LOKI_BATCH_SIZE}"
|
||||
# loki-external-labels: "service=phx-postgres,env=prod"
|
||||
networks:
|
||||
- backend
|
||||
environment:
|
||||
DEBUG: true
|
||||
POSTGRES_DB: phoenix
|
||||
POSTGRES_PASSWORD: "8*6&Ti3TJxN^"
|
||||
volumes:
|
||||
- "./database:/var/lib/postgresql/data"
|
||||
# deploy:
|
||||
# restart_policy: # Define how the service should restart when it fails
|
||||
# condition: on-failure # Only restart if the container exits with a non-zero code
|
||||
# delay: 5s # Wait 5 seconds before attempting to restart
|
||||
# max_attempts: 5 # Maximum number of restart attempts before giving up
|
||||
# window: 120s # Time window to evaluate restart attempts (resets counter after this period)
|
||||
# resources: # Resource allocation and limits for the container
|
||||
# limits: # Maximum resources the container can use
|
||||
# cpus: "0.75" # Maximum CPU cores (75% of one core)
|
||||
# memory: 768M # Maximum memory usage (768 megabytes)
|
||||
# reservations: # Guaranteed minimum resources for the container
|
||||
# cpus: "0.25" # Reserved CPU cores (25% of one core)
|
||||
# memory: 256M # Reserved memory (256 megabytes)
|
||||
healthcheck:
|
||||
test: [ "CMD-SHELL", "pg_isready -U postgres" ]
|
||||
interval: 5s # Time between each health check
|
||||
timeout: 2s # Max time to wait for each check
|
||||
retries: 5 # Number of failures before marking as unhealthy
|
||||
pgadmin:
|
||||
restart: always
|
||||
image: dpage/pgadmin4:9.6.0
|
||||
container_name: pgadmin4-ui
|
||||
ports:
|
||||
- "5050:80"
|
||||
user: "5050:5050"
|
||||
# logging:
|
||||
# driver: loki
|
||||
# options:
|
||||
# loki-url: "${LOKI_URL}"
|
||||
# loki-retries: "${LOKI_RETRIES}"
|
||||
# loki-batch-size: "${LOKI_BATCH_SIZE}"
|
||||
# loki-external-labels: "service=phx-pgadmin,env=prod"
|
||||
networks:
|
||||
- backend
|
||||
- frontend
|
||||
environment:
|
||||
PGADMIN_DEFAULT_EMAIL: "info@phx-erp.de"
|
||||
PGADMIN_DEFAULT_PASSWORD: "123"
|
||||
PGADMIN_CONFIG_SERVER_MODE: 'True'
|
||||
PGADMIN_CONFIG_PROXY_X_PROTO_COUNT: 1
|
||||
PGADMIN_SERVER_JSON_FILE: '/var/lib/pgadmin/servers.json'
|
||||
PGADMIN_REPLACE_SERVERS_ON_STARTUP: 'True'
|
||||
PGADMIN_CONFIG_DATA_DIR: "'/var/lib/pgadmin'"
|
||||
PGADMIN_CONFIG_MASTER_PASSWORD_REQUIRED: 'False'
|
||||
PGPASSFILE: /var/lib/pgadmin/pgpass
|
||||
PGPASS_HOST: "phoenixDB"
|
||||
PGPASS_PORT: 5432
|
||||
PGPASS_DB: "phoenix"
|
||||
PGPASS_USER: "postgres"
|
||||
PGPASS_PASSWORD: ""
|
||||
ALLOW_SAVE_PASSWORD: 'False'
|
||||
MFA_ENABLED: 'True'
|
||||
MFA_FORCE_REGISTRATION: 'False'
|
||||
MFA_SUPPORTED_METHODS: 'email'
|
||||
MFA_EMAIL_SUBJECT: 'Your MFA code by PHX-ERP'
|
||||
MAX_LOGIN_ATTEMPTS: 5
|
||||
ENHANCED_COOKIE_PROTECTION: 'True'
|
||||
SHOW_GRAVATAR_IMAGE: 'True'
|
||||
SECURITY_EMAIL_SENDER: "'No Reply PHX <no-reply@phx-erp.de>'"
|
||||
MAIL_SERVER: "mail.phx-erp.de"
|
||||
MAIL_PORT: 465
|
||||
MAIL_USE_SSL: 'False'
|
||||
MAIL_USE_TLS: 'False'
|
||||
MAIL_USERNAME: "internal@phx-erp.de"
|
||||
MAIL_PASSWORD: "8Kb2p4!o1"
|
||||
MAIL_DEBUG: 'False'
|
||||
volumes:
|
||||
- ./pgadmin/data:/var/lib/pgadmin
|
||||
- ./pgadmin/pgadmin-entrypoint.sh:/docker-entrypoint.sh:ro
|
||||
mem_limit: 512M
|
||||
memswap_limit: 512M
|
||||
deploy:
|
||||
restart_policy: # Define how the service should restart when it fails
|
||||
condition: on-failure # Only restart if the container exits with a non-zero code
|
||||
delay: 5s # Wait 5 seconds before attempting to restart
|
||||
max_attempts: 5 # Maximum number of restart attempts before giving up
|
||||
window: 120s # Time window to evaluate restart attempts (resets counter after this period)
|
||||
resources: # Resource allocation and limits for the container
|
||||
limits: # Maximum resources the container can use
|
||||
cpus: "1.0" # Maximum CPU cores (100% of one core)
|
||||
memory: 512M # Maximum memory usage (512 megabytes)
|
||||
reservations: # Guaranteed minimum resources for the container
|
||||
cpus: "0.15" # Reserved CPU cores (15% of one core)
|
||||
memory: 250M # Reserved memory (250 megabytes)
|
||||
entrypoint: ["/bin/sh", "/docker-entrypoint.sh"]
|
||||
depends_on:
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "-O", "-", "http://localhost:80/misc/ping"]
|
||||
interval: 15s
|
||||
timeout: 10s
|
||||
retries: 5
|
||||
start_period: 120s
|
||||
phoenix-app:
|
||||
restart: always
|
||||
image: "phxerp/phoenix-app:alpha"
|
||||
container_name: phoenix-app
|
||||
ports:
|
||||
- "3000:3000" # Restrict to only allow access from Grafana Server IP
|
||||
# logging:
|
||||
# driver: loki
|
||||
# options:
|
||||
# loki-url: "${LOKI_URL}"
|
||||
# loki-retries: "${LOKI_RETRIES}"
|
||||
# loki-batch-size: "${LOKI_BATCH_SIZE}"
|
||||
# loki-external-labels: "service=phx-app,env=prod,project=phoenix"
|
||||
volumes:
|
||||
- ./app_custom:/usr/share/nginx/html/assets/custom
|
||||
# - ./nginx/nginx.conf:/etc/nginx/nginx.conf # Uncomment this if you want to use override the default nginx.conf
|
||||
# - ./nginx/includes:/etc/nginx/includes:ro # Uncomment this if you want to use override the default includes
|
||||
networks:
|
||||
- backend
|
||||
- frontend
|
||||
# deploy:
|
||||
# restart_policy: # Define how the service should restart when it fails
|
||||
# condition: on-failure # Only restart if the container exits with a non-zero code
|
||||
# delay: 5s # Wait 5 seconds before attempting to restart
|
||||
# max_attempts: 5 # Maximum number of restart attempts before giving up
|
||||
# window: 120s # Time window to evaluate restart attempts (resets counter after this period)
|
||||
# resources: # Resource allocation and limits for the container
|
||||
# limits: # Maximum resources the container can use
|
||||
# cpus: "0.35" # Maximum CPU cores (35% of one core)
|
||||
# memory: 384M # Maximum memory usage (384 megabytes)
|
||||
# reservations: # Guaranteed minimum resources for the container
|
||||
# cpus: "0.10" # Reserved CPU cores (10% of one core)
|
||||
# memory: 128M # Reserved memory (128 megabytes)
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "--spider", "-q", "http://phoenix-app/login"] # localhost checks that the NGINX server inside the container is serving something at the root
|
||||
interval: 10s # check every 10 seconds
|
||||
timeout: 5s # allow 5 seconds per check
|
||||
retries: 5 # mark as unhealthy after 5 failures
|
||||
start_period: 15s # wait 15 seconds after container start before checking
|
||||
phoenix-system:
|
||||
restart: always
|
||||
image: "phxerp/phoenix-system:alpha"
|
||||
# logging:
|
||||
# driver: loki
|
||||
# options:
|
||||
# loki-url: "${LOKI_URL}"
|
||||
# loki-retries: "${LOKI_RETRIES}"
|
||||
# loki-batch-size: "${LOKI_BATCH_SIZE}"
|
||||
# loki-external-labels: "service=phoenix-system,env=prod"
|
||||
environment:
|
||||
- "DB_HOST=${DB_HOST}"
|
||||
- "DB_NAME=${DB_NAME}"
|
||||
- "DB_PASSWORD=${POSTGRES_PASSWORD}"
|
||||
- "DB_USERNAME=${DB_USERNAME}"
|
||||
- "SUPER_ADMIN_USER_PASSWORD=${SUPER_ADMIN_USER_PASSWORD}"
|
||||
- "REDIS_PASSWORD=${REDIS_PASSWORD}"
|
||||
- NODE_ENV=${NODE_ENV}
|
||||
- PHX_HOST_NAME=${PHX_HOST_NAME}
|
||||
- PERFORMANCE_STRUCTURED_LOGGING=${PERFORMANCE_STRUCTURED_LOGGING}
|
||||
- PERFORMANCE_WARNING_THRESHOLD=${PERFORMANCE_WARNING_THRESHOLD}
|
||||
- PERFORMANCE_DETAILED_MEMORY=${PERFORMANCE_DETAILED_MEMORY}
|
||||
command: ["npm", "run", "start:server"]
|
||||
deploy:
|
||||
replicas: ${PHOENIX_SYSTEM_REPLICAS} # Change this value if you want more replicas.
|
||||
# restart_policy: # Define how the service should restart when it fails
|
||||
# condition: on-failure # Only restart if the container exits with a non-zero code
|
||||
# delay: 5s # Wait 5 seconds before attempting to restart
|
||||
# max_attempts: 5 # Maximum number of restart attempts before giving up
|
||||
# window: 120s # Time window to evaluate restart attempts (resets counter after this period)
|
||||
# resources: # Resource allocation and limits for the container
|
||||
# limits: # Maximum resources the container can use
|
||||
# cpus: "1.50" # Maximum CPU cores (150% of one core)
|
||||
# memory: 1600M # Maximum memory usage (1600 megabytes)
|
||||
# reservations: # Guaranteed minimum resources for the container
|
||||
# cpus: "0.50" # Reserved CPU cores (50% of one core)
|
||||
# memory: 768M # Reserved memory (768 megabytes)
|
||||
networks:
|
||||
backend:
|
||||
aliases:
|
||||
- phoenix-system
|
||||
depends_on:
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
phoenix-redis:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "curl -s http://phoenix-system:3000/health | grep -q '\"admin-api\":{\"status\":\"up\"}' && curl -s http://phoenix-system:3000/health | grep -q '\"database\":{\"status\":\"up\"}'"] # Checks both admin-api and database status
|
||||
interval: 10s # Time between each health check
|
||||
timeout: 10s # Max time to wait for each check
|
||||
retries: 20 # Number of failures before marking as unhealthy
|
||||
start_period: 60s # Grace period before health checks start
|
||||
volumes:
|
||||
- "./assets:/usr/src/app/packages/dev-server/assets"
|
||||
- "./server_custom:/usr/src/app/packages/dev-server/custom"
|
||||
# - "./logs:/usr/src/app/packages/dev-server/logs"
|
||||
phoenix-worker:
|
||||
restart: always
|
||||
image: "phxerp/phoenix-system:alpha"
|
||||
container_name: "phoenix-worker"
|
||||
ports:
|
||||
- "3001:3001" # Restrict to only allow access from Grafana Server IP
|
||||
# logging:
|
||||
# driver: loki
|
||||
# options:
|
||||
# loki-url: "${LOKI_URL}"
|
||||
# loki-retries: "${LOKI_RETRIES}"
|
||||
# loki-batch-size: "${LOKI_BATCH_SIZE}"
|
||||
# loki-external-labels: "service=phx-worker,env=prod"
|
||||
networks:
|
||||
- backend
|
||||
environment:
|
||||
- "DB_HOST=${DB_HOST}"
|
||||
- "DB_NAME=${DB_NAME}"
|
||||
- "DB_PASSWORD=${POSTGRES_PASSWORD}"
|
||||
- "DB_USERNAME=${DB_USERNAME}"
|
||||
- "SUPER_ADMIN_USER_PASSWORD=${SUPER_ADMIN_USER_PASSWORD}"
|
||||
- REDIS_PASSWORD=${REDIS_PASSWORD}
|
||||
- NODE_ENV=${NODE_ENV}
|
||||
- PHX_HOST_NAME=${PHX_HOST_NAME}
|
||||
- PERFORMANCE_STRUCTURED_LOGGING=${PERFORMANCE_STRUCTURED_LOGGING}
|
||||
- PERFORMANCE_WARNING_THRESHOLD=${PERFORMANCE_WARNING_THRESHOLD}
|
||||
- PERFORMANCE_DETAILED_MEMORY=${PERFORMANCE_DETAILED_MEMORY}
|
||||
command: ['npm', 'run', 'start:worker']
|
||||
# deploy:
|
||||
# restart_policy: # Define how the service should restart when it fails
|
||||
# condition: on-failure # Only restart if the container exits with a non-zero code
|
||||
# delay: 5s # Wait 5 seconds before attempting to restart
|
||||
# max_attempts: 5 # Maximum number of restart attempts before giving up
|
||||
# window: 120s # Time window to evaluate restart attempts (resets counter after this period)
|
||||
# resources: # Resource allocation and limits for the container
|
||||
# limits: # Maximum resources the container can use
|
||||
# cpus: '2.0' # Maximum CPU cores (200% of one core)
|
||||
# memory: 2G # Maximum memory usage (2 gigabytes)
|
||||
# reservations: # Guaranteed minimum resources for the container
|
||||
# cpus: '0.5' # Reserved CPU cores (50% of one core)
|
||||
# memory: 512M # Reserved memory (512 megabytes)
|
||||
depends_on:
|
||||
phoenix-system:
|
||||
condition: service_healthy
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test: [ "CMD-SHELL", "curl -s http://phoenix-worker:3001/health | grep -q '\"status\":\"ok\"'" ] # Check if worker responds with status ok
|
||||
interval: 10s # Time between each health check
|
||||
timeout: 6s # Max time to wait for each check
|
||||
retries: 20 # Number of failures before marking as unhealthy
|
||||
start_period: 30s # Grace period before health checks start
|
||||
volumes:
|
||||
- "./assets:/usr/src/app/packages/dev-server/assets"
|
||||
- "./server_custom:/usr/src/app/packages/dev-server/custom"
|
||||
# - "./logs:/usr/src/app/packages/dev-server/logs"
|
||||
phoenix-redis:
|
||||
image: 'bitnami/redis:latest'
|
||||
container_name: redis
|
||||
command: /opt/bitnami/scripts/redis/run.sh # Not good, but as agreed. At some point i can start using this: --maxmemory + add eviction policy
|
||||
user: root
|
||||
# logging:
|
||||
# driver: loki
|
||||
# options:
|
||||
# loki-url: "${LOKI_URL}"
|
||||
# loki-retries: "${LOKI_RETRIES}"
|
||||
# loki-batch-size: "${LOKI_BATCH_SIZE}"
|
||||
# loki-external-labels: "service=phx-redis,env=prod"
|
||||
networks:
|
||||
- backend
|
||||
restart: always
|
||||
environment:
|
||||
ALLOW_EMPTY_PASSWORD: "no"
|
||||
REDIS_PASSWORD: ${REDIS_PASSWORD}
|
||||
# deploy:
|
||||
# restart_policy: # Define how the service should restart when it fails
|
||||
# condition: on-failure # Only restart if the container exits with a non-zero code
|
||||
# delay: 5s # Wait 5 seconds before attempting to restart
|
||||
# max_attempts: 5 # Maximum number of restart attempts before giving up
|
||||
# window: 120s # Time window to evaluate restart attempts (resets counter after this period)
|
||||
# resources: # Resource allocation and limits for the container
|
||||
# limits: # Maximum resources the container can use
|
||||
# cpus: "0.25" # Maximum CPU cores (25% of one core)
|
||||
# memory: 100M # Maximum memory usage (100 megabytes)
|
||||
# reservations: # Guaranteed minimum resources for the container
|
||||
# cpus: "0.05" # Reserved CPU cores (5% of one core)
|
||||
# memory: 32M # Reserved memory (32 megabytes)
|
||||
healthcheck:
|
||||
test: [
|
||||
"CMD-SHELL",
|
||||
"redis-cli --no-auth-warning -a ${REDIS_PASSWORD} ping | grep PONG && test -w /bitnami/redis/data"
|
||||
]
|
||||
interval: 5s
|
||||
retries: 10
|
||||
timeout: 5s
|
||||
depends_on:
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
volumes:
|
||||
- "./redis/data:/bitnami/redis/data"
|
||||
phoenix-health-exporter:
|
||||
image: phxerp/phoenix-health-exporter:alpha
|
||||
container_name: health_exporter
|
||||
restart: unless-stopped
|
||||
# logging:
|
||||
# driver: loki
|
||||
# options:
|
||||
# loki-url: "${LOKI_URL}"
|
||||
# loki-retries: "${LOKI_RETRIES}"
|
||||
# loki-batch-size: "${LOKI_BATCH_SIZE}"
|
||||
# loki-external-labels: "service=phx-health-exporter,env=prod"
|
||||
ports:
|
||||
- "9800:9800"
|
||||
environment:
|
||||
DB_HOST: ${DB_HOST}
|
||||
DB_NAME: ${DB_NAME}
|
||||
DB_PASSWORD: ${POSTGRES_PASSWORD}
|
||||
DB_USERNAME: ${DB_USERNAME}
|
||||
REDIS_PASSWORD: ${REDIS_PASSWORD}
|
||||
networks:
|
||||
- frontend
|
||||
- backend
|
||||
volumes:
|
||||
- /etc/hostname:/etc/host_hostname:ro # This ensures the container always uses the real machine hostname, even if restarted or recreated.
|
||||
security_opt:
|
||||
- no-new-privileges:true
|
||||
memswap_limit: 512M
|
||||
deploy:
|
||||
restart_policy: # Define how the service should restart when it fails
|
||||
condition: on-failure # Only restart if the container exits with a non-zero code
|
||||
delay: 5s # Wait 5 seconds before attempting to restart
|
||||
max_attempts: 5 # Maximum number of restart attempts before giving up
|
||||
window: 120s # Time window to evaluate restart attempts (resets counter after this period)
|
||||
resources: # Resource allocation and limits for the container
|
||||
limits: # Maximum resources the container can use
|
||||
cpus: "0.5" # Maximum CPU cores (50% of one core)
|
||||
memory: 256M # Maximum memory usage (256 megabytes)
|
||||
reservations: # Guaranteed minimum resources for the container
|
||||
cpus: "0.1" # Reserved CPU cores (10% of one core)
|
||||
memory: 64M # Reserved memory (64 megabytes)
|
||||
depends_on:
|
||||
phoenix-system:
|
||||
condition: service_healthy
|
||||
phoenix-worker:
|
||||
condition: service_healthy
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "curl -sf http://localhost:9800/healthz || exit 1"]
|
||||
interval: 1m
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 15s
|
||||
node-exporter:
|
||||
image: quay.io/prometheus/node-exporter:latest
|
||||
container_name: node_exporter
|
||||
# logging:
|
||||
# driver: loki
|
||||
# options:
|
||||
# loki-url: "${LOKI_URL}"
|
||||
# loki-retries: "${LOKI_RETRIES}"
|
||||
# loki-batch-size: "${LOKI_BATCH_SIZE}"
|
||||
# loki-external-labels: "service=phx-node-exporter,env=prod"
|
||||
networks:
|
||||
- metrics
|
||||
- frontend
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "9100:9100" # Restrict to only allow access from Grafana Server IP
|
||||
command:
|
||||
- "--path.procfs=/host/proc"
|
||||
- "--path.sysfs=/host/sys"
|
||||
- "--path.rootfs=/host"
|
||||
- "--collector.filesystem.ignored-mount-points=^/(sys|proc|dev)($$|/)"
|
||||
volumes:
|
||||
- "/proc:/host/proc:ro"
|
||||
- "/sys:/host/sys:ro"
|
||||
- "/:/host:ro,rslave"
|
||||
security_opt:
|
||||
- no-new-privileges:true
|
||||
memswap_limit: 512M
|
||||
deploy:
|
||||
restart_policy: # Define how the service should restart when it fails
|
||||
condition: on-failure # Only restart if the container exits with a non-zero code
|
||||
delay: 5s # Wait 5 seconds before attempting to restart
|
||||
max_attempts: 5 # Maximum number of restart attempts before giving up
|
||||
window: 120s # Time window to evaluate restart attempts (resets counter after this period)
|
||||
resources: # Resource allocation and limits for the container
|
||||
limits: # Maximum resources the container can use
|
||||
cpus: "0.25" # Maximum CPU cores (25% of one core)
|
||||
memory: 128M # Maximum memory usage (128 megabytes)
|
||||
reservations: # Guaranteed minimum resources for the container
|
||||
cpus: "0.05" # Reserved CPU cores (5% of one core)
|
||||
memory: 32M # Reserved memory (32 megabytes)
|
||||
depends_on:
|
||||
phoenix-worker: # This is to avoid alocation of resources to the node-exporter if the phoenix-worker is not healthy yet.
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "-qO-", "http://localhost:9100/metrics"]
|
||||
interval: 15s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 20s
|
||||
# nginx-exporter:
|
||||
# image: nginx/nginx-prometheus-exporter:1.4.2
|
||||
# container_name: nginx_exporter
|
||||
# restart: unless-stopped
|
||||
# # logging:
|
||||
# # driver: loki
|
||||
# # options:
|
||||
# # loki-url: "${LOKI_URL}"
|
||||
# # loki-retries: "${LOKI_RETRIES}"
|
||||
# # loki-batch-size: "${LOKI_BATCH_SIZE}"
|
||||
# # loki-external-labels: "service=phx-nginx-exporter,env=prod"
|
||||
# ports:
|
||||
# - "9113:9113" # Restrict to only allow access from Grafana Server IP
|
||||
# command:
|
||||
# - '--nginx.scrape-uri=http://phoenix-app/stub_status'
|
||||
# security_opt:
|
||||
# - no-new-privileges:true
|
||||
# deploy:
|
||||
# resources:
|
||||
# limits:
|
||||
# cpus: '0.25'
|
||||
# memory: 128M
|
||||
# depends_on:
|
||||
# phoenix-app:
|
||||
# condition: service_healthy
|
||||
# networks:
|
||||
# - frontend
|
||||
# - metrics
|
||||
# healthcheck:
|
||||
# test: ["CMD", "wget", "-qO-", "http://localhost:9113/metrics"] # Not working as expected
|
||||
# interval: 15s
|
||||
# timeout: 5s
|
||||
# retries: 3
|
||||
# start_period: 10s
|
||||
|
||||
networks:
|
||||
backend:
|
||||
driver: bridge
|
||||
external: false
|
||||
ipam:
|
||||
config:
|
||||
- subnet: 172.19.0.0/16
|
||||
|
||||
frontend:
|
||||
driver: bridge
|
||||
external: false
|
||||
ipam:
|
||||
config:
|
||||
- subnet: 172.20.0.0/16
|
||||
|
||||
metrics:
|
||||
driver: bridge
|
||||
external: false
|
||||
ipam:
|
||||
config:
|
||||
- subnet: 172.22.0.0/16
|
||||
8
helper.md
Normal file
8
helper.md
Normal file
@@ -0,0 +1,8 @@
|
||||
Check if servers.json is present
|
||||
```bash
|
||||
docker exec -it pgadmin4-ui ls -l /var/lib/pgadmin/servers.json
|
||||
```
|
||||
Check the content of servers.json
|
||||
```bash
|
||||
docker exec -it pgadmin4-ui cat /var/lib/pgadmin/servers.json
|
||||
```
|
||||
49
pgadmin/pgadmin-entrypoint.sh
Normal file
49
pgadmin/pgadmin-entrypoint.sh
Normal file
@@ -0,0 +1,49 @@
|
||||
#!/bin/sh
# pgadmin-entrypoint.sh — wrapper entrypoint for the pgAdmin container.
#
# Before delegating to the stock /entrypoint.sh it:
#   1. writes a .pgpass file from the PGPASS_* environment variables so
#      pgAdmin can connect without prompting for a password, and
#   2. generates /var/lib/pgadmin/servers.json so the Phoenix DB server is
#      pre-registered in the pgAdmin UI on first start.
set -e

echo "🔧 Entrypoint: Ensuring .pgpass directory and file"

PGADMIN_HOME="/var/lib/pgadmin"
PGPASS_PATH="${PGADMIN_HOME}/pgpass"
SERVERS_JSON_PATH="${PGADMIN_HOME}/servers.json"

# Ensure parent directory exists
mkdir -p "$PGADMIN_HOME"

# Create or overwrite the .pgpass file (format: host:port:db:user:password).
echo "${PGPASS_HOST}:${PGPASS_PORT}:${PGPASS_DB}:${PGPASS_USER}:${PGPASS_PASSWORD}" > "$PGPASS_PATH"
chmod 600 "$PGPASS_PATH"
# FIX: guard chown — under `set -e` an unpermitted chown (e.g. when the
# container is started as an unprivileged user) previously aborted startup;
# mirror the tolerant handling already used for /pgadmin4 below.
chown 5050:5050 "$PGPASS_PATH" 2>/dev/null || echo "⚠️ Could not chown $PGPASS_PATH (likely permission issue)"
export PGPASSFILE="$PGPASS_PATH"

echo "✅ .pgpass ready at $PGPASS_PATH"
echo "🛠️ Generating servers.json for pgAdmin..."

# Try to ensure /pgadmin4 is owned by 5050 if possible
if [ -d /pgadmin4 ]; then
    echo "🔧 Attempting to chown /pgadmin4 to 5050:5050"
    chown 5050:5050 /pgadmin4 2>/dev/null || echo "⚠️ Could not chown /pgadmin4 (likely read-only or permission issue)"
fi

# Unquoted heredoc on purpose: the PGPASS_* variables must expand into the JSON.
cat <<EOF > "$SERVERS_JSON_PATH"
{
  "Servers": {
    "1": {
      "Name": "Phoenix DB",
      "Group": "PHX GROUP",
      "Host": "${PGPASS_HOST}",
      "Port": ${PGPASS_PORT},
      "MaintenanceDB": "${PGPASS_DB}",
      "Username": "${PGPASS_USER}",
      "SSLMode": "prefer",
      "PassFile": "$PGPASSFILE"
    }
  }
}
EOF

chmod 600 "$SERVERS_JSON_PATH"
# FIX: same tolerant chown handling as for the pgpass file above.
chown 5050:5050 "$SERVERS_JSON_PATH" 2>/dev/null || echo "⚠️ Could not chown $SERVERS_JSON_PATH (likely permission issue)"
echo "✅ servers.json created at $SERVERS_JSON_PATH"

# Hand control to the image's original entrypoint, preserving all arguments.
exec /entrypoint.sh "$@"
|
||||
259
postgres_upgrade.sh
Normal file
259
postgres_upgrade.sh
Normal file
@@ -0,0 +1,259 @@
|
||||
#!/usr/bin/env bash
# postgres_upgrade.sh — automated major-version upgrade of the dockerised
# PostgreSQL cluster.
#
# Flow: validate the compose file → detect current (PG_VERSION on disk) and
# target (docker-compose image tag) versions → file-level backup + logical
# SQL dump → pg_upgrade via tianon/postgres-upgrade → swap data directories →
# reindex and refresh collation versions → restart the full stack.
set -euo pipefail
trap 'echo "⚠️ An error occurred. Consider running rollback or checking backups."' ERR

COMPOSE=./docker-compose.yaml
SERVICE=postgres
DATA_DIR=./database
PG_VERSION_FILE="$DATA_DIR/PG_VERSION"

echo "🧪 Validating docker-compose config..."
docker compose -f "$COMPOSE" config > /dev/null || {
    echo "❌ docker-compose config failed. Restore aborted."
    exit 1
}

if [ ! -d "$DATA_DIR" ]; then
    echo "❌ Expected data directory '${DATA_DIR}' does not exist. Aborting."
    exit 1
fi

# The old cluster version is read from PG_VERSION on disk, so the service
# does not need to be running for detection.
echo "📡 Detecting running PostgreSQL version..."
OLD_VERSION=$(cat "$PG_VERSION_FILE")
echo "🔍 Detected running PostgreSQL version: $OLD_VERSION"
OLD_MAJOR=$(echo "$OLD_VERSION" | cut -d. -f1)
echo "🔍 Detected running PostgreSQL major version: $OLD_MAJOR"
OLD_IMG="${OLD_VERSION}-alpine"

echo "🆕 Detecting target version from docker-compose.yaml..."
NEW_IMG=$(docker compose -f "$COMPOSE" config | grep "image:" | grep "$SERVICE" | awk '{print $2}')

# Ensure NEW_IMG was detected
if [[ -z "$NEW_IMG" ]]; then
    echo "❌ Failed to detect target Postgres image from $COMPOSE. Aborting."
    exit 1
fi

NEW_VERSION=$(echo "$NEW_IMG" | sed -E 's/^postgres://; s/-alpine.*$//')
NEW_MAJOR=$(echo "$NEW_VERSION" | cut -d. -f1)

echo "🔁 From $OLD_VERSION (major $OLD_MAJOR) → $NEW_VERSION (major $NEW_MAJOR)"

# Refuse pre-release / unexpected target tags.
if [[ "$NEW_VERSION" == *beta* ]] || [[ "$NEW_VERSION" == *rc* ]] || [[ "$NEW_VERSION" == *bookworm* ]]; then
    echo "❌ Target version $NEW_VERSION appears to be a pre-release (beta/rc/bookworm). Skipping upgrade."
    echo "💡 Please upgrade to a stable version of Postgres."
    exit 1
fi

# Early exit if no upgrade needed
if [ "$OLD_MAJOR" -eq "$NEW_MAJOR" ]; then
    echo "✅ Already running target major version. Skipping upgrade."
    exit 0
fi

# Paths
BACKUP_DIR=${DATA_DIR}_backup_${OLD_IMG}_$(date +%Y%m%d_%H%M%S)
UPGRADE_DIR=./database_tmp_upgrade

# 1. Stop services
echo "🛑 Stopping services..."
docker compose -f "$COMPOSE" down

# 2. File-level backup of the data directory (used by pg_upgrade below and
#    by the rollback script).
echo "🔐 Creating backup at ${BACKUP_DIR}..."
cp -a "$DATA_DIR" "$BACKUP_DIR"

# 2b. Logical SQL backup as a second safety net, taken from a throwaway
#     container of the OLD version mounted on the live data dir.
echo "📦 Dumping full SQL backup using temporary PostgreSQL container..."
DUMP_FILE="backup_dump_${OLD_IMG}_$(date +%Y%m%d_%H%M%S).sql"
TMP_CONTAINER_NAME="pg-dump-${OLD_MAJOR}"

docker run -d --rm \
    --name "$TMP_CONTAINER_NAME" \
    -v "$DATA_DIR:/var/lib/postgresql/data" \
    -e POSTGRES_USER=postgres \
    "postgres:${OLD_IMG}"

echo "⏳ Waiting for pg_dump container to become ready..."
for i in $(seq 1 30); do
    if docker exec "$TMP_CONTAINER_NAME" pg_isready -U postgres > /dev/null 2>&1; then
        break
    fi
    echo "⏳ Still waiting... (${i}s)"
    sleep 1
done

if ! docker exec "$TMP_CONTAINER_NAME" pg_isready -U postgres > /dev/null 2>&1; then
    echo "❌ Temporary container for SQL dump did not become ready. Aborting."
    docker rm -f "$TMP_CONTAINER_NAME" > /dev/null 2>&1 || true
    exit 1
fi

docker exec "$TMP_CONTAINER_NAME" pg_dumpall -U postgres > "$DUMP_FILE"

# Keep only the most recent SQL dump to bound disk usage.
echo "🧹 Cleaning up older SQL dump files..."
ALL_DUMPS=( $(ls -t backup_dump_*.sql 2>/dev/null || true) )

if [ "${#ALL_DUMPS[@]}" -gt 1 ]; then
    LATEST_DUMP="${ALL_DUMPS[0]}"
    TO_DELETE=( "${ALL_DUMPS[@]:1}" )

    for dump in "${TO_DELETE[@]}"; do
        echo "🗑️ Removing old dump: $dump"
        rm -f "$dump"
    done

    echo "✅ Only latest dump '${LATEST_DUMP}' preserved."
else
    echo "ℹ️ Only one dump file found. No cleanup needed."
fi

docker rm -f "$TMP_CONTAINER_NAME" > /dev/null 2>&1 || true

# 3. Create upgrade target folder
echo "📁 Creating upgrade workspace ${UPGRADE_DIR}..."
mkdir -p "$UPGRADE_DIR"

# 4. Perform pg_upgrade (reads the backup copy, writes the new cluster).
echo "🔧 Running pg_upgrade via tianon image..."
docker run --rm \
    -v "${BACKUP_DIR}:/var/lib/postgresql/${OLD_MAJOR}/data" \
    -v "${UPGRADE_DIR}:/var/lib/postgresql/${NEW_MAJOR}/data" \
    "tianon/postgres-upgrade:${OLD_MAJOR}-to-${NEW_MAJOR}" --copy

# 5. Promote new data
echo "🔁 Swapping data directories..."
rm -rf "$DATA_DIR"
mv "$UPGRADE_DIR" "$DATA_DIR"

# 6. Restore pg_hba.conf before startup
echo "🔄 Restoring pg_hba.conf if it existed..."
cp "${BACKUP_DIR}/pg_hba.conf" "${DATA_DIR}/pg_hba.conf" || echo "✅ No custom pg_hba.conf to restore."

# 7. Update image in docker-compose.yaml.
# NOTE: NEW_IMG already contains the "postgres:" repository prefix, so it is
# used verbatim as the replacement (the previous "postgres:${NEW_IMG}" form
# would have produced "postgres:postgres:<tag>").
echo "📝 Updating docker-compose to use image ${NEW_IMG}..."
sed -i.bak -E "s#postgres:[^ ]*${OLD_MAJOR}[^ ]*#${NEW_IMG}#" "$COMPOSE"

# 8. Start container
echo "🚀 Starting upgraded container..."
docker compose -f "$COMPOSE" up -d "$SERVICE"

# 9. Wait until DB is accepting connections
echo "⏳ Waiting for PostgreSQL to become ready..."
until docker compose exec -T "$SERVICE" pg_isready -U postgres; do
    sleep 1
done

# 10. Reindex every database and refresh collation versions (pg_upgrade does
#     not rebuild indexes, and libc/ICU collation versions may have changed).
echo "🔧 Reindexing and refreshing collation versions..."
docker compose exec "$SERVICE" bash -c '
set -e
DBS=$(psql -U postgres -tAc "SELECT datname FROM pg_database WHERE datallowconn")
for db in $DBS; do
    echo "➡️ Reindexing $db..."
    psql -U postgres -d "$db" -c "REINDEX DATABASE \"$db\";" || true
    psql -U postgres -d "$db" -c "REINDEX SYSTEM \"$db\";" || true

    echo "➡️ Refreshing collation version for $db..."
    if ! psql -U postgres -d "$db" -c "ALTER DATABASE \"$db\" REFRESH COLLATION VERSION;" 2>/dev/null; then
        echo "⚠️ Collation refresh failed. Forcing reset..."
        psql -U postgres -d postgres -c "UPDATE pg_database SET datcollversion = NULL WHERE datname = '\''$db'\'';" || true
        psql -U postgres -d "$db" -c "ALTER DATABASE \"$db\" REFRESH COLLATION VERSION;" || \
            echo "❌ Still failed for $db. Review manually."
    fi

    echo "➡️ Refreshing system collations in $db..."
    for coll in $(psql -U postgres -d "$db" -tAc "SELECT nspname || '\''.'\'' || quote_ident(collname) FROM pg_collation JOIN pg_namespace ON collnamespace = pg_namespace.oid WHERE collprovider = '\''c'\'';"); do
        echo " 🌀 ALTER COLLATION $coll REFRESH VERSION;"
        psql -U postgres -d "$db" -c "ALTER COLLATION $coll REFRESH VERSION;" || \
            echo " ⚠️ Skipped $coll due to version mismatch (likely Alpine)."
    done
done
'

# 11. Suppress collation warnings on musl (Alpine)
if docker compose exec "$SERVICE" ldd --version 2>&1 | grep -qi 'musl'; then
    echo "🧼 Detected musl libc (Alpine). Resetting all datcollversion values..."
    docker compose exec -T "$SERVICE" psql -U postgres -d postgres -c \
        "UPDATE pg_database SET datcollversion = NULL WHERE datcollversion IS NOT NULL;"
fi

# 12./13. Make helper scripts executable if present.
for helper in ./delete_old_cluster.sh ./rollback_postgres_upgrade.sh; do
    if [[ -f "$helper" ]]; then
        chmod +x "$helper"
    fi
done

# 14. Final message. The old cluster is preserved in the file-level backup
#     created in step 2 (the former message referenced ./database_old, a
#     directory this script never creates).
echo "✅ Upgrade complete!"
echo "🎉 Postgres is now running ${NEW_IMG} with data in '${DATA_DIR}'."
echo "🧰 Old version is saved in '${BACKUP_DIR}'."
echo "💡 Next steps:"
echo " - ✅ Run smoke tests"
echo " - 🧹 If all OK - PLEASE MAKE SURE ON YOUR WEBSITE, YOU HAVE ALL THE DATA YOU NEED AFTER THE UPGRADE, run:"
echo "      rm -rf ./database_backup_* ./database_upgraded_*"
echo "🧹 Cleaning up older backups..."
find . -maxdepth 1 -type d -name "database_backup_*" ! -path "./${BACKUP_DIR##*/}" -exec rm -rf {} +
echo "✅ Only latest backup '${BACKUP_DIR}' preserved."

# Step 15: Restart full application
echo "🔄 Pulling latest images..."
if ! docker compose pull; then
    echo "❌ Failed to pull images. Aborting."
    exit 1
fi

echo "🔄 Starting full application stack..."
if ! docker compose up -d --force-recreate; then
    echo "❌ Failed to start application stack. Aborting."
    exit 1
fi

echo "✅ Deployment completed successfully."
|
||||
7
resources-limits.md
Normal file
7
resources-limits.md
Normal file
@@ -0,0 +1,7 @@
|
||||
## 📚 **References**
|
||||
|
||||
- **YouTrack Documentation:** [Docker Compose Resource Limits](https://phx-erp.youtrack.cloud/articles/INT-A-107/Docker-Compose-Resource-Limits) - Comprehensive resource allocation guide for Phoenix ERP stack
|
||||
- **Docker Documentation:** [Resource constraints](https://docs.docker.com/config/containers/resource_constraints/)
|
||||
- **Docker Compose:** [Deploy specification](https://docs.docker.com/compose/compose-file/deploy/)
|
||||
|
||||
_Last updated: 2025-07-16 (Comprehensive revision based on production docker-compose.yaml)_
|
||||
115
rollback_postgres_upgrade.sh
Normal file
115
rollback_postgres_upgrade.sh
Normal file
@@ -0,0 +1,115 @@
|
||||
#!/usr/bin/env bash
# rollback_postgres_upgrade.sh — revert a failed PostgreSQL upgrade.
#
# Restores the data directory from the most suitable file-level backup
# created by postgres_upgrade.sh, pins the previous image tag back into
# docker-compose.yaml, and restarts the stack. The faulty upgraded data
# directory is archived (not deleted) for inspection.
set -euo pipefail

COMPOSE=./docker-compose.yaml
SERVICE=postgres
DATA_DIR=./database
ROLLBACK_TIMESTAMP=$(date +%Y%m%d_%H%M%S)

echo "🧪 Validating docker-compose config..."
docker compose -f "$COMPOSE" config > /dev/null || {
    echo "❌ docker-compose config failed. Restore aborted."
    exit 1
}

# Extract current Postgres image from the rendered compose config.
CURRENT_IMG=$(docker compose -f "$COMPOSE" config | grep "image:" | grep "$SERVICE" | awk '{print $2}' || true)

if [[ -z "$CURRENT_IMG" ]]; then
    echo "❌ Could not detect current image for service '$SERVICE'."
    exit 1
fi

CURRENT_TAG=$(basename "$CURRENT_IMG")
CURRENT_VERSION=$(echo "$CURRENT_TAG" | cut -d'-' -f1) # e.g., 17.5

# Detect appropriate backup folder, newest first.
BACKUP_CANDIDATES=($(ls -td ./database_backup_* 2>/dev/null || true))

if [[ ${#BACKUP_CANDIDATES[@]} -eq 0 ]]; then
    echo "❌ No backup directory found. Cannot determine previous version."
    echo "ℹ️ Available folders:"
    ls -1d ./database_backup_* || true
    exit 1
elif [[ ${#BACKUP_CANDIDATES[@]} -eq 1 ]]; then
    SELECTED_BACKUP="${BACKUP_CANDIDATES[0]}"
    echo "ℹ️ Only one backup found. Using: ${SELECTED_BACKUP}"
else
    # With several backups present, the newest may belong to the failed
    # upgrade run itself, so fall back to the second newest.
    SELECTED_BACKUP="${BACKUP_CANDIDATES[1]}"
    echo "ℹ️ Multiple backups found. Using second latest: ${SELECTED_BACKUP}"
fi

# Extract the old image tag from the backup folder name
# (database_backup_<version>-alpine_<timestamp>).
OLD_TAG=$(basename "$SELECTED_BACKUP" | sed -E 's/database_backup_(([^_]+)-alpine).*/\1/')
OLD_IMG="postgres:${OLD_TAG}"

DELETED_UPGRADE_DIR=./database_upgraded_${CURRENT_VERSION}_${ROLLBACK_TIMESTAMP}

echo "⏪ Initiating rollback from Postgres ${CURRENT_TAG} to ${OLD_IMG}..."

# Step 1: Confirm backup exists
if [ ! -d "$SELECTED_BACKUP" ]; then
    echo "❌ Backup folder '${SELECTED_BACKUP}' not found. Aborting."
    exit 1
fi

# Step 2: Stop services
echo "🛑 Stopping running services..."
docker compose -f "$COMPOSE" down

# Step 3: Archive current (possibly broken) database
echo "📦 Archiving current database directory as '${DELETED_UPGRADE_DIR}'..."
mv "$DATA_DIR" "$DELETED_UPGRADE_DIR"

# Step 4: Restore previous version
echo "♻️ Restoring from backup folder '${SELECTED_BACKUP}'..."
cp -a "$SELECTED_BACKUP" "$DATA_DIR"

# Step 5: Restore image tag in docker-compose.yaml, scoped to the service.
echo "🔁 Reverting docker-compose image tag to Postgres ${OLD_IMG}..."
update_image_tag() {
    local svc="$1"
    local file="$2"
    local target_tag="$3"

    echo "🔁 Reverting docker-compose image tag for service '$svc' to Postgres: ${target_tag}..."

    # Use awk to scope updates within the service definition only: track
    # which top-level service we are inside, and rewrite image: lines there.
    awk -v service="$svc" -v new_tag="$target_tag" '
    BEGIN { in_service = 0 }
    /^[ ]{2}[a-zA-Z0-9_-]+:/ {
        in_service = ($1 == service ":") ? 1 : 0
    }
    in_service && /^\s*image:/ {
        sub(/postgres:[^"'"'"']+/, "postgres:" new_tag)
    }
    { print }
    ' "$file" > "${file}.tmp" && mv "${file}.tmp" "$file"
}
update_image_tag "$SERVICE" "$COMPOSE" "$OLD_TAG"

# Step 6: Restart Postgres
echo "🚀 Starting Postgres service with restored image..."
docker compose -f "$COMPOSE" up -d "$SERVICE"

# Step 7: Final messages
echo "✅ Rollback complete!"
echo "🗃️ PostgreSQL downgraded to '${OLD_IMG}' and data restored from '${SELECTED_BACKUP}'."
echo "📦 The faulty upgrade has been archived in '${DELETED_UPGRADE_DIR}'."
echo " - To clean: rm -rf ${DELETED_UPGRADE_DIR}"
echo " - To verify: docker compose logs -f $SERVICE"

# Step 8: Restart full application
echo "🔄 Pulling latest images..."
if ! docker compose pull; then
    echo "❌ Failed to pull images. Aborting."
    exit 1
fi

echo "🔄 Starting full application stack..."
if ! docker compose up -d --force-recreate; then
    echo "❌ Failed to start application stack. Please check logs."
    exit 1
fi

echo "✅ Deployment completed successfully."
|
||||
260
server_custom/config.ts
Normal file
260
server_custom/config.ts
Normal file
@@ -0,0 +1,260 @@
|
||||
/* tslint:disable:no-console */
|
||||
import { AssetServerPlugin } from '@phoenix/asset-server-plugin';
|
||||
import { ADMIN_API_PATH, API_PORT, SHOP_API_PATH, SUPER_ADMIN_USER_IDENTIFIER, WORKER_PORT } from '@phoenix/common';
|
||||
import { DefaultJobQueuePlugin, WinstonLogger, LogLevel, RedisSessionCachePlugin, SystemConfig, TypeOrmLogger, TypeORMHealthCheckStrategy } from '@phoenix/core';
|
||||
import { EmailPlugin, EmailPluginOptions, FileBasedTemplateLoader, defaultEmailHandlers } from '@phoenix/email-plugin';
|
||||
import path from 'path';
|
||||
import { ConnectionOptions } from 'typeorm';
|
||||
// Import EmailSettingsService
|
||||
|
||||
//DEV for now
|
||||
// import { BonnEmailEventHandler } from './plugins/bonn-api-plugin/handler/bonn-email-handler';
|
||||
|
||||
/**
|
||||
* Config settings used during development
|
||||
*/
|
||||
export const devConfig: SystemConfig = {
|
||||
apiOptions: {
|
||||
port: API_PORT,
|
||||
workerPort: WORKER_PORT,
|
||||
// sslPort: API_SSL_PORT,
|
||||
//sslCertPath: path.join(__dirname, './secrets/certificate.crt'),
|
||||
//sslKeyPath: path.join(__dirname, './secrets/certificate.key'),
|
||||
adminApiPath: ADMIN_API_PATH,
|
||||
shopApiPath: SHOP_API_PATH,
|
||||
cors: {
|
||||
origin: true,
|
||||
credentials: true,
|
||||
},
|
||||
adminApiPlayground: true
|
||||
},
|
||||
authOptions: {
|
||||
disableAuth: true,
|
||||
sessionSecret: 'some-secret',
|
||||
requireVerification: false,
|
||||
tokenMethod: "bearer",
|
||||
superadminCredentials: {
|
||||
identifier: SUPER_ADMIN_USER_IDENTIFIER,
|
||||
password: process.env.SUPER_ADMIN_USER_PASSWORD || 'superadmin'
|
||||
}
|
||||
},
|
||||
dbConnectionOptions: {
|
||||
// synchronize: true,
|
||||
// logging: true,
|
||||
logger: new TypeOrmLogger(),
|
||||
migrations: [path.join(__dirname, 'migrations/*.ts')],
|
||||
...getDbConfig(),
|
||||
// migrationsRun: true,
|
||||
// migrations: ["migration/*.js"],
|
||||
// cli: {
|
||||
// migrationsDir: "migration"
|
||||
// }
|
||||
// logging: ["error"]
|
||||
},
|
||||
// dbConnectionOptionsEx: [{
|
||||
// name: "sl",
|
||||
// synchronize: false,
|
||||
// host: 'localhost',
|
||||
// username: 'sa',
|
||||
// password: 'sa',
|
||||
// database: 'SL_MWAWI',
|
||||
// options: { encrypt: false, instanceName: "" },
|
||||
// extra: { trustedConnection: false },
|
||||
// logger: new TypeOrmLogger(),
|
||||
// type: 'mssql'
|
||||
// } as any],
|
||||
// paymentOptions: {
|
||||
// // paymentMethodHandlers: [examplePaymentHandler],
|
||||
// },
|
||||
customFields: {
|
||||
|
||||
Product: [
|
||||
// {
|
||||
// name: 'customFieldx',
|
||||
// type: 'string',
|
||||
// }
|
||||
|
||||
],
|
||||
DocumentLineItem: [
|
||||
|
||||
],
|
||||
},
|
||||
searchableFields: {
|
||||
processResource: [
|
||||
"scanId"
|
||||
]
|
||||
},
|
||||
logger: new WinstonLogger({ level: LogLevel.Debug }),
|
||||
workerLogger: new WinstonLogger({ level: LogLevel.Info }),
|
||||
importExportOptions: {
|
||||
importProductAssetsDir: path.join(__dirname, 'import', 'product-assets'),
|
||||
},
|
||||
defaults: {
|
||||
defaultTakeNumber: 100,
|
||||
},
|
||||
systemOptions: {
|
||||
healthChecks: [new TypeORMHealthCheckStrategy(null, { key: 'database', timeout: 1000 })],
|
||||
errorHandlers: [],
|
||||
},
|
||||
plugins: [
|
||||
// not needed for local dev
|
||||
RedisSessionCachePlugin.init({
|
||||
namespace: 'phx-session',
|
||||
redisOptions: {
|
||||
host: process.env.REDIS_HOST || 'redis',
|
||||
port: process.env.REDIS_PORT ? parseInt(process.env.REDIS_PORT) : 6379,
|
||||
db: process.env.REDIS_DB ? parseInt(process.env.REDIS_DB) : 0,
|
||||
password: process.env.REDIS_PASSWORD || 'admin'
|
||||
}
|
||||
}),
|
||||
AssetServerPlugin.init({
|
||||
route: 'remote-assets',
|
||||
assetUploadDir: path.join(__dirname, 'assets'),
|
||||
port: 5002,
|
||||
assetUrlPrefix: "\\remote-assets\\" // to make it relative for client
|
||||
}),
|
||||
// only 4 dev
|
||||
// BonnAPIPlugin.init({
|
||||
// callerID: 'b64f845c-e4ed-43e9-b1f8-2e0b274afde0',
|
||||
// apikey: 'ab9748dd-ac5f-40d8-954c-5c6d01092d80',
|
||||
// XAccessToken: '48jerefi21r9itwp7ax88fxv2v20blhh',
|
||||
// lotInfoUrl: 'https://api.zf.com/ZFMessTraceAuxSvc/v2.0/bptrace/lot-info',
|
||||
// emailReceiver: 'ds@cts-schmid.de',
|
||||
// autoBelegReportId: "9d77ddff-afec-4412-97dd-9272b497e0c3",
|
||||
// printerHost: 'DESKTOP-OEEV0PG',
|
||||
// MESAssignLotUrl: ""
|
||||
// }),
|
||||
// BonnAPIPlugin.init({
|
||||
// callerID: '18265a9e-7792-4671-88b2-8fa2ac4af5d4',
|
||||
// apikey: '83997009-248c-472d-8579-5ec681c29daa',
|
||||
// XAccessToken: 'gjyntdym13u7hsb8wxnk0bfwnxko52xo',
|
||||
// lotInfoUrl: 'https://apidev.zf.com/ZFMessTraceAuxSvc/v1.0/bptrace/lot-info'
|
||||
// }),
|
||||
// ReinerSCTPlugin.init(
|
||||
// {
|
||||
// hostname: 'https://timecard.bonn-unternehmensgruppe.de',
|
||||
// username: 'ctsapi',
|
||||
// password: 'Tje6tiuEsY'
|
||||
// }
|
||||
// )
|
||||
// ,
|
||||
//just for dev for now
|
||||
// EdiTransusPlugin.init({
|
||||
// url: "https://webconnect.transus.com/exchange.asmx",
|
||||
// clientId: "10904548",
|
||||
// clientKey: "R304WGXHKBZG"
|
||||
// }),
|
||||
DefaultJobQueuePlugin.init({
|
||||
useDatabaseForBuffer: true
|
||||
}),
|
||||
// DefaultStoragePlaceRankPlugin.init({})
|
||||
// new DefaultSearchPlugin(),
|
||||
// new ElasticsearchPlugin({
|
||||
// host: 'http://192.168.99.100',
|
||||
// port: 9200,
|
||||
// }),
|
||||
// DocusignPlugin.init({
|
||||
// devMode:true,
|
||||
// handlers: defaultDocusignHandlers,
|
||||
// assetDownloadDir: path.join(__dirname, 'docusign'),
|
||||
// assetUploadDir: path.join(__dirname, 'docusign'),
|
||||
// port: API_PORT,
|
||||
// route: "docusign"
|
||||
// }),
|
||||
EmailPlugin.init({
|
||||
route: 'mailbox',
|
||||
handlers: [...defaultEmailHandlers],
|
||||
// Dynamic Email Templates
|
||||
templateLoader: new FileBasedTemplateLoader(path.join(__dirname, '../email-plugin/templates')),
|
||||
outputPath: path.join(__dirname, 'test-emails'),
|
||||
globalTemplateVars: {
|
||||
verifyEmailAddressUrl: 'http://localhost:4201/verify',
|
||||
passwordResetUrl: 'http://localhost:4201/reset-password',
|
||||
changeEmailAddressUrl: 'http://localhost:4201/change-email-address',
|
||||
},
|
||||
// transport: {
|
||||
// type: 'smtp',
|
||||
// host: '',
|
||||
// port: null,
|
||||
// secure: false,
|
||||
// auth: {
|
||||
// user: '',
|
||||
// pass: '',
|
||||
// },
|
||||
// tls: {
|
||||
// rejectUnauthorized: false,
|
||||
// },
|
||||
// }
|
||||
} as EmailPluginOptions),
|
||||
],
|
||||
};
|
||||
|
||||
function getDbConfig(): ConnectionOptions {
|
||||
const dbType = process.env.DB || 'postgres';
|
||||
const dbHost = process.env.DB_HOST || 'localhost';
|
||||
const dbPort = +process.env.DB_PORT || 5432;
|
||||
|
||||
const connectionPoolMax = process.env.CONNECTION_POOL_MAX ?? 20;
|
||||
|
||||
const dbUsername = process.env.DB_USERNAME || 'postgres';
|
||||
const password = process.env.DB_PASSWORD || 'admin';
|
||||
const database = process.env.DB_NAME || 'phoenix'
|
||||
|
||||
if (password == "admin")
|
||||
console.warn("default postgres password is used!");
|
||||
|
||||
if (process.env.DB_HOST)
|
||||
console.log(`using DB Host ${dbHost} from env`);
|
||||
|
||||
console.log(`using Database ${database}`);
|
||||
console.log(`using User ${dbUsername}`);
|
||||
|
||||
switch (dbType) {
|
||||
case 'postgres':
|
||||
console.log('Using postgres connection at ' + dbHost);
|
||||
return {
|
||||
synchronize: true,
|
||||
type: 'postgres',
|
||||
//host: '127.0.0.1',
|
||||
host: dbHost,
|
||||
port: dbPort,
|
||||
username: dbUsername,
|
||||
password: password,
|
||||
database: database,
|
||||
// logging: "all",
|
||||
extra: {
|
||||
max: connectionPoolMax
|
||||
},
|
||||
cache: {
|
||||
alwaysEnabled: false,
|
||||
duration: 10000
|
||||
}
|
||||
};
|
||||
case 'sqlite':
|
||||
console.log('Using sqlite connection');
|
||||
return {
|
||||
type: 'sqlite',
|
||||
database: path.join(__dirname, 'phoenix.sqlite'),
|
||||
};
|
||||
case 'sqljs':
|
||||
console.log('Using sql.js connection');
|
||||
return {
|
||||
type: 'sqljs',
|
||||
autoSave: true,
|
||||
database: new Uint8Array([]),
|
||||
location: path.join(__dirname, 'phoenix.sqlite'),
|
||||
};
|
||||
case 'mysql':
|
||||
default:
|
||||
console.log('Using mysql connection');
|
||||
return {
|
||||
synchronize: true,
|
||||
type: 'mysql',
|
||||
host: '192.168.99.100',
|
||||
port: 3306,
|
||||
username: 'root',
|
||||
password: '',
|
||||
database: 'phoenix-dev',
|
||||
};
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user