commit ceb7538967d65227b68b0bb4047847a6a807d3ef Author: Yuri-Lima Date: Mon Aug 4 18:48:49 2025 +0200 Add self-hosted project folder diff --git a/.env b/.env new file mode 100644 index 0000000..b550cbe --- /dev/null +++ b/.env @@ -0,0 +1,32 @@ +# ===== Enviroment Stage ====== +NODE_ENV=production +# ====== Database Configuration ====== +POSTGRES_PASSWORD= +PGADMIN_DEFAULT_PASSWORD= +DB_NAME="phoenix" +DB_HOST="phoenixDB" +DB_PORT=5432 +DB_USERNAME="postgres" +PHX_SYSTEM_CONNECTION_POOL_MAX=5 +PHX_WORKER_CONNECTION_POOL_MAX=2 +# ===== PGADMIN CONFIGURATION ===== +PGADMIN_DEFAULT_EMAIL="info@phx-erp.de" +MAIL_SERVER="mail.phx-erp.de" +MAIL_PORT=465 +MAIL_USERNAME="internal@phx-erp.de" +MAIL_PASSWORD="8Kb2p4!o1" +SECURITY_EMAIL_SENDER="'No Reply PHX '" +# ====== Phoenix Super Admin Configuration ====== +SUPER_ADMIN_USER_PASSWORD=123 +# ====== Redis Configuration ====== +REDIS_PASSWORD= +# ===== Metris Configuration ====== +# Loki API URL -> The IP 5.75.153.161 is the Grafana Server where it has a firewall rule to allow the connection. Please, if you change here, need to be change in NGINX too. 
+LOKI_URL=http://grafana.phx-erp.de:3100/loki/api/v1/push +LOKI_RETRIES=5 +LOKI_BATCH_SIZE=500 +# ===== HTTPS-PORTAL Configuration ====== +HTTPS_PORTAL_DOMAINS= +# ====== PHX-SYSTEM Configuration ====== +PHOENIX_SYSTEM_REPLICAS=1 +PHX_HOST_NAME= diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..2a871d3 --- /dev/null +++ b/.gitignore @@ -0,0 +1,15 @@ + +# Backup files +*.bak +*.backup +*.sql +*.dump +backup/ +backups/ +*.tar.gz +*.zip +*.7z +*_backup +*_backup_* +backup_* +*.old diff --git a/README-Postgres-Upgrade.md b/README-Postgres-Upgrade.md new file mode 100644 index 0000000..9cab23f --- /dev/null +++ b/README-Postgres-Upgrade.md @@ -0,0 +1,16 @@ + +## Upgrade Modes +> [`Cloud Servers`](https://phx-erp.youtrack.cloud/articles/INT-A-105/PostgreSQL-Docker-Upgrade-Rollback-Guide-Any-Version-Any-Version?edit=true) +> [`Self Hosted`](https://phx-erp.youtrack.cloud/articles/INT-A-106/PostgreSQL-Upgrade-Rollback-Self-Hosted) + +# Quick Move + +## Upgrade PostgreSQL +```bash +docker compose --profile postgres-upgrade run --rm postgres-auto-upgrade +``` + +## Rollback PostgreSQL (if needed) +```bash +docker compose --profile postgres-rollback run --rm postgres-auto-rollback +``` diff --git a/app_custom/custom-style.css b/app_custom/custom-style.css new file mode 100644 index 0000000..ee9dc08 --- /dev/null +++ b/app_custom/custom-style.css @@ -0,0 +1,3 @@ +/* .login-logo-img { + background-image: url("/assets/custom/loginscreen-logo.png")!Important; +} */ \ No newline at end of file diff --git a/crash_diagnose.sh b/crash_diagnose.sh new file mode 100644 index 0000000..8417ef6 --- /dev/null +++ b/crash_diagnose.sh @@ -0,0 +1,87 @@ +#!/bin/bash +set -euo pipefail + +YELLOW='\033[1;33m' +RED='\033[1;31m' +NC='\033[0m' # No color + +echo -e "๐Ÿ“ฆ ${YELLOW}PHX Crash Diagnostic Tool (Docker + Linux Server)${NC}" +echo "๐Ÿ” Boot Timeline:" +journalctl --list-boots | head -3 + +echo -e "\nโš ๏ธ OOM Kills:" +journalctl -b -1 | grep -i 'killed process' || 
echo "None found." + +echo -e "\nโš ๏ธ Out of Memory Events:" +journalctl -b -1 | grep -i 'out of memory' || echo "None found." + +echo -e "\nโš ๏ธ systemd-oomd Events:" +journalctl -b -1 | grep systemd-oomd || echo "None found." + +echo -e "\n๐Ÿ”ฅ CPU/Load Pressure (dmesg/syslog):" +journalctl -b -1 | grep -Ei 'cpu|load average|soft lockup|hung task' || echo "None found." + +echo -e "\n๐Ÿšจ System Errors (priority 0โ€“3):" +journalctl -b -1 -p 3..0 || echo "None found." + +if command -v docker &> /dev/null && docker info >/dev/null 2>&1; then + echo -e "\n๐Ÿณ Docker detected and running." + + CONTAINERS=$(docker ps -aq) + if [[ -z "$CONTAINERS" ]]; then + echo -e "\nโš ๏ธ No containers found. Skipping container-specific diagnostics." + else + echo -e "\n๐Ÿณ Docker OOM-Killed Containers:" + docker inspect $CONTAINERS 2>/dev/null | grep -B10 '"OOMKilled": true' || echo "No containers were OOMKilled." + + echo -e "\n๐Ÿ” Recently Restarted Containers:" + docker ps -a --format '{{.Names}}\t{{.Status}}' | grep -i 'restarted' || echo "No recent restarts." 
+ + echo -e "\n๐Ÿ“‰ Top 5 Containers by Memory Usage (now):" + docker stats --no-stream --format "table {{.Name}}\t{{.MemUsage}}" | sort -k2 -hr | head -n 6 + + echo -e "\n๐Ÿ“ˆ Top 5 Containers by CPU Usage (now):" + docker stats --no-stream --format "table {{.Name}}\t{{.CPUPerc}}" | sort -k2 -hr | head -n 6 + + echo -e "\n๐Ÿ“‹ Docker Container Memory Limits:" + docker inspect $CONTAINERS --format '{{ .Name }}: {{ .HostConfig.Memory }} bytes' | grep -v ': 0' || echo "None set" + + echo -e "\n๐Ÿ“‹ Containers With No Memory Limit:" + docker inspect $CONTAINERS --format '{{ .Name }}: {{ .HostConfig.Memory }}' | awk '$2 == 0 {print $1}' + + echo -e "\n๐Ÿ“ Last 100 Log Lines from PHX Containers:" + for name in $(docker ps -a --format '{{.Names}}' | grep -i 'phoenix\|pgadmin\|postgres'); do + echo -e "\n--- Logs for $name ---" + docker logs --tail=100 "$name" 2>/dev/null || echo "No logs for $name" + done + fi +else + echo -e "\n๐Ÿณ ${RED}Docker is not installed or not running.${NC}" +fi + +# Historical CPU/memory usage with 'sar' +if command -v sar &> /dev/null; then + echo -e "\n๐Ÿ“Š Analyzing Memory and CPU Usage via sar (last 60 mins if possible)..." + + echo -e "\n๐Ÿ” Memory Usage (High Usage if >90%):" + sar -r | awk ' + BEGIN { OFS="\t"; print "Time", "%memused", "%commit", "Status" } + /^[0-9]/ { + memused = $4; commit = $8; + status = (memused+0 > 90 || commit+0 > 95) ? "โš ๏ธ HIGH" : "OK"; + printf "%s\t%s%%\t%s%%\t%s\n", $1, memused, commit, status; + }' + + echo -e "\n๐Ÿ” CPU Usage (High if %idle < 10 or %system > 90):" + sar -u | awk ' + BEGIN { OFS="\t"; print "Time", "%user", "%system", "%idle", "Status" } + /^[0-9]/ { + user = $3; sys = $5; idle = $8; + status = (idle+0 < 10 || sys+0 > 90) ? "โš ๏ธ HIGH" : "OK"; + printf "%s\t%s%%\t%s%%\t%s%%\t%s\n", $1, user, sys, idle, status; + }' +else + echo -e "\nโ„น๏ธ 'sar' (sysstat) is not installed. Skipping historical CPU/memory analysis." +fi + +echo -e "\nโœ… ${YELLOW}Done. 
Use this script after crashes or schedule it in cron for proactive monitoring.${NC}" \ No newline at end of file diff --git a/docker-compose.yaml b/docker-compose.yaml new file mode 100644 index 0000000..fdca665 --- /dev/null +++ b/docker-compose.yaml @@ -0,0 +1,509 @@ +--- +services: + postgres-auto-upgrade: + profiles: + - postgres-upgrade # ๐ŸŸข This isolates the service + image: alpine:3.19 + container_name: postgres_auto_upgrade + working_dir: /opt/phx + volumes: + - .:/opt/phx:rw + - /var/run/docker.sock:/var/run/docker.sock + entrypoint: > + sh -c " + apk add --no-cache bash coreutils grep sed findutils curl docker-cli dos2unix && + mkdir -p ~/.docker/cli-plugins && + curl -SL https://github.com/docker/compose/releases/download/v2.27.0/docker-compose-linux-x86_64 -o ~/.docker/cli-plugins/docker-compose && + chmod +x ~/.docker/cli-plugins/docker-compose && + chmod +x ./postgres_upgrade.sh && + ./postgres_upgrade.sh" + restart: 'no' + depends_on: [] + network_mode: bridge + postgres-auto-rollback: + profiles: + - postgres-rollback # ๐ŸŸข This isolates the service + image: alpine:3.19 + container_name: postgres_rollback + working_dir: /opt/phx + volumes: + - .:/opt/phx:rw + - /var/run/docker.sock:/var/run/docker.sock + entrypoint: > + sh -c " + apk add --no-cache bash coreutils grep sed findutils curl docker-cli dos2unix && + mkdir -p ~/.docker/cli-plugins && + curl -SL https://github.com/docker/compose/releases/download/v2.27.0/docker-compose-linux-x86_64 -o ~/.docker/cli-plugins/docker-compose && + chmod +x ~/.docker/cli-plugins/docker-compose && + chmod +x ./rollback_postgres_upgrade.sh && + ./rollback_postgres_upgrade.sh" + restart: 'no' + depends_on: [] + network_mode: bridge + postgres: + restart: always + image: "postgres:15.1-alpine" + container_name: phoenixDB # Hostname + # logging: + # driver: loki + # options: + # loki-url: "${LOKI_URL}" + # loki-retries: "${LOKI_RETRIES}" + # loki-batch-size: "${LOKI_BATCH_SIZE}" + # loki-external-labels: 
"service=phx-postgres,env=prod" + networks: + - backend + environment: + DEBUG: true + POSTGRES_DB: phoenix + POSTGRES_PASSWORD: "8*6&Ti3TJxN^" + volumes: + - "./database:/var/lib/postgresql/data" + # deploy: + # restart_policy: # Define how the service should restart when it fails + # condition: on-failure # Only restart if the container exits with a non-zero code + # delay: 5s # Wait 5 seconds before attempting to restart + # max_attempts: 5 # Maximum number of restart attempts before giving up + # window: 120s # Time window to evaluate restart attempts (resets counter after this period) + # resources: # Resource allocation and limits for the container + # limits: # Maximum resources the container can use + # cpus: "0.75" # Maximum CPU cores (75% of one core) + # memory: 768M # Maximum memory usage (768 megabytes) + # reservations: # Guaranteed minimum resources for the container + # cpus: "0.25" # Reserved CPU cores (25% of one core) + # memory: 256M # Reserved memory (256 megabytes) + healthcheck: + test: [ "CMD-SHELL", "pg_isready -U postgres" ] + interval: 5s # Time between each health check + timeout: 2s # Number of failures before marking as unhealthy + retries: 5 # Grace period before health checks start + pgadmin: + restart: always + image: dpage/pgadmin4:9.6.0 + container_name: pgadmin4-ui + ports: + - "5050:80" + user: "5050:5050" + # logging: + # driver: loki + # options: + # loki-url: "${LOKI_URL}" + # loki-retries: "${LOKI_RETRIES}" + # loki-batch-size: "${LOKI_BATCH_SIZE}" + # loki-external-labels: "service=phx-pgadmin,env=prod" + networks: + - backend + - frontend + environment: + PGADMIN_DEFAULT_EMAIL: "info@phx-erp.de" + PGADMIN_DEFAULT_PASSWORD: "123" + PGADMIN_CONFIG_SERVER_MODE: 'True' + PGADMIN_CONFIG_PROXY_X_PROTO_COUNT: 1 + PGADMIN_SERVER_JSON_FILE: '/var/lib/pgadmin/servers.json' + PGADMIN_REPLACE_SERVERS_ON_STARTUP: 'True' + PGADMIN_CONFIG_DATA_DIR: "'/var/lib/pgadmin'" + PGADMIN_CONFIG_MASTER_PASSWORD_REQUIRED: 'False' + PGPASSFILE: 
/var/lib/pgadmin/pgpass + PGPASS_HOST: "phoenixDB" + PGPASS_PORT: 5432 + PGPASS_DB: "phoenix" + PGPASS_USER: "postgres" + PGPASS_PASSWORD: "" + ALLOW_SAVE_PASSWORD: 'False' + MFA_ENABLED: 'True' + MFA_FORCE_REGISTRATION: 'False' + MFA_SUPPORTED_METHODS: 'email' + MFA_EMAIL_SUBJECT: 'Your MFA code by PHX-ERP' + MAX_LOGIN_ATTEMPTS: 5 + ENHANCED_COOKIE_PROTECTION: 'True' + SHOW_GRAVATAR_IMAGE: 'True' + SECURITY_EMAIL_SENDER: "'No Reply PHX '" + MAIL_SERVER: "mail.phx-erp.de" + MAIL_PORT: 465 + MAIL_USE_SSL: 'False' + MAIL_USE_TLS: 'False' + MAIL_USERNAME: "internal@phx-erp.de" + MAIL_PASSWORD: "8Kb2p4!o1" + MAIL_DEBUG: 'False' + volumes: + - ./pgadmin/data:/var/lib/pgadmin + - ./pgadmin/pgadmin-entrypoint.sh:/docker-entrypoint.sh:ro + mem_limit: 512M + memswap_limit: 512M + deploy: + restart_policy: # Define how the service should restart when it fails + condition: on-failure # Only restart if the container exits with a non-zero code + delay: 5s # Wait 5 seconds before attempting to restart + max_attempts: 5 # Maximum number of restart attempts before giving up + window: 120s # Time window to evaluate restart attempts (resets counter after this period) + resources: # Resource allocation and limits for the container + limits: # Maximum resources the container can use + cpus: "1.0" # Maximum CPU cores (100% of one core) + memory: 512M # Maximum memory usage (512 megabytes) + reservations: # Guaranteed minimum resources for the container + cpus: "0.15" # Reserved CPU cores (15% of one core) + memory: 250M # Reserved memory (250 megabytes) + entrypoint: ["/bin/sh", "/docker-entrypoint.sh"] + depends_on: + postgres: + condition: service_healthy + healthcheck: + test: ["CMD", "wget", "-O", "-", "http://localhost:80/misc/ping"] + interval: 15s + timeout: 10s + retries: 5 + start_period: 120s + phoenix-app: + restart: always + image: "phxerp/phoenix-app:alpha" + container_name: phoenix-app + ports: + - "3000:3000" # Restrict to only allow access from Grafana Server IP + # 
logging: + # driver: loki + # options: + # loki-url: "${LOKI_URL}" + # loki-retries: "${LOKI_RETRIES}" + # loki-batch-size: "${LOKI_BATCH_SIZE}" + # loki-external-labels: "service=phx-app,env=prod,project=phoenix" + volumes: + - ./app_custom:/usr/share/nginx/html/assets/custom + # - ./nginx/nginx.conf:/etc/nginx/nginx.conf # Uncomment this if you want to use override the default nginx.conf + # - ./nginx/includes:/etc/nginx/includes:ro # Uncomment this if you want to use override the default includes + networks: + - backend + - frontend + # deploy: + # restart_policy: # Define how the service should restart when it fails + # condition: on-failure # Only restart if the container exits with a non-zero code + # delay: 5s # Wait 5 seconds before attempting to restart + # max_attempts: 5 # Maximum number of restart attempts before giving up + # window: 120s # Time window to evaluate restart attempts (resets counter after this period) + # resources: # Resource allocation and limits for the container + # limits: # Maximum resources the container can use + # cpus: "0.35" # Maximum CPU cores (35% of one core) + # memory: 384M # Maximum memory usage (384 megabytes) + # reservations: # Guaranteed minimum resources for the container + # cpus: "0.10" # Reserved CPU cores (10% of one core) + # memory: 128M # Reserved memory (128 megabytes) + healthcheck: + test: ["CMD", "wget", "--spider", "-q", "http://phoenix-app/login"] # localhost checks that the NGINX server inside the container is serving something at the root + interval: 10s # check every 10 seconds + timeout: 5s # allow 5 seconds per check + retries: 5 # mark as unhealthy after 5 failures + start_period: 15s # wait 15 seconds after container start before checking + phoenix-system: + restart: always + image: "phxerp/phoenix-system:alpha" + # logging: + # driver: loki + # options: + # loki-url: "${LOKI_URL}" + # loki-retries: "${LOKI_RETRIES}" + # loki-batch-size: "${LOKI_BATCH_SIZE}" + # loki-external-labels: 
"service=phoenix-system,env=prod" + environment: + - "DB_HOST=${DB_HOST}" + - "DB_NAME=${DB_NAME}" + - "DB_PASSWORD=${POSTGRES_PASSWORD}" + - "DB_USERNAME=${DB_USERNAME}" + - "SUPER_ADMIN_USER_PASSWORD=${SUPER_ADMIN_USER_PASSWORD}" + - "REDIS_PASSWORD=${REDIS_PASSWORD}" + - NODE_ENV=${NODE_ENV} + - PHX_HOST_NAME=${PHX_HOST_NAME} + - PERFORMANCE_STRUCTURED_LOGGING=${PERFORMANCE_STRUCTURED_LOGGING} + - PERFORMANCE_WARNING_THRESHOLD=${PERFORMANCE_WARNING_THRESHOLD} + - PERFORMANCE_DETAILED_MEMORY=${PERFORMANCE_DETAILED_MEMORY} + command: ["npm", "run", "start:server"] + deploy: + replicas: ${PHOENIX_SYSTEM_REPLICAS} #change here if u want to have more replicas. Cant find a way to set via variable right now + # restart_policy: # Define how the service should restart when it fails + # condition: on-failure # Only restart if the container exits with a non-zero code + # delay: 5s # Wait 5 seconds before attempting to restart + # max_attempts: 5 # Maximum number of restart attempts before giving up + # window: 120s # Time window to evaluate restart attempts (resets counter after this period) + # resources: # Resource allocation and limits for the container + # limits: # Maximum resources the container can use + # cpus: "1.50" # Maximum CPU cores (150% of one core) + # memory: 1600M # Maximum memory usage (1600 megabytes) + # reservations: # Guaranteed minimum resources for the container + # cpus: "0.50" # Reserved CPU cores (50% of one core) + # memory: 768M # Reserved memory (768 megabytes) + networks: + backend: + aliases: + - phoenix-system + depends_on: + postgres: + condition: service_healthy + phoenix-redis: + condition: service_healthy + healthcheck: + test: ["CMD-SHELL", "curl -s http://phoenix-system:3000/health | grep -q '\"admin-api\":{\"status\":\"up\"}' && curl -s http://phoenix-system:3000/health | grep -q '\"database\":{\"status\":\"up\"}'"] # Checks both admin-api and database status + interval: 10s # Time between each health check + timeout: 10s # Max time 
to wait for each check + retries: 20 # Number of failures before marking as unhealthy + start_period: 60s # Grace period before health checks start + volumes: + - "./assets:/usr/src/app/packages/dev-server/assets" + - "./server_custom:/usr/src/app/packages/dev-server/custom" + # - "./logs:/usr/src/app/packages/dev-server/logs" + phoenix-worker: + restart: always + image: "phxerp/phoenix-system:alpha" + container_name: "phoenix-worker" + ports: + - "3001:3001" # Restrict to only allow access from Grafana Server IP + # logging: + # driver: loki + # options: + # loki-url: "${LOKI_URL}" + # loki-retries: "${LOKI_RETRIES}" + # loki-batch-size: "${LOKI_BATCH_SIZE}" + # loki-external-labels: "service=phx-worker,env=prod" + networks: + - backend + environment: + - "DB_HOST=${DB_HOST}" + - "DB_NAME=${DB_NAME}" + - "DB_PASSWORD=${POSTGRES_PASSWORD}" + - "DB_USERNAME=${DB_USERNAME}" + - "SUPER_ADMIN_USER_PASSWORD=${SUPER_ADMIN_USER_PASSWORD}" + - REDIS_PASSWORD=${REDIS_PASSWORD} + - NODE_ENV=${NODE_ENV} + - PHX_HOST_NAME=${PHX_HOST_NAME} + - PERFORMANCE_STRUCTURED_LOGGING=${PERFORMANCE_STRUCTURED_LOGGING} + - PERFORMANCE_WARNING_THRESHOLD=${PERFORMANCE_WARNING_THRESHOLD} + - PERFORMANCE_DETAILED_MEMORY=${PERFORMANCE_DETAILED_MEMORY} + command: ['npm', 'run', 'start:worker'] + # deploy: + # restart_policy: # Define how the service should restart when it fails + # condition: on-failure # Only restart if the container exits with a non-zero code + # delay: 5s # Wait 5 seconds before attempting to restart + # max_attempts: 5 # Maximum number of restart attempts before giving up + # window: 120s # Time window to evaluate restart attempts (resets counter after this period) + # resources: # Resource allocation and limits for the container + # limits: # Maximum resources the container can use + # cpus: '2.0' # Maximum CPU cores (200% of one core) + # memory: 2G # Maximum memory usage (2 gigabytes) + # reservations: # Guaranteed minimum resources for the container + # cpus: '0.5' # 
Reserved CPU cores (50% of one core) + # memory: 512M # Reserved memory (512 megabytes) + depends_on: + phoenix-system: + condition: service_healthy + postgres: + condition: service_healthy + healthcheck: + test: [ "CMD-SHELL", "curl -s http://phoenix-worker:3001/health | grep -q '\"status\":\"ok\"'" ] # Check if worker responds with status ok + interval: 10s # Time between each health check + timeout: 6s # Max time to wait for each check + retries: 20 # Grace period before health checks start + start_period: 30s # Grace period before health checks start + volumes: + - "./assets:/usr/src/app/packages/dev-server/assets" + - "./server_custom:/usr/src/app/packages/dev-server/custom" + # - "./logs:/usr/src/app/packages/dev-server/logs" + phoenix-redis: + image: 'bitnami/redis:latest' + container_name: redis + command: /opt/bitnami/scripts/redis/run.sh # Not good, but as agreed. At some point i can start using this: --maxmemory + add eviction policy + user: root + # logging: + # driver: loki + # options: + # loki-url: "${LOKI_URL}" + # loki-retries: "${LOKI_RETRIES}" + # loki-batch-size: "${LOKI_BATCH_SIZE}" + # loki-external-labels: "service=phx-redis,env=prod" + networks: + - backend + restart: always + environment: + ALLOW_EMPTY_PASSWORD: "no" + REDIS_PASSWORD: ${REDIS_PASSWORD} + # deploy: + # restart_policy: # Define how the service should restart when it fails + # condition: on-failure # Only restart if the container exits with a non-zero code + # delay: 5s # Wait 5 seconds before attempting to restart + # max_attempts: 5 # Maximum number of restart attempts before giving up + # window: 120s # Time window to evaluate restart attempts (resets counter after this period) + # resources: # Resource allocation and limits for the container + # limits: # Maximum resources the container can use + # cpus: "0.25" # Maximum CPU cores (25% of one core) + # memory: 100M # Maximum memory usage (100 megabytes) + # reservations: # Guaranteed minimum resources for the container + # 
cpus: "0.05" # Reserved CPU cores (5% of one core) + # memory: 32M # Reserved memory (32 megabytes) + healthcheck: + test: [ + "CMD-SHELL", + "redis-cli --no-auth-warning -a ${REDIS_PASSWORD} ping | grep PONG && test -w /bitnami/redis/data" + ] + interval: 5s + retries: 10 + timeout: 5s + depends_on: + postgres: + condition: service_healthy + volumes: + - "./redis/data:/bitnami/redis/data" + phoenix-health-exporter: + image: phxerp/phoenix-health-exporter:alpha + container_name: health_exporter + restart: unless-stopped + # logging: + # driver: loki + # options: + # loki-url: "${LOKI_URL}" + # loki-retries: "${LOKI_RETRIES}" + # loki-batch-size: "${LOKI_BATCH_SIZE}" + # loki-external-labels: "service=phx-health-exporter,env=prod" + ports: + - "9800:9800" + environment: + DB_HOST: ${DB_HOST} + DB_NAME: ${DB_NAME} + DB_PASSWORD: ${POSTGRES_PASSWORD} + DB_USERNAME: ${DB_USERNAME} + REDIS_PASSWORD: ${REDIS_PASSWORD} + networks: + - frontend + - backend + volumes: + - /etc/hostname:/etc/host_hostname:ro # This ensures the container always uses the real machine hostname, even if restarted or recreated. 
+ security_opt: + - no-new-privileges:true + memswap_limit: 512M + deploy: + restart_policy: # Define how the service should restart when it fails + condition: on-failure # Only restart if the container exits with a non-zero code + delay: 5s # Wait 5 seconds before attempting to restart + max_attempts: 5 # Maximum number of restart attempts before giving up + window: 120s # Time window to evaluate restart attempts (resets counter after this period) + resources: # Resource allocation and limits for the container + limits: # Maximum resources the container can use + cpus: "0.5" # Maximum CPU cores (50% of one core) + memory: 256M # Maximum memory usage (256 megabytes) + reservations: # Guaranteed minimum resources for the container + cpus: "0.1" # Reserved CPU cores (10% of one core) + memory: 64M # Reserved memory (64 megabytes) + depends_on: + phoenix-system: + condition: service_healthy + phoenix-worker: + condition: service_healthy + postgres: + condition: service_healthy + healthcheck: + test: ["CMD-SHELL", "curl -sf http://localhost:9800/healthz || exit 1"] + interval: 1m + timeout: 5s + retries: 3 + start_period: 15s + node-exporter: + image: quay.io/prometheus/node-exporter:latest + container_name: node_exporter + # logging: + # driver: loki + # options: + # loki-url: "${LOKI_URL}" + # loki-retries: "${LOKI_RETRIES}" + # loki-batch-size: "${LOKI_BATCH_SIZE}" + # loki-external-labels: "service=phx-node-exporter,env=prod" + networks: + - metrics + - frontend + restart: unless-stopped + ports: + - "9100:9100" # Restrict to only allow access from Grafana Server IP + command: + - "--path.procfs=/host/proc" + - "--path.sysfs=/host/sys" + - "--path.rootfs=/host" + - "--collector.filesystem.ignored-mount-points=^/(sys|proc|dev)($$|/)" + volumes: + - "/proc:/host/proc:ro" + - "/sys:/host/sys:ro" + - "/:/host:ro,rslave" + security_opt: + - no-new-privileges:true + memswap_limit: 512M + deploy: + restart_policy: # Define how the service should restart when it fails + 
condition: on-failure # Only restart if the container exits with a non-zero code + delay: 5s # Wait 5 seconds before attempting to restart + max_attempts: 5 # Maximum number of restart attempts before giving up + window: 120s # Time window to evaluate restart attempts (resets counter after this period) + resources: # Resource allocation and limits for the container + limits: # Maximum resources the container can use + cpus: "0.25" # Maximum CPU cores (25% of one core) + memory: 128M # Maximum memory usage (128 megabytes) + reservations: # Guaranteed minimum resources for the container + cpus: "0.05" # Reserved CPU cores (5% of one core) + memory: 32M # Reserved memory (32 megabytes) + depends_on: + phoenix-worker: # This is to avoid alocation of resources to the node-exporter if the phoenix-worker is not healthy yet. + condition: service_healthy + healthcheck: + test: ["CMD", "wget", "-qO-", "http://localhost:9100/metrics"] + interval: 15s + timeout: 5s + retries: 3 + start_period: 20s + # nginx-exporter: + # image: nginx/nginx-prometheus-exporter:1.4.2 + # container_name: nginx_exporter + # restart: unless-stopped + # # logging: + # # driver: loki + # # options: + # # loki-url: "${LOKI_URL}" + # # loki-retries: "${LOKI_RETRIES}" + # # loki-batch-size: "${LOKI_BATCH_SIZE}" + # # loki-external-labels: "service=phx-nginx-exporter,env=prod" + # ports: + # - "9113:9113" # Restrict to only allow access from Grafana Server IP + # command: + # - '--nginx.scrape-uri=http://phoenix-app/stub_status' + # security_opt: + # - no-new-privileges:true + # deploy: + # resources: + # limits: + # cpus: '0.25' + # memory: 128M + # depends_on: + # phoenix-app: + # condition: service_healthy + # networks: + # - frontend + # - metrics + # healthcheck: + # test: ["CMD", "wget", "-qO-", "http://localhost:9113/metrics"] # Not working as expected + # interval: 15s + # timeout: 5s + # retries: 3 + # start_period: 10s + +networks: + backend: + driver: bridge + external: false + ipam: + config: 
+ - subnet: 172.19.0.0/16 + + frontend: + driver: bridge + external: false + ipam: + config: + - subnet: 172.20.0.0/16 + + metrics: + driver: bridge + external: false + ipam: + config: + - subnet: 172.22.0.0/16 diff --git a/helper.md b/helper.md new file mode 100644 index 0000000..6f37cc5 --- /dev/null +++ b/helper.md @@ -0,0 +1,8 @@ +Check if server.json is present +```bash +docker exec -it pgadmin_container ls -l /var/lib/pgadmin/servers.json +``` +Check the content of server.json +```bash +docker exec -it pgadmin_container cat /var/lib/pgadmin/servers.json +``` \ No newline at end of file diff --git a/pgadmin/pgadmin-entrypoint.sh b/pgadmin/pgadmin-entrypoint.sh new file mode 100644 index 0000000..72c0aa1 --- /dev/null +++ b/pgadmin/pgadmin-entrypoint.sh @@ -0,0 +1,49 @@ +#!/bin/sh +set -e + +echo "๐Ÿ”ง Entrypoint: Ensuring .pgpass directory and file" + +PGADMIN_HOME="/var/lib/pgadmin" +PGPASS_PATH="${PGADMIN_HOME}/pgpass" +SERVERS_JSON_PATH="/var/lib/pgadmin/servers.json" + +# Ensure parent directory exists +mkdir -p "$PGADMIN_HOME" + +# Create or overwrite .pgpass file +echo "${PGPASS_HOST}:${PGPASS_PORT}:${PGPASS_DB}:${PGPASS_USER}:${PGPASS_PASSWORD}" > "$PGPASS_PATH" +chmod 600 "$PGPASS_PATH" +chown 5050:5050 "$PGPASS_PATH" +export PGPASSFILE="$PGPASS_PATH" + +echo "โœ… .pgpass ready at $PGPASS_PATH" +echo "๐Ÿ› ๏ธ Generating servers.json for pgAdmin..." 
+ +# Try to ensure /pgadmin4 is owned by 5050 if possible +if [ -d /pgadmin4 ]; then + echo "๐Ÿ”ง Attempting to chown /pgadmin4 to 5050:5050" + chown 5050:5050 /pgadmin4 2>/dev/null || echo "โš ๏ธ Could not chown /pgadmin4 (likely read-only or permission issue)" +fi + +cat < "$SERVERS_JSON_PATH" +{ + "Servers": { + "1": { + "Name": "Phoenix DB", + "Group": "PHX GROUP", + "Host": "${PGPASS_HOST}", + "Port": ${PGPASS_PORT}, + "MaintenanceDB": "${PGPASS_DB}", + "Username": "${PGPASS_USER}", + "SSLMode": "prefer", + "PassFile": "$PGPASSFILE" + } + } +} +EOF + +chmod 600 "$SERVERS_JSON_PATH" +chown 5050:5050 "$SERVERS_JSON_PATH" +echo "โœ… servers.json created at $SERVERS_JSON_PATH" + +exec /entrypoint.sh "$@" \ No newline at end of file diff --git a/postgres_upgrade.sh b/postgres_upgrade.sh new file mode 100644 index 0000000..c2e5c77 --- /dev/null +++ b/postgres_upgrade.sh @@ -0,0 +1,259 @@ +#!/usr/bin/env bash +set -euo pipefail +trap 'echo "โš ๏ธ An error occurred. Consider running rollback or checking backups."' ERR + +COMPOSE=./docker-compose.yaml +SERVICE=postgres +DATA_DIR=./database +PG_VERSION_FILE="$DATA_DIR/PG_VERSION" + +echo "๐Ÿงช Validating docker-compose config..." +docker compose -f "$COMPOSE" config > /dev/null || { + echo "โŒ docker-compose config failed. Restore aborted." + exit 1 +} + +if [ ! -d "$DATA_DIR" ]; then + echo "โŒ Expected data directory '${DATA_DIR}' does not exist. Aborting." + exit 1 +fi + +# echo "๐Ÿ” Checking if Postgres service is already running..." +# if ! docker compose ps --services --filter "status=running" | grep -q "^${SERVICE}$"; then +# echo "โš ๏ธ '${SERVICE}' service is not running. Skipping auto-upgrade step." +# echo "๐Ÿ”„ Attempting to start '${SERVICE}' service to detect version..." +# docker compose up -d $SERVICE + +# echo "โณ Waiting for PostgreSQL to become ready..." 
#############################################################################
# PostgreSQL major-version upgrade core.
#
# Relies on variables defined earlier in this script:
#   COMPOSE          - path to docker-compose.yaml
#   SERVICE          - compose service name of the database (postgres)
#   DATA_DIR         - live cluster data directory (e.g. ./database)
#   PG_VERSION_FILE  - $DATA_DIR/PG_VERSION
#
# Flow: detect old/new versions -> file-level backup -> logical SQL dump ->
# pg_upgrade (tianon image) -> promote new cluster -> reindex / refresh
# collations -> operator hints.
#
# NOTE(review): two commented-out "wait for $SERVICE pg_isready" loops that
# previously lived here were dead code (the dump below runs its own
# throwaway container and polls readiness itself); they have been removed.
#############################################################################

echo "📡 Detecting running PostgreSQL version..."
# PG_VERSION inside the data dir records the cluster's version; no running
# server is required to read it.
OLD_VERSION=$(cat "$PG_VERSION_FILE")
echo "🔍 Detected running PostgreSQL version: $OLD_VERSION"
OLD_MAJOR=${OLD_VERSION%%.*}
echo "🔍 Detected running PostgreSQL major version: $OLD_MAJOR"
# Bare image *tag* (no "postgres:" prefix); also reused in backup/dump names.
OLD_IMG="${OLD_VERSION}-alpine"

echo "🆕 Detecting target version from docker-compose.yaml..."
# Use the rendered config so env interpolation is resolved. The second grep
# assumes the image reference contains the service name (true for postgres).
NEW_IMG=$(docker compose -f "$COMPOSE" config | grep "image:" | grep "$SERVICE" | awk '{print $2}')

# Ensure NEW_IMG was detected
if [[ -z "$NEW_IMG" ]]; then
    echo "❌ Failed to detect target Postgres image from $COMPOSE. Aborting."
    exit 1
fi

# NEW_IMG is a full reference such as "postgres:17.5-alpine"; reduce it to
# the bare version number for the comparisons below.
NEW_VERSION=$(echo "$NEW_IMG" | sed -E 's/^postgres://; s/-alpine.*$//')
NEW_MAJOR=${NEW_VERSION%%.*}

echo "🔍 From $OLD_VERSION (major $OLD_MAJOR) → $NEW_VERSION (major $NEW_MAJOR)"

# Refuse pre-release or debian-variant targets.
if [[ "$NEW_VERSION" == *beta* || "$NEW_VERSION" == *rc* || "$NEW_VERSION" == *bookworm* ]]; then
    echo "❌ Target version $NEW_VERSION appears to be a pre-release (beta/rc/bookworm). Skipping upgrade."
    echo "💡 Please upgrade to a stable version of Postgres."
    exit 1
fi

# Early exit if no upgrade needed — pg_upgrade only applies to major bumps.
if [[ "$OLD_MAJOR" -eq "$NEW_MAJOR" ]]; then
    echo "✅ Already running target major version. Skipping upgrade."
    exit 0
fi

# Paths
BACKUP_DIR=${DATA_DIR}_backup_${OLD_IMG}_$(date +%Y%m%d_%H%M%S)
OLD_DATA_DIR=./database_old        # legacy name; kept for reference, unused below
UPGRADE_DIR=./database_tmp_upgrade

# 1. Stop services so the cluster files are quiescent.
echo "🛑 Stopping services..."
docker compose -f "$COMPOSE" down

# 2. File-level backup (cp -a preserves ownership and permissions, which
#    Postgres requires on its data directory).
echo "🔐 Creating backup at ${BACKUP_DIR}..."
cp -a "$DATA_DIR" "$BACKUP_DIR"

# 3. Logical safety net: full SQL dump taken by a throwaway container of the
#    OLD version mounted directly on the live data directory.
echo "📦 Dumping full SQL backup using temporary PostgreSQL container..."
DUMP_FILE="backup_dump_${OLD_IMG}_$(date +%Y%m%d_%H%M%S).sql"
TMP_CONTAINER_NAME="pg-dump-${OLD_MAJOR}"

# Run temporary postgres container with existing data dir
docker run -d --rm \
    --name "$TMP_CONTAINER_NAME" \
    -v "$DATA_DIR:/var/lib/postgresql/data" \
    -e POSTGRES_USER=postgres \
    "postgres:${OLD_IMG}"

echo "⏳ Waiting for pg_dump container to become ready..."
dump_ready=0
for i in $(seq 1 30); do
    if docker exec "$TMP_CONTAINER_NAME" pg_isready -U postgres > /dev/null 2>&1; then
        dump_ready=1
        break
    fi
    echo "⏳ Still waiting... (${i}s)"
    sleep 1
done

if [[ "$dump_ready" -ne 1 ]]; then
    echo "❌ Temporary container for SQL dump did not become ready. Aborting."
    docker rm -f "$TMP_CONTAINER_NAME" > /dev/null 2>&1 || true
    exit 1
fi

docker exec "$TMP_CONTAINER_NAME" pg_dumpall -U postgres > "$DUMP_FILE"

# Keep only the newest SQL dump to bound disk usage. Generated names never
# contain whitespace, so word-splitting the ls output is safe here.
echo "🧹 Cleaning up older SQL dump files..."
ALL_DUMPS=( $(ls -t backup_dump_*.sql 2>/dev/null || true) )

if [[ "${#ALL_DUMPS[@]}" -gt 1 ]]; then
    for dump in "${ALL_DUMPS[@]:1}"; do
        echo "🗑️ Removing old dump: $dump"
        rm -f "$dump"
    done
    echo "✅ Only latest dump '${ALL_DUMPS[0]}' preserved."
else
    echo "ℹ️ Only one dump file found. No cleanup needed."
fi

docker rm -f "$TMP_CONTAINER_NAME" > /dev/null 2>&1 || true

# 4. Run pg_upgrade: reads the backup copy, writes the converted cluster
#    into UPGRADE_DIR (--copy leaves the source untouched).
echo "📁 Creating upgrade workspace ${UPGRADE_DIR}..."
mkdir -p "$UPGRADE_DIR"

echo "🔧 Running pg_upgrade via tianon image..."
docker run --rm \
    -v "${BACKUP_DIR}:/var/lib/postgresql/${OLD_MAJOR}/data" \
    -v "${UPGRADE_DIR}:/var/lib/postgresql/${NEW_MAJOR}/data" \
    "tianon/postgres-upgrade:${OLD_MAJOR}-to-${NEW_MAJOR}" --copy

# 5. Promote new data. BACKUP_DIR still holds the untouched old cluster.
echo "🔁 Swapping data directories..."
rm -rf "$DATA_DIR"
mv "$UPGRADE_DIR" "$DATA_DIR"

# 6. pg_upgrade emits a default pg_hba.conf; restore the site-specific one.
echo "🔄 Restoring pg_hba.conf if it existed..."
cp "${BACKUP_DIR}/pg_hba.conf" "${DATA_DIR}/pg_hba.conf" || echo "✅ No custom pg_hba.conf to restore."

# 7. Point docker-compose at the new image.
# BUGFIX: the replacement used to be "postgres:${NEW_IMG}", but NEW_IMG
# already carries the "postgres:" prefix (see detection above), which wrote
# the invalid reference "postgres:postgres:<ver>-alpine" into the file.
# NOTE(review): the pattern assumes the image value is unquoted in YAML —
# confirm against the production compose file.
echo "📝 Updating docker-compose to use image ${NEW_IMG}..."
sed -i.bak -E "s#postgres:[^ ]*${OLD_MAJOR}[^ ]*#${NEW_IMG}#" "$COMPOSE"

# 8. Start container
echo "🚀 Starting upgraded container..."
docker compose -f "$COMPOSE" up -d "$SERVICE"

# 9. Wait until DB is accepting connections.
# NOTE(review): intentionally unbounded — first start after pg_upgrade can
# be slow; abort manually if it never comes up.
echo "⏳ Waiting for PostgreSQL to become ready..."
until docker compose exec -T "$SERVICE" pg_isready -U postgres; do
    sleep 1
done

# 10. Reindex all databases and refresh collation versions; collation
#     metadata changes across major versions (musl/Alpine needs a forced
#     reset, handled below and in step 11).
echo "🔧 Reindexing and refreshing collation versions..."
docker compose exec "$SERVICE" bash -c '
    set -e
    DBS=$(psql -U postgres -tAc "SELECT datname FROM pg_database WHERE datallowconn")
    for db in $DBS; do
        echo "➡️ Reindexing $db..."
        psql -U postgres -d "$db" -c "REINDEX DATABASE \"$db\";" || true
        psql -U postgres -d "$db" -c "REINDEX SYSTEM \"$db\";" || true

        echo "➡️ Refreshing collation version for $db..."
        if ! psql -U postgres -d "$db" -c "ALTER DATABASE \"$db\" REFRESH COLLATION VERSION;" 2>/dev/null; then
            echo "⚠️ Collation refresh failed. Forcing reset..."
            psql -U postgres -d postgres -c "UPDATE pg_database SET datcollversion = NULL WHERE datname = '\''$db'\'';" || true
            psql -U postgres -d "$db" -c "ALTER DATABASE \"$db\" REFRESH COLLATION VERSION;" || \
                echo "❌ Still failed for $db. Review manually."
        fi

        echo "➡️ Refreshing system collations in $db..."
        for coll in $(psql -U postgres -d "$db" -tAc "SELECT nspname || '\''.'\'' || quote_ident(collname) FROM pg_collation JOIN pg_namespace ON collnamespace = pg_namespace.oid WHERE collprovider = '\''c'\'';"); do
            echo "   🌀 ALTER COLLATION $coll REFRESH VERSION;"
            psql -U postgres -d "$db" -c "ALTER COLLATION $coll REFRESH VERSION;" || \
                echo "   ⚠️ Skipped $coll due to version mismatch (likely Alpine)."
        done
    done
'

# 11. Suppress collation warnings on musl (Alpine).
if docker compose exec "$SERVICE" ldd --version 2>&1 | grep -qi 'musl'; then
    echo "🧼 Detected musl libc (Alpine). Resetting all datcollversion values..."
    docker compose exec -T "$SERVICE" psql -U postgres -d postgres -c \
        "UPDATE pg_database SET datcollversion = NULL WHERE datcollversion IS NOT NULL;"
fi

# 12. Make delete_old_cluster.sh executable (generated by pg_upgrade).
DELETE_SCRIPT="./delete_old_cluster.sh"
if [[ -f "$DELETE_SCRIPT" ]]; then
    chmod +x "$DELETE_SCRIPT"
fi

# 13. Make rollback script executable
ROLLBACK_SCRIPT="./rollback_postgres_upgrade.sh"
if [[ -f "$ROLLBACK_SCRIPT" ]]; then
    chmod +x "$ROLLBACK_SCRIPT"
fi

# 14. Final message
echo "✅ Upgrade complete!"
echo "🎉 Postgres is now running ${NEW_IMG} with data in '${DATA_DIR}'."
# BUGFIX: previously pointed at ${OLD_DATA_DIR}, a directory this script
# never creates; the old cluster actually lives in ${BACKUP_DIR}.
echo "🧰 Old version is saved in '${BACKUP_DIR}'."
+echo "💡 Next steps:"
+echo "   - ✅ Run smoke tests"
+echo "   - 🧹 If all OK - PLEASE MAKE SURE ON YOUR WEBSITE, YOU HAVE ALL THE DATA YOU NEED AFTER THE UPGRADE, run:"
+echo "        rm -rf ./database_backup_* ./database_upgraded_*"
+# Auto-prune: delete every backup dir except the one created by this run.
+# The ! -path guard compares against the basename of BACKUP_DIR, which find
+# prints as "./<name>" at -maxdepth 1.
+echo "🧹 Cleaning up older backups..."
+find . -maxdepth 1 -type d -name "database_backup_*" ! -path "./${BACKUP_DIR##*/}" -exec rm -rf {} +
+echo "✅ Only latest backup '${BACKUP_DIR}' preserved."
+
+# Step 15: Restart full application
+echo "🔄 Pulling latest images..."
+if ! docker compose pull; then
+    echo "❌ Failed to pull images. Aborting."
+    exit 1
+fi
+
+echo "🔄 Starting full application stack..."
+if ! docker compose up -d --force-recreate; then
+    echo "❌ Failed to start application stack. Aborting."
+    exit 1
+fi
+
+echo "✅ Deployment completed successfully."
\ No newline at end of file
diff --git a/resources-limits.md b/resources-limits.md
new file mode 100644
index 0000000..d58b3cc
--- /dev/null
+++ b/resources-limits.md
@@ -0,0 +1,7 @@
+## 📚 **References**
+
+- **YouTrack Documentation:** [Docker Compose Resource Limits](https://phx-erp.youtrack.cloud/articles/INT-A-107/Docker-Compose-Resource-Limits) - Comprehensive resource allocation guide for Phoenix ERP stack
+- **Docker Documentation:** [Resource constraints](https://docs.docker.com/config/containers/resource_constraints/)
+- **Docker Compose:** [Deploy specification](https://docs.docker.com/compose/compose-file/deploy/)
+
+_Last updated: 2025-07-16 (Comprehensive revision based on production docker-compose.yaml)_
diff --git a/rollback_postgres_upgrade.sh b/rollback_postgres_upgrade.sh
new file mode 100644
index 0000000..2c78df7
--- /dev/null
+++ b/rollback_postgres_upgrade.sh
@@ -0,0 +1,115 @@
+#!/usr/bin/env bash
+# Rollback companion to the pg_upgrade script: restores the file-level
+# backup taken before the upgrade and reverts the compose image tag.
+set -euo pipefail
+
+COMPOSE=./docker-compose.yaml
+SERVICE=postgres
+DATA_DIR=./database
+ROLLBACK_TIMESTAMP=$(date +%Y%m%d_%H%M%S)
+
+# Bail out early if the compose file is unparseable — nothing below is safe
+# to run against a broken compose configuration.
+echo "🧪 Validating docker-compose config..."
+docker compose -f "$COMPOSE" config > /dev/null || {
+    echo "❌ docker-compose config failed. Restore aborted."
+    exit 1
+}
+
+# Extract current Postgres image
+# Assumes the rendered image reference contains "$SERVICE" (true for
+# postgres images).
+CURRENT_IMG=$(docker compose -f "$COMPOSE" config | grep "image:" | grep "$SERVICE" | awk '{print $2}' || true)
+
+if [[ -z "$CURRENT_IMG" ]]; then
+    echo "❌ Could not detect current image for service '$SERVICE'."
+    exit 1
+fi
+
+CURRENT_TAG=$(basename "$CURRENT_IMG")
+CURRENT_VERSION=$(echo "$CURRENT_TAG" | cut -d'-' -f1) # e.g., 17.5
+
+# Detect appropriate backup folder
+# NOTE(review): ls output is word-split into the array; backup dir names are
+# generated without whitespace, so this is safe here.
+BACKUP_CANDIDATES=($(ls -td ./database_backup_* 2>/dev/null || true))
+
+if [[ ${#BACKUP_CANDIDATES[@]} -eq 0 ]]; then
+    echo "❌ No backup directory found. Cannot determine previous version."
+    echo "ℹ️ Available folders:"
+    ls -1d ./database_backup_* || true
+    exit 1
+elif [[ ${#BACKUP_CANDIDATES[@]} -eq 1 ]]; then
+    SELECTED_BACKUP="${BACKUP_CANDIDATES[0]}"
+    echo "ℹ️ Only one backup found. Using: ${SELECTED_BACKUP}"
+else
+    # NOTE(review): index [1] picks the SECOND-newest backup. The upgrade
+    # script snapshots *before* upgrading and then prunes older backups, so
+    # the newest backup is normally the correct pre-upgrade state — confirm
+    # this index is intentional before relying on multi-backup rollbacks.
+    SELECTED_BACKUP="${BACKUP_CANDIDATES[1]}"
+    echo "ℹ️ Multiple backups found. Using second latest: ${SELECTED_BACKUP}"
+fi
+
+# Extract version from selected backup folder
+# Folder names look like "database_backup_<version>-alpine_<timestamp>";
+# capture "<version>-alpine" as the image tag to restore.
+OLD_TAG=$(basename "$SELECTED_BACKUP" | sed -E 's/database_backup_(([^_]+)-alpine).*/\1/')
+OLD_IMG="postgres:${OLD_TAG}"
+
+DELETED_UPGRADE_DIR=./database_upgraded_${CURRENT_VERSION}_${ROLLBACK_TIMESTAMP}
+
+echo "⏪ Initiating rollback from Postgres ${CURRENT_TAG} to ${OLD_IMG}..."
+
+# Step 1: Confirm backup exists
+if [ ! -d "$SELECTED_BACKUP" ]; then
+    echo "❌ Backup folder '${SELECTED_BACKUP}' not found. Aborting."
+    exit 1
+fi
+
+# Step 2: Stop services
+echo "🛑 Stopping running services..."
+docker compose -f "$COMPOSE" down
+
+# Step 3: Archive current (possibly broken) database
+echo "📦 Archiving current database directory as '${DELETED_UPGRADE_DIR}'..."
+mv "$DATA_DIR" "$DELETED_UPGRADE_DIR"
+
+# Step 4: Restore previous version
+echo "♻️ Restoring from backup folder '${SELECTED_BACKUP}'..."
+cp -a "$SELECTED_BACKUP" "$DATA_DIR"
+
+# Step 5: Restore image tag in docker-compose.yaml
+echo "🔁 Reverting docker-compose image tag to Postgres ${OLD_IMG}..."
+# Rewrites only the image: line that belongs to the given service block.
+# Args: $1 = service name, $2 = compose file path, $3 = bare image tag.
+update_image_tag() {
+    local svc="$1"
+    local file="$2"
+    local target_tag="$3"
+
+    echo "🔁 Reverting docker-compose image tag for service '$svc' to Postgres: ${target_tag}..."
+
+    # Use awk to scope updates within the service definition only
+    # A two-space-indented "name:" line toggles in_service; awk string
+    # concatenation (service ":") rebuilds the "<svc>:" token to compare.
+    # The '"'"' runs close/reopen the surrounding shell single quotes so a
+    # literal quote can appear inside the awk program.
+    awk -v service="$svc" -v new_tag="$target_tag" '
+    BEGIN { in_service = 0 }
+    /^[ ]{2}[a-zA-Z0-9_-]+:/ {
+        in_service = ($1 == service ":") ? 1 : 0
+    }
+    in_service && /^\s*image:/ {
+        sub(/postgres:[^"'"'"']+/, "postgres:" new_tag)
+    }
+    { print }
+    ' "$file" > "${file}.tmp" && mv "${file}.tmp" "$file"
+}
+update_image_tag "$SERVICE" "$COMPOSE" "$OLD_TAG"
+
+# Step 6: Restart Postgres
+echo "🚀 Starting Postgres service with restored image..."
+docker compose -f "$COMPOSE" up -d "$SERVICE"
+
+# Step 7: Final messages
+echo "✅ Rollback complete!"
+echo "🗃️ PostgreSQL downgraded to '${OLD_IMG}' and data restored from '${SELECTED_BACKUP}'."
+echo "📦 The faulty upgrade has been archived in '${DELETED_UPGRADE_DIR}'."
+echo "   - To clean: rm -rf ${DELETED_UPGRADE_DIR}"
+echo "   - To verify: docker compose logs -f $SERVICE"
+
+# Step 8: Restart full application
+echo "🔄 Pulling latest images..."
+if ! docker compose pull; then
+    echo "❌ Failed to pull images. Aborting."
+    exit 1
+fi
+
+echo "🔄 Starting full application stack..."
+if ! docker compose up -d --force-recreate; then
+    echo "❌ Failed to start application stack. Please check logs."
+    exit 1
+fi
+
+echo "✅ Deployment completed successfully."
\ No newline at end of file
diff --git a/server_custom/config.ts b/server_custom/config.ts
new file mode 100644
index 0000000..96bff7e
--- /dev/null
+++ b/server_custom/config.ts
@@ -0,0 +1,260 @@
+/* tslint:disable:no-console */
+// Phoenix server configuration: devConfig (SystemConfig consumed by the
+// @phoenix/core bootstrap) plus getDbConfig(), which builds the TypeORM
+// connection options from environment variables.
+import { AssetServerPlugin } from '@phoenix/asset-server-plugin';
+import { ADMIN_API_PATH, API_PORT, SHOP_API_PATH, SUPER_ADMIN_USER_IDENTIFIER, WORKER_PORT } from '@phoenix/common';
+import { DefaultJobQueuePlugin, WinstonLogger, LogLevel, RedisSessionCachePlugin, SystemConfig, TypeOrmLogger, TypeORMHealthCheckStrategy } from '@phoenix/core';
+import { EmailPlugin, EmailPluginOptions, FileBasedTemplateLoader, defaultEmailHandlers } from '@phoenix/email-plugin';
+import path from 'path';
+import { ConnectionOptions } from 'typeorm';
+// Import EmailSettingsService
+
+//DEV for now
+// import { BonnEmailEventHandler } from './plugins/bonn-api-plugin/handler/bonn-email-handler';
+
+/**
+ * Config settings used during development
+ *
+ * NOTE(review): dev-only switches live here — disableAuth: true,
+ * adminApiPlayground: true and the hard-coded sessionSecret must not reach
+ * production; confirm how this config is selected per environment.
+ */
+export const devConfig: SystemConfig = {
+    apiOptions: {
+        port: API_PORT,
+        workerPort: WORKER_PORT,
+        // sslPort: API_SSL_PORT,
+        //sslCertPath: path.join(__dirname, './secrets/certificate.crt'),
+        //sslKeyPath: path.join(__dirname, './secrets/certificate.key'),
+        adminApiPath: ADMIN_API_PATH,
+        shopApiPath: SHOP_API_PATH,
+        cors: {
+            origin: true,
+            credentials: true,
+        },
+        adminApiPlayground: true
+    },
+    authOptions: {
+        disableAuth: true,
+        sessionSecret: 'some-secret',
+        requireVerification: false,
+        tokenMethod: "bearer",
+        superadminCredentials: {
+            identifier: SUPER_ADMIN_USER_IDENTIFIER,
+            password: process.env.SUPER_ADMIN_USER_PASSWORD || 'superadmin'
+        }
+    },
+    dbConnectionOptions: {
+        // synchronize: true,
+        // logging: true,
+        logger: new TypeOrmLogger(),
+        migrations: [path.join(__dirname, 'migrations/*.ts')],
+        // Spread last so env-derived settings from getDbConfig() win on any
+        // overlapping keys.
+        ...getDbConfig(),
+        // migrationsRun: true,
+        // migrations: ["migration/*.js"],
+        // cli: {
+        //     migrationsDir: "migration"
+        // }
+        // logging: ["error"]
+    },
+    // dbConnectionOptionsEx: [{
+    //     name: "sl",
+    //     synchronize: false,
+    //     host: 'localhost',
+    //     username: 'sa',
+    //     password: 'sa',
+    //     database: 'SL_MWAWI',
+    //     options: { encrypt: false, instanceName: "" },
+    //     extra: { trustedConnection: false },
+    //     logger: new TypeOrmLogger(),
+    //     type: 'mssql'
+    // } as any],
+    // paymentOptions: {
+    //     // paymentMethodHandlers: [examplePaymentHandler],
+    // },
+    customFields: {
+
+        Product: [
+            // {
+            //     name: 'customFieldx',
+            //     type: 'string',
+            // }
+
+        ],
+        DocumentLineItem: [
+
+        ],
+    },
+    searchableFields: {
+        processResource: [
+            "scanId"
+        ]
+    },
+    // Main API logs at Debug, worker at Info.
+    logger: new WinstonLogger({ level: LogLevel.Debug }),
+    workerLogger: new WinstonLogger({ level: LogLevel.Info }),
+    importExportOptions: {
+        importProductAssetsDir: path.join(__dirname, 'import', 'product-assets'),
+    },
+    defaults: {
+        defaultTakeNumber: 100,
+    },
+    systemOptions: {
+        healthChecks: [new TypeORMHealthCheckStrategy(null, { key: 'database', timeout: 1000 })],
+        errorHandlers: [],
+    },
+    plugins: [
+        // not needed for local dev
+        RedisSessionCachePlugin.init({
+            namespace: 'phx-session',
+            redisOptions: {
+                host: process.env.REDIS_HOST || 'redis',
+                port: process.env.REDIS_PORT ? parseInt(process.env.REDIS_PORT) : 6379,
+                db: process.env.REDIS_DB ? parseInt(process.env.REDIS_DB) : 0,
+                password: process.env.REDIS_PASSWORD || 'admin'
+            }
+        }),
+        AssetServerPlugin.init({
+            route: 'remote-assets',
+            assetUploadDir: path.join(__dirname, 'assets'),
+            port: 5002,
+            assetUrlPrefix: "\\remote-assets\\" // to make it relative for client
+        }),
+        // only 4 dev
+        // NOTE(review): the commented blocks below contain live-looking API
+        // keys and passwords; consider moving them out of source control.
+        // BonnAPIPlugin.init({
+        //     callerID: 'b64f845c-e4ed-43e9-b1f8-2e0b274afde0',
+        //     apikey: 'ab9748dd-ac5f-40d8-954c-5c6d01092d80',
+        //     XAccessToken: '48jerefi21r9itwp7ax88fxv2v20blhh',
+        //     lotInfoUrl: 'https://api.zf.com/ZFMessTraceAuxSvc/v2.0/bptrace/lot-info',
+        //     emailReceiver: 'ds@cts-schmid.de',
+        //     autoBelegReportId: "9d77ddff-afec-4412-97dd-9272b497e0c3",
+        //     printerHost: 'DESKTOP-OEEV0PG',
+        //     MESAssignLotUrl: ""
+        // }),
+        // BonnAPIPlugin.init({
+        //     callerID: '18265a9e-7792-4671-88b2-8fa2ac4af5d4',
+        //     apikey: '83997009-248c-472d-8579-5ec681c29daa',
+        //     XAccessToken: 'gjyntdym13u7hsb8wxnk0bfwnxko52xo',
+        //     lotInfoUrl: 'https://apidev.zf.com/ZFMessTraceAuxSvc/v1.0/bptrace/lot-info'
+        // }),
+        // ReinerSCTPlugin.init(
+        //     {
+        //         hostname: 'https://timecard.bonn-unternehmensgruppe.de',
+        //         username: 'ctsapi',
+        //         password: 'Tje6tiuEsY'
+        //     }
+        // )
+        // ,
+        //just for dev for now
+        // EdiTransusPlugin.init({
+        //     url: "https://webconnect.transus.com/exchange.asmx",
+        //     clientId: "10904548",
+        //     clientKey: "R304WGXHKBZG"
+        // }),
+        DefaultJobQueuePlugin.init({
+            useDatabaseForBuffer: true
+        }),
+        // DefaultStoragePlaceRankPlugin.init({})
+        // new DefaultSearchPlugin(),
+        // new ElasticsearchPlugin({
+        //     host: 'http://192.168.99.100',
+        //     port: 9200,
+        // }),
+        // DocusignPlugin.init({
+        //     devMode:true,
+        //     handlers: defaultDocusignHandlers,
+        //     assetDownloadDir: path.join(__dirname, 'docusign'),
+        //     assetUploadDir: path.join(__dirname, 'docusign'),
+        //     port: API_PORT,
+        //     route: "docusign"
+        // }),
+        EmailPlugin.init({
+            route: 'mailbox',
+            handlers: [...defaultEmailHandlers],
+            // Dynamic Email Templates
+            templateLoader: new FileBasedTemplateLoader(path.join(__dirname, '../email-plugin/templates')),
+            // No transport configured: emails are written to test-emails/.
+            outputPath: path.join(__dirname, 'test-emails'),
+            globalTemplateVars: {
+                verifyEmailAddressUrl: 'http://localhost:4201/verify',
+                passwordResetUrl: 'http://localhost:4201/reset-password',
+                changeEmailAddressUrl: 'http://localhost:4201/change-email-address',
+            },
+            // transport: {
+            //     type: 'smtp',
+            //     host: '',
+            //     port: null,
+            //     secure: false,
+            //     auth: {
+            //         user: '',
+            //         pass: '',
+            //     },
+            //     tls: {
+            //         rejectUnauthorized: false,
+            //     },
+            // }
+        } as EmailPluginOptions),
+    ],
+};
+
+/**
+ * Builds TypeORM ConnectionOptions from environment variables.
+ * DB selects the driver ('postgres' default; 'sqlite', 'sqljs', 'mysql').
+ */
+function getDbConfig(): ConnectionOptions {
+    const dbType = process.env.DB || 'postgres';
+    const dbHost = process.env.DB_HOST || 'localhost';
+    // NOTE(review): with DB_PORT unset, +undefined is NaN, which falls
+    // through || to 5432 — works, but prefer an explicit parse.
+    const dbPort = +process.env.DB_PORT || 5432;
+
+    // NOTE(review): when CONNECTION_POOL_MAX is set this is a *string*
+    // (env vars are strings) handed to extra.max below, while the default
+    // is the number 20 — confirm the pg driver accepts a string here.
+    const connectionPoolMax = process.env.CONNECTION_POOL_MAX ?? 20;
+
+    const dbUsername = process.env.DB_USERNAME || 'postgres';
+    const password = process.env.DB_PASSWORD || 'admin';
+    const database = process.env.DB_NAME || 'phoenix'
+
+    if (password == "admin")
+        console.warn("default postgres password is used!");
+
+    if (process.env.DB_HOST)
+        console.log(`using DB Host ${dbHost} from env`);
+
+    console.log(`using Database ${database}`);
+    console.log(`using User ${dbUsername}`);
+
+    switch (dbType) {
+        case 'postgres':
+            console.log('Using postgres connection at ' + dbHost);
+            return {
+                synchronize: true,
+                type: 'postgres',
+                //host: '127.0.0.1',
+                host: dbHost,
+                port: dbPort,
+                username: dbUsername,
+                password: password,
+                database: database,
+                // logging: "all",
+                extra: {
+                    // Upper bound of the pg connection pool.
+                    max: connectionPoolMax
+                },
+                cache: {
+                    // Query cache off by default; 10s TTL when enabled per-query.
+                    alwaysEnabled: false,
+                    duration: 10000
+                }
+            };
+        case 'sqlite':
+            console.log('Using sqlite connection');
+            return {
+                type: 'sqlite',
+                database: path.join(__dirname, 'phoenix.sqlite'),
+            };
+        case 'sqljs':
+            console.log('Using sql.js connection');
+            return {
+                type: 'sqljs',
+                autoSave: true,
+                database: new Uint8Array([]),
+                location: path.join(__dirname, 'phoenix.sqlite'),
+            };
+        case 'mysql':
+        default:
+            console.log('Using mysql connection');
+            return {
+                synchronize: true,
+                type: 'mysql',
+                host: '192.168.99.100',
+                port: 3306,
+                username: 'root',
+                password: '',
+                database: 'phoenix-dev',
+            };
+    }
+}
\ No newline at end of file