first commit

This commit is contained in:
Yuri-Lima
2025-07-29 09:46:59 +02:00
commit b54fa7ec76
17 changed files with 177550 additions and 0 deletions

33
.env Normal file

@@ -0,0 +1,33 @@
# ===== Environment Stage ======
NODE_ENV=production
# ====== Database Configuration ======
PG_ADMIN_ACTUAL=ikVJvAyb6Gr6Dc5
POSTGRES_PASSWORD=XDjhSX7H7z
PHX_SYSTEM_CONNECTION_POOL_MAX=5
PHX_WORKER_CONNECTION_POOL_MAX=2
DB_NAME="phoenix"
DB_HOST="phoenixDB"
DB_PORT=5432
DB_USERNAME="postgres"
PGADMIN_DEFAULT_EMAIL="info@phx-erp.de"
MAIL_SERVER="mail.phx-erp.de"
MAIL_PORT=465
MAIL_USERNAME="internal@phx-erp.de"
MAIL_PASSWORD="0rB0@et68"
SECURITY_EMAIL_SENDER="'No Reply PHX <no-reply@phx-erp.de>'"
PGADMIN_DEFAULT_PASSWORD=XDjhSX7H7z
# ====== Phoenix Super Admin Configuration ======
SUPER_ADMIN_USER_PASSWORD=akuuURkxM7
# ====== Redis Configuration ======
REDIS_PASSWORD=admin
# ====== Worker Configuration ======
RUN_JOB_QUEUE=0
# ===== Metrics Configuration ======
# Loki API URL -> The IP 5.75.153.161 is the Grafana server, which has a firewall rule allowing this connection. If you change it here, it also needs to be changed in NGINX.
LOKI_URL=http://grafana.phx-erp.de:3100/loki/api/v1/push
LOKI_RETRIES=5
LOKI_BATCH_SIZE=500
# ===== HTTPS-PORTAL Configuration ======
HTTPS_PORTAL_DOMAINS='alpha.phx-erp.de -> phoenix-app'
# ====== PHX-SYSTEM Configuration ======
PHOENIX_SYSTEM_REPLICAS=1

8
.gitignore vendored Normal file

@@ -0,0 +1,8 @@
fail2ban
https_portal
logs
database
assets
database_bkp
pgadmin
redis

150030
alpha_backup.sql Normal file

File diff suppressed because one or more lines are too long

23421
backups/dump_db.sql Normal file

File diff suppressed because one or more lines are too long

0
backups/dump_db_1.sql Normal file

224
docker-compose copy.yaml Normal file

@@ -0,0 +1,224 @@
---
services:
postgres:
restart: always
image: "postgres:15.1-alpine"
container_name: phoenixDB # Hostname
# logging:
# driver: loki
# options:
# loki-url: "${LOKI_URL}"
# loki-retries: "${LOKI_RETRIES}"
# loki-batch-size: "${LOKI_BATCH_SIZE}"
# loki-external-labels: "service=phx-postgres,env=prod"
networks:
- backend
environment:
DEBUG: true
POSTGRES_DB: ${DB_NAME}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
volumes:
- "./database:/var/lib/postgresql/data"
healthcheck:
test: [ "CMD-SHELL", "pg_isready -U postgres" ]
interval: 5s # Time between each health check
timeout: 2s # Max time to wait for each check
retries: 5 # Number of failures before marking as unhealthy
pgadmin:
image: dpage/pgadmin4
container_name: pgAdmin4_Ui
user: "5050:5050"
# logging:
# driver: loki
# options:
# loki-url: "${LOKI_URL}"
# loki-retries: "${LOKI_RETRIES}"
# loki-batch-size: "${LOKI_BATCH_SIZE}"
# loki-external-labels: "service=phx-pgadmin,env=prod"
networks:
- backend
- frontend
environment:
PGADMIN_DEFAULT_EMAIL: ${PGADMIN_DEFAULT_EMAIL}
PGADMIN_DEFAULT_PASSWORD: ${SUPER_ADMIN_USER_PASSWORD}
PGADMIN_CONFIG_SERVER_MODE: 'True'
PGADMIN_CONFIG_WSGI_SCRIPT_NAME: "'/pgadmin4'"
PGADMIN_CONFIG_PROXY_X_PROTO_COUNT: 1
PGADMIN_SERVER_JSON_FILE: '/var/lib/pgadmin/servers.json'
PGADMIN_REPLACE_SERVERS_ON_STARTUP: 'True'
PGADMIN_CONFIG_DATA_DIR: "'/var/lib/pgadmin'"
PGADMIN_CONFIG_MASTER_PASSWORD_REQUIRED: 'False'
# pgpass dynamic vars
PGPASSFILE: /var/lib/pgadmin/pgpass
PGPASS_HOST: ${DB_HOST}
PGPASS_PORT: ${DB_PORT}
PGPASS_DB: ${DB_NAME}
PGPASS_USER: ${DB_USERNAME}
PGPASS_PASSWORD: ${POSTGRES_PASSWORD}
# Other config
ALLOW_SAVE_PASSWORD: 'False'
MFA_ENABLED: 'True'
MFA_FORCE_REGISTRATION: 'False'
MFA_SUPPORTED_METHODS: 'email'
MFA_EMAIL_SUBJECT: 'Your MFA code by PHX-ERP'
MAX_LOGIN_ATTEMPTS: 5
ENHANCED_COOKIE_PROTECTION: 'True'
SHOW_GRAVATAR_IMAGE: 'True'
SECURITY_EMAIL_SENDER: ${SECURITY_EMAIL_SENDER}
MAIL_SERVER: ${MAIL_SERVER}
MAIL_PORT: ${MAIL_PORT}
MAIL_USE_SSL: 'False'
MAIL_USE_TLS: 'False'
MAIL_USERNAME: ${MAIL_USERNAME}
MAIL_PASSWORD: ${MAIL_PASSWORD}
MAIL_DEBUG: 'False'
volumes:
- ./pgadmin/data:/var/lib/pgadmin
- ./pgadmin/pgadmin-entrypoint.sh:/docker-entrypoint.sh:ro
entrypoint: ["/bin/sh", "/docker-entrypoint.sh"]
depends_on:
postgres:
condition: service_healthy
healthcheck:
test: ["CMD", "wget", "-O", "-", "http://localhost:80/misc/ping"]
interval: 15s
timeout: 10s
retries: 5
start_period: 60s
phoenix-app:
restart: always
image: "phxerp/phoenix-app:alpha"
container_name: phoenixAppProd
volumes:
- "/opt/containers/phx/app_custom:/usr/share/nginx/html/assets/custom"
- "/opt/containers/phx/nginx/nginx.conf:/etc/nginx/nginx.conf"
- ./nginx/includes:/etc/nginx/includes:ro
ports:
- "8081:80" # This port might be relate to Traefik, needs to be checked, since this compose is different from our default compose.
- "3000:3000" # Restrict to only allow access from Grafana Server IP
labels:
- "traefik.enable=true"
- "traefik.http.routers.phxalpha.entrypoints=https"
- "traefik.http.routers.phxalpha.rule=Host(`alpha.phx-erp.de`)"
- "traefik.http.routers.phxalpha.middlewares=secHeaders@file"
- "traefik.http.routers.phxalpha.tls=true"
- "traefik.http.routers.phxalpha.tls.certresolver=http"
- "traefik.http.routers.phxalpha.service=phxalpha"
- "traefik.http.services.phxalpha.loadbalancer.server.port=80"
- "traefik.docker.network=proxy"
- "traefik.http.routers.phxalpha-insecure.entrypoints=http"
- "traefik.http.routers.phxalpha-insecure.rule=Host(`alpha.phx-erp.de`)"
- "traefik.http.routers.phxalpha-insecure.tls=false"
networks:
- backend
- frontend
depends_on:
pgadmin:
condition: service_healthy
phoenix-system:
restart: always
image: "phxerp/phoenix-system:alpha"
# logging:
# driver: loki
# options:
# loki-url: "${LOKI_URL}"
# loki-retries: "${LOKI_RETRIES}"
# loki-batch-size: "${LOKI_BATCH_SIZE}"
# loki-external-labels: "service=phoenix-system,env=prod"
environment:
- "DB_HOST=phoenixDB"
- "DB_NAME=${DB_NAME}"
- "DB_PASSWORD=${POSTGRES_PASSWORD}"
- "DB_USERNAME=postgres"
- "SUPER_ADMIN_USER_PASSWORD=${SUPER_ADMIN_USER_PASSWORD}"
- "REDIS_PASSWORD=${REDIS_PASSWORD}"
- RUN_JOB_QUEUE=${RUN_JOB_QUEUE}
- NODE_ENV=${NODE_ENV}
command: ["npm", "run", "start:server"]
depends_on:
postgres:
condition: service_healthy
volumes:
- "./logs:/usr/src/app/packages/dev-server/logs"
- "asset-data:/usr/src/app/packages/dev-server/assets"
- "/opt/containers/phx/server_custom:/usr/src/app/packages/dev-server/custom" # it seems tobe no effect if we make changes, not 100% of sure!
networks:
- postgres
deploy:
replicas: 1
phoenix-worker:
restart: always
image: "phxerp/phoenix-system:alpha"
environment:
- DB_HOST=phoenixDB
- "DB_PASSWORD=${POSTGRES_PASSWORD}"
- DB_USERNAME=postgres
- "SUPER_ADMIN_USER_PASSWORD=${SUPER_ADMIN_USER_PASSWORD}"
- REDIS_PASSWORD=${REDIS_PASSWORD}
# command: ["npm", "run", "start:worker"]
entrypoint: ./entrypoint-phoenix-worker.sh
depends_on:
postgres:
condition: service_healthy
volumes:
# - "/opt/containers/phx/assets:/usr/src/app/packages/dev-server/custo/assets"
# - "asset-data:/usr/src/app/packages/dev-server/assets"
- "/opt/containers/phx/server_custom:/usr/src/app/packages/dev-server/custom"
- "./logs:/usr/src/app/packages/dev-server/logs"
networks:
- postgres
node_exporter:
image: quay.io/prometheus/node-exporter:latest
container_name: node_exporter
ports:
- "9100:9100" # Exposing the metrics port
networks:
- metrics
restart: unless-stopped
command:
- "--path.procfs=/host/proc"
- "--path.sysfs=/host/sys"
- "--path.rootfs=/host"
- "--collector.filesystem.ignored-mount-points=^/(sys|proc|dev)($$|/)"
volumes:
- "/proc:/host/proc:ro"
- "/sys:/host/sys:ro"
- "/:/host:ro,rslave"
phoenix-redis:
image: 'bitnami/redis:latest'
container_name: redis
#command: redis-server --save 20 1 --appendonly no --requirepass ${REDIS_PASSWORD} --loglevel warning
command: /opt/bitnami/scripts/redis/run.sh --maxmemory 100mb --appendonly no
user: root # Bitnami images normally run as a non-root user; the /bitnami/redis/data directory inside the container is already owned by UID 1001, avoiding permission issues.
restart: always
environment:
# REDIS_APPENDFSYNC: "always"
ALLOW_EMPTY_PASSWORD: "no"
# REDIS_DISABLE_COMMANDS: FLUSHDB,FLUSHALL,CONFIG
REDIS_PASSWORD: ${REDIS_PASSWORD}
healthcheck:
test: [ "CMD", "redis-cli", "--raw", "incr", "ping" ]
networks:
- postgres
volumes:
- /opt/containers/phx/redis/data:/bitnami/redis/data
- /opt/containers/phx/redis/tmp:/opt/bitnami/redis/tmp # ✅ Fix permission issue
# - /opt/containers/phx/redis/logs:/opt/bitnami/redis/logs # ✅ Fix logs permission issue
# - ./redis.conf:/opt/bitnami/redis/etc/redis.conf # ✅ Use a writable redis.conf
volumes:
db-data: null
app-data: null
asset-data: null
pgadmin: null
networks:
postgres:
driver: bridge
proxy:
external: true
metrics:
driver: bridge

185
docker-compose-OLD.yaml Normal file

@@ -0,0 +1,185 @@
version: "2.1"
services:
postgres:
restart: always
image: "postgres:15.1-alpine"
container_name: phoenixDB
ports:
- "5432:5432"
environment:
- DEBUG=false
- POSTGRES_DB=phoenix
- "POSTGRES_PASSWORD=${POSTGRES_PASSWORD}"
volumes:
- "db-data:/var/lib/postgresql/data"
healthcheck:
test:
- CMD-SHELL
- pg_isready -U postgres
interval: 5s
timeout: 2s
retries: 5
networks:
- postgres
pgadmin:
restart: always
image: dpage/pgadmin4
container_name: pgadmin_container
environment:
PGADMIN_DEFAULT_EMAIL: "pgadmin4@pgadmin.org"
PGADMIN_DEFAULT_PASSWORD: '${PGADMIN_DEFAULT_PASSWORD}'
PGADMIN_CONFIG_SERVER_MODE: 'False'
ports:
- "${PGADMIN_PORT:-5050}:80"
volumes:
- "pgadmin:/var/lib/pgadmin"
networks:
- postgres
phoenix-app:
restart: always
image: "dennx/phoenix-app:alpha"
container_name: phoenixAppProd
volumes:
- "/opt/containers/phx/app_custom:/usr/share/nginx/html/assets/custom"
- "/opt/containers/phx/nginx/nginx.conf:/etc/nginx/nginx.conf"
# - phxnginx:/etc/nginx
ports:
- "8081:80"
# - "443:443"
labels:
- "traefik.enable=true"
- "traefik.http.routers.phxalpha.entrypoints=https"
- "traefik.http.routers.phxalpha.rule=Host(`alpha.phx-erp.de`)"
- "traefik.http.routers.phxalpha.middlewares=secHeaders@file"
- "traefik.http.routers.phxalpha.tls=true"
- "traefik.http.routers.phxalpha.tls.certresolver=http"
- "traefik.http.routers.phxalpha.service=phxalpha"
- "traefik.http.services.phxalpha.loadbalancer.server.port=80"
- "traefik.docker.network=proxy"
- "traefik.http.routers.phxalpha-insecure.entrypoints=http"
- "traefik.http.routers.phxalpha-insecure.rule=Host(`alpha.phx-erp.de`)"
- "traefik.http.routers.phxalpha-insecure.tls=false"
networks:
- proxy
- postgres
depends_on:
- phoenix-system
phoenix-system:
restart: always
image: "dennx/phoenix-system:alpha"
# container_name: phoenixSystemProd
# env_file: .env
environment:
- DB_HOST=phoenixDB
- "DB_PASSWORD=${POSTGRES_PASSWORD}"
- DB_USERNAME=postgres
- "SUPER_ADMIN_USER_PASSWORD=${SUPER_ADMIN_USER_PASSWORD}"
- RUN_JOB_QUEUE=${RUN_JOB_QUEUE}
- SMTP_FROM=${SMTP_FROM}
- SMTP_TYPE=${SMTP_TYPE}
- SMTP_NAME=${SMTP_NAME}
- SMTP_HOST=${SMTP_HOST}
- SMTP_PORT=${SMTP_PORT}
- SMTP_SECURE=${SMTP_SECURE}
- SMTP_USER=${SMTP_USER}
- SMTP_PASS=${SMTP_PASS}
- SMTP_LOGGING=${SMTP_LOGGING}
- SMTP_DEBUG=${SMTP_DEBUG}
- SMTP_TLS_REJECT_UNAUTHORIZED=${SMTP_TLS_REJECT_UNAUTHORIZED}
- SMTP_SECURE_CONNECTION=${SMTP_SECURE_CONNECTION}
- ENV_MODE=${ENV_MODE}
- NODE_ENV=${NODE_ENV}
- SMTP_TLS_CIPHERS=${SMTP_TLS_CIPHERS}
- BILL_BEE_ACTIVE=${BILL_BEE_ACTIVE}
- CHANNEL_PILOT_PRO_ACTIVE=${CHANNEL_PILOT_PRO_ACTIVE}
- SHOPIFY_ACTIVE=${SHOPIFY_ACTIVE}
command: ["npm", "run", "start:server"]
# ports:
# - "3000:3000"
# - "3400:3400"
# - "9615:9615"
# - "587:587" # Email Port
depends_on:
postgres:
condition: service_healthy
volumes:
- "./logs:/usr/src/app/packages/dev-server/logs"
- "asset-data:/usr/src/app/packages/dev-server/assets"
- "/opt/containers/phx/server_custom:/usr/src/app/packages/dev-server/custom" # it seems tobe no effect if we make changes, not 100% of sure!
networks:
- postgres
deploy:
replicas: 1
phoenix-worker:
restart: always
image: "dennx/phoenix-system:alpha"
environment:
- DB_HOST=phoenixDB
- "DB_PASSWORD=${POSTGRES_PASSWORD}"
- DB_USERNAME=postgres
- "SUPER_ADMIN_USER_PASSWORD=${SUPER_ADMIN_USER_PASSWORD}"
- REDIS_PASSWORD=${REDIS_PASSWORD}
# command: ["npm", "run", "start:worker"]
entrypoint: ./entrypoint-phoenix-worker.sh
depends_on:
postgres:
condition: service_healthy
volumes:
# - "/opt/containers/phx/assets:/usr/src/app/packages/dev-server/custo/assets"
# - "asset-data:/usr/src/app/packages/dev-server/assets"
- "/opt/containers/phx/server_custom:/usr/src/app/packages/dev-server/custom"
- "./logs:/usr/src/app/packages/dev-server/logs"
networks:
- postgres
node_exporter:
image: quay.io/prometheus/node-exporter:latest
container_name: node_exporter
ports:
- "9100:9100" # Exposing the metrics port
networks:
- metrics
restart: unless-stopped
command:
- "--path.procfs=/host/proc"
- "--path.sysfs=/host/sys"
- "--path.rootfs=/host"
- "--collector.filesystem.ignored-mount-points=^/(sys|proc|dev)($$|/)"
volumes:
- "/proc:/host/proc:ro"
- "/sys:/host/sys:ro"
- "/:/host:ro,rslave"
phoenix-redis:
image: 'bitnami/redis:latest'
container_name: redis
#command: redis-server --save 20 1 --appendonly no --requirepass ${REDIS_PASSWORD} --loglevel warning
command: /opt/bitnami/scripts/redis/run.sh --maxmemory 100mb --appendonly no
user: root # Bitnami images normally run as a non-root user; the /bitnami/redis/data directory inside the container is already owned by UID 1001, avoiding permission issues.
restart: always
environment:
# REDIS_APPENDFSYNC: "always"
ALLOW_EMPTY_PASSWORD: "no"
# REDIS_DISABLE_COMMANDS: FLUSHDB,FLUSHALL,CONFIG
REDIS_PASSWORD: ${REDIS_PASSWORD}
healthcheck:
test: [ "CMD", "redis-cli", "--raw", "incr", "ping" ]
networks:
- postgres
volumes:
- /opt/containers/phx/redis/data:/bitnami/redis/data
- /opt/containers/phx/redis/tmp:/opt/bitnami/redis/tmp # ✅ Fix permission issue
# - /opt/containers/phx/redis/logs:/opt/bitnami/redis/logs # ✅ Fix logs permission issue
# - ./redis.conf:/opt/bitnami/redis/etc/redis.conf # ✅ Use a writable redis.conf
volumes:
db-data: null
app-data: null
asset-data: null
pgadmin: null
networks:
postgres:
driver: bridge
proxy:
external: true
metrics:
driver: bridge

415
docker-compose.yaml Normal file

@@ -0,0 +1,415 @@
---
services:
postgres:
restart: always
image: "postgres:15.1-alpine"
container_name: phoenixDB # Hostname
# logging:
# driver: loki
# options:
# loki-url: "${LOKI_URL}"
# loki-retries: "${LOKI_RETRIES}"
# loki-batch-size: "${LOKI_BATCH_SIZE}"
# loki-external-labels: "service=phx-postgres,env=prod"
networks:
- backend
environment:
DEBUG: true
POSTGRES_DB: ${DB_NAME}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
volumes:
- "./database:/var/lib/postgresql/data"
healthcheck:
test: [ "CMD-SHELL", "pg_isready -U postgres" ]
interval: 5s # Time between each health check
timeout: 2s # Max time to wait for each check
retries: 5 # Number of failures before marking as unhealthy
pgadmin:
restart: always
image: dpage/pgadmin4
container_name: pgadmin4-ui
user: "5050:5050"
# logging:
# driver: loki
# options:
# loki-url: "${LOKI_URL}"
# loki-retries: "${LOKI_RETRIES}"
# loki-batch-size: "${LOKI_BATCH_SIZE}"
# loki-external-labels: "service=phx-pgadmin,env=prod"
networks:
- backend
- frontend
environment:
PGADMIN_DEFAULT_EMAIL: ${PGADMIN_DEFAULT_EMAIL}
PGADMIN_DEFAULT_PASSWORD: ${SUPER_ADMIN_USER_PASSWORD}
PGADMIN_CONFIG_SERVER_MODE: 'True'
PGADMIN_CONFIG_WSGI_SCRIPT_NAME: "'/pgadmin4'"
PGADMIN_CONFIG_PROXY_X_PROTO_COUNT: 1
PGADMIN_SERVER_JSON_FILE: '/var/lib/pgadmin/servers.json'
PGADMIN_REPLACE_SERVERS_ON_STARTUP: 'True'
PGADMIN_CONFIG_DATA_DIR: "'/var/lib/pgadmin'"
PGADMIN_CONFIG_MASTER_PASSWORD_REQUIRED: 'False'
# pgpass dynamic vars
PGPASSFILE: /var/lib/pgadmin/pgpass
PGPASS_HOST: ${DB_HOST}
PGPASS_PORT: ${DB_PORT}
PGPASS_DB: ${DB_NAME}
PGPASS_USER: ${DB_USERNAME}
PGPASS_PASSWORD: ${POSTGRES_PASSWORD}
# Other config
ALLOW_SAVE_PASSWORD: 'False'
MFA_ENABLED: 'True'
MFA_FORCE_REGISTRATION: 'False'
MFA_SUPPORTED_METHODS: 'email'
MFA_EMAIL_SUBJECT: 'Your MFA code by PHX-ERP'
MAX_LOGIN_ATTEMPTS: 5
ENHANCED_COOKIE_PROTECTION: 'True'
SHOW_GRAVATAR_IMAGE: 'True'
SECURITY_EMAIL_SENDER: ${SECURITY_EMAIL_SENDER}
MAIL_SERVER: ${MAIL_SERVER}
MAIL_PORT: ${MAIL_PORT}
MAIL_USE_SSL: 'False'
MAIL_USE_TLS: 'False'
MAIL_USERNAME: ${MAIL_USERNAME}
MAIL_PASSWORD: ${MAIL_PASSWORD}
MAIL_DEBUG: 'False'
volumes:
- ./pgadmin/data:/var/lib/pgadmin
- ./pgadmin/pgadmin-entrypoint.sh:/docker-entrypoint.sh:ro
entrypoint: ["/bin/sh", "/docker-entrypoint.sh"]
depends_on:
postgres:
condition: service_healthy
healthcheck:
test: ["CMD", "wget", "-O", "-", "http://localhost:80/misc/ping"]
interval: 15s
timeout: 10s
retries: 5
start_period: 60s
phoenix-app:
restart: always
image: "phxerp/phoenix-app:alpha"
container_name: phoenix-app
ports:
- "3000:3000" # Restrict to only allow access from Grafana Server IP
# logging:
# driver: loki
# options:
# loki-url: "${LOKI_URL}"
# loki-retries: "${LOKI_RETRIES}"
# loki-batch-size: "${LOKI_BATCH_SIZE}"
# loki-external-labels: "service=phx-app,env=prod,project=phoenix"
volumes:
- "/opt/containers/phx/app_custom:/usr/share/nginx/html/assets/custom"
- "/opt/containers/phx/nginx/nginx.conf:/etc/nginx/nginx.conf"
- ./nginx/includes:/etc/nginx/includes:ro
networks:
- backend
- frontend
depends_on:
pgadmin:
condition: service_healthy
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "http://phoenix-app/login"] # localhost checks that the NGINX server inside the container is serving something at the root
interval: 10s # check every 10 seconds
timeout: 5s # allow 5 seconds per check
retries: 5 # mark as unhealthy after 5 failures
start_period: 15s # wait 15 seconds after container start before checking
phoenix-system:
restart: "no"
image: "phxerp/phoenix-system:alpha"
# logging:
# driver: loki
# options:
# loki-url: "${LOKI_URL}"
# loki-retries: "${LOKI_RETRIES}"
# loki-batch-size: "${LOKI_BATCH_SIZE}"
# loki-external-labels: "service=phoenix-system,env=prod"
environment:
- "DB_HOST=phoenixDB"
- "DB_NAME=${DB_NAME}"
- "DB_PASSWORD=${POSTGRES_PASSWORD}"
- "DB_USERNAME=postgres"
- "SUPER_ADMIN_USER_PASSWORD=${SUPER_ADMIN_USER_PASSWORD}"
- "REDIS_PASSWORD=${REDIS_PASSWORD}"
- RUN_JOB_QUEUE=${RUN_JOB_QUEUE}
- NODE_ENV=${NODE_ENV}
command: ["npm", "run", "start:server"]
deploy:
replicas: ${PHOENIX_SYSTEM_REPLICAS} # change here if you want more replicas; couldn't find a way to set this via a variable at the time
networks:
backend:
aliases:
- phoenix-system
depends_on:
postgres:
condition: service_healthy
phoenix-redis:
condition: service_healthy
healthcheck:
test: ["CMD-SHELL", "curl -s http://phoenix-system:3000/health | grep -q '\"admin-api\":{\"status\":\"up\"}' && curl -s http://phoenix-system:3000/health | grep -q '\"database\":{\"status\":\"up\"}'"] # Checks both admin-api and database status
interval: 10s # Time between each health check
timeout: 6s # Max time to wait for each check
retries: 10 # Number of failures before marking as unhealthy
start_period: 40s # Grace period before health checks start
volumes:
- "./assets:/usr/src/app/packages/dev-server/assets"
- "/opt/containers/phx/server_custom:/usr/src/app/packages/dev-server/custom" # it seems tobe no effect if we make changes, not 100% of sure!
- "./logs:/usr/src/app/packages/dev-server/logs"
phoenix-worker:
restart: "no"
image: "phxerp/phoenix-system:alpha"
container_name: "phoenix-worker"
ports:
- "3001:3001" # Restrict to only allow access from Grafana Server IP
# logging:
# driver: loki
# options:
# loki-url: "${LOKI_URL}"
# loki-retries: "${LOKI_RETRIES}"
# loki-batch-size: "${LOKI_BATCH_SIZE}"
# loki-external-labels: "service=phx-worker,env=prod"
networks:
- backend
environment:
- DB_HOST=phoenixDB
- "DB_NAME=${DB_NAME}"
- "DB_PASSWORD=${POSTGRES_PASSWORD}"
- DB_USERNAME=postgres
- "SUPER_ADMIN_USER_PASSWORD=${SUPER_ADMIN_USER_PASSWORD}"
- REDIS_PASSWORD=${REDIS_PASSWORD}
- NODE_ENV=${NODE_ENV}
command: ['npm', 'run', 'start:worker']
depends_on:
phoenix-system:
condition: service_healthy
postgres:
condition: service_healthy
healthcheck:
test: [ "CMD-SHELL", "curl -s http://phoenix-worker:3001/health | grep -q '\"status\":\"ok\"'" ] # Check if worker responds with status ok
interval: 10s # Time between each health check
timeout: 6s # Max time to wait for each check
retries: 20 # Number of failures before marking as unhealthy
start_period: 30s # Grace period before health checks start
volumes:
# - "/opt/containers/phx/assets:/usr/src/app/packages/dev-server/custo/assets"
- "./assets:/usr/src/app/packages/dev-server/assets"
- "/opt/containers/phx/server_custom:/usr/src/app/packages/dev-server/custom"
- "./logs:/usr/src/app/packages/dev-server/logs"
phoenix-redis:
image: 'bitnami/redis:latest'
container_name: redis
command: /opt/bitnami/scripts/redis/run.sh --maxmemory 100mb
user: root
# logging:
# driver: loki
# options:
# loki-url: "${LOKI_URL}"
# loki-retries: "${LOKI_RETRIES}"
# loki-batch-size: "${LOKI_BATCH_SIZE}"
# loki-external-labels: "service=phx-redis,env=prod"
networks:
- backend
restart: always
environment:
ALLOW_EMPTY_PASSWORD: "no"
REDIS_PASSWORD: ${REDIS_PASSWORD}
healthcheck:
test: [ "CMD", "redis-cli", "--raw", "incr", "ping" ]
interval: 5s
retries: 10 # Increase retries if Redis takes a while to start
timeout: 5s # Increase timeout if needed
depends_on:
postgres:
condition: service_healthy
volumes:
- /opt/containers/phx/redis/data:/bitnami/redis/data
phoenix-health-exporter:
image: phxerp/phoenix-health-exporter:alpha
container_name: health_exporter
restart: unless-stopped
# logging:
# driver: loki
# options:
# loki-url: "${LOKI_URL}"
# loki-retries: "${LOKI_RETRIES}"
# loki-batch-size: "${LOKI_BATCH_SIZE}"
# loki-external-labels: "service=phx-health-exporter,env=prod"
ports:
- "9800:9800"
environment:
DB_HOST: ${DB_HOST}
DB_NAME: ${DB_NAME}
DB_PASSWORD: ${POSTGRES_PASSWORD}
DB_USERNAME: ${DB_USERNAME}
networks:
- frontend
- backend
volumes:
- /etc/hostname:/etc/host_hostname:ro # This ensures the container always uses the real machine hostname, even if restarted or recreated.
security_opt:
- no-new-privileges:true
deploy:
resources:
limits:
cpus: '0.25'
memory: 128M
depends_on:
phoenix-system:
condition: service_healthy
phoenix-worker:
condition: service_healthy
postgres:
condition: service_healthy
healthcheck:
test: ["CMD-SHELL", "curl -sf http://localhost:9800/healthz || exit 1"]
interval: 1m
timeout: 5s
retries: 3
start_period: 15s
node-exporter:
image: quay.io/prometheus/node-exporter:latest
container_name: node_exporter
# logging:
# driver: loki
# options:
# loki-url: "${LOKI_URL}"
# loki-retries: "${LOKI_RETRIES}"
# loki-batch-size: "${LOKI_BATCH_SIZE}"
# loki-external-labels: "service=phx-node-exporter,env=prod"
networks:
- metrics
- frontend
restart: unless-stopped
ports:
- "9100:9100" # Restrict to only allow access from Grafana Server IP
command:
- "--path.procfs=/host/proc"
- "--path.sysfs=/host/sys"
- "--path.rootfs=/host"
- "--collector.filesystem.ignored-mount-points=^/(sys|proc|dev)($$|/)"
volumes:
- "/proc:/host/proc:ro"
- "/sys:/host/sys:ro"
- "/:/host:ro,rslave"
security_opt:
- no-new-privileges:true
deploy:
resources:
limits:
cpus: '0.25'
memory: 128M
healthcheck:
test: ["CMD", "wget", "-qO-", "http://localhost:9100/metrics"]
interval: 15s
timeout: 5s
retries: 3
start_period: 20s
# nginx-exporter:
# image: nginx/nginx-prometheus-exporter:1.4.2
# container_name: nginx_exporter
# restart: unless-stopped
# # logging:
# # driver: loki
# # options:
# # loki-url: "${LOKI_URL}"
# # loki-retries: "${LOKI_RETRIES}"
# # loki-batch-size: "${LOKI_BATCH_SIZE}"
# # loki-external-labels: "service=phx-nginx-exporter,env=prod"
# ports:
# - "9113:9113" # Restrict to only allow access from Grafana Server IP
# command:
# - '--nginx.scrape-uri=http://phoenixApp/stub_status'
# security_opt:
# - no-new-privileges:true
# deploy:
# resources:
# limits:
# cpus: '0.25'
# memory: 128M
# depends_on:
# phoenix-app:
# condition: service_healthy
# networks:
# - frontend
# - metrics
# healthcheck:
# test: ["CMD", "wget", "-qO-", "http://localhost:9113/metrics"] # Not working as expected
# interval: 15s
# timeout: 5s
# retries: 3
# start_period: 10s
https_portal:
container_name: https_portal
image: "steveltn/https-portal:1.21"
restart: unless-stopped
# logging:
# driver: loki
# options:
# loki-url: "${LOKI_URL}"
# loki-retries: "${LOKI_RETRIES}"
# loki-batch-size: "${LOKI_BATCH_SIZE}"
# loki-external-labels: "service=phx-https-portal,env=prod"
networks:
- frontend # [ PgAdmin, Phoenix-App ]
ports:
- "80:80"
- "443:443"
# - host:container
environment:
STAGE: "production" # Use Let's Encrypt production server
WEBSOCKET: "true" # Enable websocket support
DEBUG: "true"
RENEW_MARGIN_DAYS: 30
CLIENT_MAX_BODY_SIZE: 0
SERVER_NAMES_HASH_BUCKET_SIZE: 128 # Increase hash bucket size for server names; needed for longer domain names. If not set correctly, NGINX throws an error and the container breaks.
# FORCE_RENEW: 'true'
DOMAINS: "${HTTPS_PORTAL_DOMAINS}"
volumes:
- ./https_portal/data:/var/lib/https-portal # ssl_certs, vhost.d, htdocs
- ./https_portal/log:/var/log/nginx # nginx logs
# - ./https_portal/config/custom_nginx.conf:/opt/custom_nginx.conf:ro # ✅ Mount file in a safe path
depends_on:
pgadmin:
condition: service_healthy
postgres:
condition: service_healthy
fail2ban:
image: crazymax/fail2ban:latest
container_name: fail2ban
network_mode: 'host'
cap_add:
- NET_ADMIN
- NET_RAW
volumes:
- ./fail2ban/data:/data
- ./fail2ban/jail.d:/etc/fail2ban/jail.d
- /var/log:/var/log:ro
restart: always
volumes:
asset-data: null
networks:
backend:
driver: bridge
external: false
ipam:
config:
- subnet: 172.19.0.0/16
frontend:
driver: bridge
external: false
ipam:
config:
- subnet: 172.20.0.0/16
metrics:
driver: bridge
external: false
ipam:
config:
- subnet: 172.22.0.0/16

54
helper.md Normal file

@@ -0,0 +1,54 @@
Create a backup file:
docker exec -t phoenixDB pg_dump -U postgres -d phoenix > phoenix_backup.sql
# PostgreSQL Database Backup in Docker
## 🛠️ Backup Commands for `phoenixDB`
### 1️⃣ **Dump Database (Including DROP Statements)**
```sh
docker exec -t phoenixDB pg_dump -U postgres -d phoenix -c > phoenix_backup.sql
```
📖 Explanation:
- `docker exec -t phoenixDB` → Runs the `pg_dump` command inside the `phoenixDB` container.
- `pg_dump -U postgres -d phoenix -c`:
  - `-U postgres` → Connect as user `postgres`.
  - `-d phoenix` → Dump the `phoenix` database.
  - `-c` → Includes DROP statements (cleans tables before restoring).
- `> phoenix_backup.sql` → Saves the dump to your host machine.

🔹 When to Use?
- Use this when you want to completely overwrite an existing database on restore.
- Ensures that old tables are dropped before new ones are created.
### 2️⃣ **Dump Database (Without DROP Statements)**
```sh
docker exec -t phoenixDB pg_dump -U postgres -d phoenix > phoenix_backup.sql
```
📖 Explanation:
- Similar to the previous command, but without `-c`.
- This does NOT include DROP TABLE statements, meaning:
  - It will fail if restoring to a database that already has tables with the same names.
  - Use this when restoring to a new, empty database.

🔹 When to Use?
- If you're backing up for archival purposes.
- If you don't want to drop existing data on restore.
## 🛠️ How to Restore the Backup?
### Restore to a Database (bkp1)
```sh
cat phoenix_backup.sql | docker exec -i phoenixDB psql -U postgres -d bkp1
```
📖 Explanation:
- `cat phoenix_backup.sql` → Reads the dump file.
- `|` → Pipes it into the `psql` command inside the container.
- `docker exec -i phoenixDB psql -U postgres -d bkp1`:
  - `-i` → Allows input redirection.
  - `-U postgres` → Connects as the `postgres` user.
  - `-d bkp1` → Restores to the `bkp1` database.
## 🎯 Final Notes
- Use the `-c` flag (when dumping) if you will restore into an existing database and want old tables dropped first.
- Omit `-c` when restoring into a new, empty database.
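
The commands above can be wrapped into a small helper. The following is a minimal sketch, assuming the `phoenixDB` container and `phoenix` database names used in this repo; the script name, backup directory, and timestamp format are illustrative and not part of the project.

```sh
#!/bin/sh
# backup_phoenix.sh - illustrative wrapper around the pg_dump command documented above.
# Assumes the container is named phoenixDB and the database is phoenix (see docker-compose.yaml).
set -eu

BACKUP_DIR="${BACKUP_DIR:-./backups}"   # destination on the host (assumption)
STAMP="$(date +%Y%m%d_%H%M%S)"          # timestamp used in the file name
OUT_FILE="${BACKUP_DIR}/phoenix_${STAMP}.sql"

mkdir -p "${BACKUP_DIR}"

# -c adds DROP statements, so restoring into an existing database cleans old tables first
docker exec -t phoenixDB pg_dump -U postgres -d phoenix -c > "${OUT_FILE}"

echo "Backup written to ${OUT_FILE}"
```

Restoring then works exactly as documented above, e.g. `cat backups/phoenix_<timestamp>.sql | docker exec -i phoenixDB psql -U postgres -d bkp1`.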


@@ -0,0 +1,18 @@
# pgAdmin reverse proxy (under subpath)
location /pgadmin4 {
proxy_pass http://pgadmin4-ui/;
proxy_set_header X-Script-Name /pgadmin4;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Forwarded-Proto $forwarded_proto;
# Include headers for proxying
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Proto $forwarded_proto;
# End of headers
proxy_redirect off;
# ⚠️ Rewrite required to remove /pgadmin4 from the path
rewrite ^/pgadmin4(/.*)$ $1 break;
}

124
nginx/nginx copy 2.conf Normal file

@@ -0,0 +1,124 @@
worker_processes 1;
events {
worker_connections 1024;
}
http {
sendfile on;
client_max_body_size 64m;
#client_body_temp_path /data/temp;
server {
listen 80;
server_name localhost;
root /usr/share/nginx/html;
index index.html index.htm;
include /etc/nginx/mime.types;
gzip on;
gzip_min_length 1000;
gzip_proxied expired no-cache no-store private auth;
gzip_types text/plain text/css application/json application/javascript application/x-javascript text/xml application/xml application/xml+rss text/javascript;
sendfile on;
client_max_body_size 64m;
location / {
try_files $uri $uri/ /index.html;
}
# https://serverfault.com/questions/379675/nginx-reverse-proxy-url-rewrite
location /backend-api/ {
#rewrite ^/backend-api(.*) /$1 break;
proxy_pass http://phoenix-system:3000/;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
}
location /admin-api {
proxy_pass http://phoenix-system:3000/admin-api;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
}
location /remote-assets {
proxy_pass http://phoenix-system:3000/remote-assets;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
}
location /sti {
proxy_pass http://phoenix-system:3000/sti;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
}
location /ws {
proxy_pass http://phoenix-system:3000/graphql;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
}
server { # This new server will watch for traffic on 443
listen 443 ssl http2;
server_name localhost;
ssl_certificate /etc/nginx/external-certificate/certificate.crt;
ssl_certificate_key /etc/nginx/external-certificate/certificate.key;
root /usr/share/nginx/html;
index index.html index.htm;
include /etc/nginx/mime.types;
gzip on;
gzip_min_length 1000;
gzip_proxied expired no-cache no-store private auth;
gzip_types text/plain text/css application/json application/javascript application/x-javascript text/xml application/xml application/xml+rss text/javascript;
sendfile on;
client_max_body_size 64m;
location / {
try_files $uri $uri/ /index.html;
}
# https://serverfault.com/questions/379675/nginx-reverse-proxy-url-rewrite
location /backend-api/ {
#rewrite ^/backend-api(.*) /$1 break;
proxy_pass http://phoenix-system:3000/;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-Proto https;
}
location /admin-api {
proxy_pass http://phoenix-system:3000/admin-api;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
}
location /remote-assets {
proxy_pass http://phoenix-system:3000/remote-assets;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-Proto https;
}
location /sti {
proxy_pass http://phoenix-system:3000/sti;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
}
location /ws {
proxy_pass http://phoenix-system:3000/graphql;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
}
}

126
nginx/nginx copy.conf Normal file

@@ -0,0 +1,126 @@
worker_processes 1;
events {
worker_connections 1024;
}
http {
sendfile on;
client_max_body_size 64m;
#client_body_temp_path /data/temp;
server {
listen 80;
server_name localhost;
root /usr/share/nginx/html;
index index.html index.htm;
include /etc/nginx/mime.types;
gzip on;
gzip_min_length 1000;
gzip_proxied expired no-cache no-store private auth;
gzip_types text/plain text/css application/json application/javascript application/x-javascript text/xml application/xml application/xml+rss text/javascript;
sendfile on;
client_max_body_size 64m;
location / {
try_files $uri $uri/ /index.html;
}
# https://serverfault.com/questions/379675/nginx-reverse-proxy-url-rewrite
location /backend-api/ {
#rewrite ^/backend-api(.*) /$1 break;
proxy_pass http://phoenix-system:3000/;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
}
location /admin-api {
proxy_pass http://phoenix-system:3000/admin-api;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
}
location /remote-assets {
proxy_pass http://phoenix-system:3000/remote-assets;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
}
location /sti {
proxy_pass http://phoenix-system:3000/sti;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
}
location /ws {
proxy_pass http://phoenix-system:3000/graphql;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
}
server { # This new server will watch for traffic on 443
listen 443 ssl http2;
server_name localhost;
ssl_certificate /etc/nginx/external-certificate/certificate.crt;
ssl_certificate_key /etc/nginx/external-certificate/certificate.key;
root /usr/share/nginx/html;
index index.html index.htm;
include /etc/nginx/mime.types;
gzip on;
gzip_min_length 1000;
gzip_proxied expired no-cache no-store private auth;
gzip_types text/plain text/css application/json application/javascript application/x-javascript text/xml application/xml application/xml+rss text/javascript;
sendfile on;
client_max_body_size 64m;
location / {
try_files $uri $uri/ /index.html;
}
# https://serverfault.com/questions/379675/nginx-reverse-proxy-url-rewrite
location /backend-api/ {
#rewrite ^/backend-api(.*) /$1 break;
proxy_pass http://phoenix-system:3000/;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-Proto https;
}
location /admin-api {
proxy_pass http://phoenix-system:3000/admin-api;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
}
location /remote-assets {
proxy_pass http://phoenix-system:3000/remote-assets;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-Proto https;
}
location /sti {
proxy_pass http://phoenix-system:3000/sti;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
}
location /ws {
proxy_pass http://phoenix-system:3000/graphql;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
}
}

402
nginx/nginx.conf Normal file

@@ -0,0 +1,402 @@
# Main process configuration
worker_processes 1;
events {
worker_connections 1024;
}
http {
geo $frontend_whitelist {
default 1;
127.0.0.1 1;
172.20.0.0/16 1; # Frontend Docker subnet
5.75.153.161 1; # Grafana or monitoring
167.235.254.4 1; # Ansible server IP
}
geo $backend_whitelist {
default 1;
127.0.0.1 1;
172.19.0.0/16 1; # Backend Docker subnet
5.75.153.161 1; # Grafana or monitoring
167.235.254.4 1; # Ansible server IP
}
# These settings ensure that $remote_addr reflects the real client IP forwarded by https-portal, which is needed for your allow rules to work correctly
# Recommended for resolving client IP behind proxy
# Docker networks where both frontend and backend containers communicate through NGINX.
# To avoid potential misclassification of real client IPs from backend routes.
# The set_real_ip_from directive doesn't grant access; it just instructs NGINX to trust the X-Forwarded-For header from those IPs.
set_real_ip_from 172.20.0.0/16; # Replace with your Docker network subnet (matches your `frontend` network)
set_real_ip_from 172.19.0.0/16; # Replace with your Docker network subnet (matches your `backend` network)
real_ip_header X-Forwarded-For;
real_ip_recursive on;
resolver 127.0.0.11 valid=10s;
resolver_timeout 5s;
upstream phoenix_system_cluster {
zone phoenix_system_cluster 64k;
least_conn;
server phoenix-system:3000 resolve fail_timeout=1s max_fails=0;
# ADD_SYSTEM_SERVERS_HERE
}
upstream phoenix_worker_cluster {
zone phoenix_worker_cluster 64k;
least_conn;
server phoenix-worker:3001 resolve fail_timeout=1s max_fails=0;
# ADD_WORKER_SERVERS_HERE
}
server_tokens off; # Disable NGINX version tokens to avoid leaking NGINX version.
# File handling & upload limits
sendfile on;
client_max_body_size 64m;
# Prevent warning when setting many proxy headers, like we do
proxy_headers_hash_max_size 1024;
proxy_headers_hash_bucket_size 128;
# Gzip compression (for better bandwidth efficiency)
gzip on;
gzip_min_length 1000;
gzip_proxied expired no-cache no-store private auth;
gzip_types text/plain text/css application/json application/javascript application/x-javascript text/xml application/xml application/xml+rss text/javascript;
# Trust the protocol from upstream proxy/load balancer
map $http_x_forwarded_proto $forwarded_proto {
default $scheme;
https https;
http http;
}
# File types and default mime type
include /etc/nginx/mime.types;
default_type application/octet-stream;
# 🧩 Logs
map $request_uri $loggable {
default 1;
~^/stub_status 0;
~^/health/system 0;
~^/health/worker 0;
}
log_format main_with_realip '$remote_addr - $realip_remote_addr [$time_local] '
'"$request" $status $body_bytes_sent '
'"$http_referer" "$http_user_agent"';
log_format json_compatible escape=json '{'
'"time":"$time_iso8601",'
'"remote_addr":"$remote_addr",'
'"proxy_addr":"$proxy_protocol_addr",'
'"x_forwarded_for":"$http_x_forwarded_for",'
'"request_method":"$request_method",'
'"request_uri":"$request_uri",'
'"status":$status,'
'"body_bytes_sent":$body_bytes_sent,'
'"request_time":$request_time,'
'"upstream_response_time":"$upstream_response_time",'
'"http_referer":"$http_referer",'
'"http_user_agent":"$http_user_agent",'
'"host":"$host",'
'"realip":"$realip_remote_addr"'
'}';
access_log /var/log/nginx/access_json.log json_compatible if=$loggable; # JSON format for Loki
access_log /var/log/nginx/access.log main_with_realip if=$loggable;
# End of logs
##################################################################
# 🧩 HTTP Server Block
##################################################################
server {
listen 80;
server_name _;
# Security headers
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-XSS-Protection "1; mode=block" always;
add_header X-Content-Type-Options "nosniff" always;
add_header Referrer-Policy "no-referrer-when-downgrade" always;
root /usr/share/nginx/html;
index index.html index.htm;
# Frontend SPA fallback
location / {
try_files $uri $uri/ /index.html;
}
# Backend API routes
location /backend-api/ {
proxy_pass http://phoenix_system_cluster/;
# Include headers for proxying
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Proto $forwarded_proto;
# End of headers
# Increase timeout settings for file uploads
proxy_connect_timeout 600;
proxy_send_timeout 600;
proxy_read_timeout 600;
send_timeout 600;
}
location /admin-api {
proxy_pass http://phoenix_system_cluster/admin-api;
# Include headers for proxying
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Proto $forwarded_proto;
# End of headers
}
location /remote-assets {
proxy_pass http://phoenix_system_cluster/remote-assets;
# Include headers for proxying
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Proto $forwarded_proto;
# End of headers
}
location /sti {
proxy_pass http://phoenix_system_cluster/sti;
# Include headers for proxying
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Proto $forwarded_proto;
# End of headers
}
# WebSocket support
location /ws {
proxy_pass http://phoenix_system_cluster/graphql;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
# Include headers for proxying
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Proto $forwarded_proto;
# End of headers
}
# Reverse proxy for pgAdmin (subpath support)
include /etc/nginx/includes/*.conf;
# Health check endpoints -> used by the health check exporter
location /health/system {
proxy_pass http://phoenix_system_cluster/health;
# Secure the health check endpoint
if ($backend_whitelist = 0) {
return 403;
}
# End of security
# Include headers for proxying
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $forwarded_proto;
# End of headers
}
# location /health/system/metrics {
# proxy_pass http://phoenix_system_cluster/health/metrics;
# # Secure the health check endpoint
# # if ($backend_whitelist = 0) {
# # return 403;
# # }
# # End of security
# # Include headers for proxying
# proxy_set_header Host $host;
# proxy_set_header X-Real-IP $remote_addr;
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# proxy_set_header X-Forwarded-Proto $forwarded_proto;
# # End of headers
# }
location /health/worker {
proxy_pass http://phoenix_worker_cluster/health;
# Secure the health check endpoint
if ($backend_whitelist = 0) {
return 403;
}
# End of security
# Include headers for proxying
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $forwarded_proto;
# End of headers
}
# location /health/worker/metrics {
# proxy_pass http://phoenix_worker_cluster/health/metrics;
# # Secure the health check endpoint
# # if ($backend_whitelist = 0) {
# # return 403;
# # }
# # End of security
# # Include headers for proxying
# proxy_set_header Host $host;
# proxy_set_header X-Real-IP $remote_addr;
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# proxy_set_header X-Forwarded-Proto $forwarded_proto;
# # End of headers
# }
location /stub_status {
stub_status;
# Secure the stub status endpoint
if ($frontend_whitelist = 0) {
return 403;
}
# End of security
}
}
##################################################################
# 🔐 HTTPS Server Block
##################################################################
server {
listen 443 ssl;
http2 on;
server_name _;
# Security headers
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-XSS-Protection "1; mode=block" always;
add_header X-Content-Type-Options "nosniff" always;
add_header Referrer-Policy "no-referrer-when-downgrade" always;
ssl_certificate /etc/nginx/external-certificate/certificate.crt;
ssl_certificate_key /etc/nginx/external-certificate/certificate.key;
root /usr/share/nginx/html;
index index.html index.htm;
location / {
try_files $uri $uri/ /index.html;
}
# Secure API routes
location /backend-api/ {
proxy_pass http://phoenix_system_cluster/;
# Include headers for proxying
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Proto $forwarded_proto;
# End of headers
# Increase timeout settings for file uploads
proxy_connect_timeout 600;
proxy_send_timeout 600;
proxy_read_timeout 600;
send_timeout 600;
}
location /admin-api {
proxy_pass http://phoenix_system_cluster/admin-api;
# Include headers for proxying
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Proto $forwarded_proto;
# End of headers
}
location /remote-assets {
proxy_pass http://phoenix_system_cluster/remote-assets;
# Include headers for proxying
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Proto $forwarded_proto;
# End of headers
}
location /sti {
proxy_pass http://phoenix_system_cluster/sti;
# Include headers for proxying
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Proto $forwarded_proto;
# End of headers
}
location /ws {
proxy_pass http://phoenix_system_cluster/graphql;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
# Include headers for proxying
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Proto $forwarded_proto;
# End of headers
}
# Reverse proxy for pgAdmin (subpath support)
include /etc/nginx/includes/*.conf;
location /health/system {
proxy_pass http://phoenix_system_cluster/health;
# Secure the health check endpoint
if ($backend_whitelist = 0) {
return 403;
}
# End of security
# Include headers for proxying
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $forwarded_proto;
# End of headers
}
location /health/worker {
proxy_pass http://phoenix_worker_cluster/health;
# Secure the health check endpoint
if ($backend_whitelist = 0) {
return 403;
}
# End of security
# Include headers for proxying
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $forwarded_proto;
# End of headers
}
location /stub_status {
stub_status;
# Secure the stub status endpoint
if ($frontend_whitelist = 0) {
return 403;
}
# End of security
}
}
}

5
oom Executable file

@@ -0,0 +1,5 @@
docker ps -aq | while read cid; do
if docker inspect "$cid" | grep -q '"OOMKilled": true'; then
echo "OOMKilled: $cid - $(docker inspect -f '{{.Name}}' $cid)"
fi
done
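
As a small extension (a sketch, not part of the committed `oom` file), the same loop can also print when each OOM-killed container exited, using only standard `docker inspect` template fields:

```sh
#!/bin/sh
# List OOM-killed containers together with their name and exit time.
# Same approach as the oom helper above; {{.State.OOMKilled}}, {{.Name}} and
# {{.State.FinishedAt}} are standard docker inspect template fields.
docker ps -aq | while read -r cid; do
  if [ "$(docker inspect -f '{{.State.OOMKilled}}' "$cid")" = "true" ]; then
    name=$(docker inspect -f '{{.Name}}' "$cid")
    finished=$(docker inspect -f '{{.State.FinishedAt}}' "$cid")
    echo "OOMKilled: $cid - ${name} (exited at ${finished})"
  fi
done
```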

5
phoenix_backup.sql Normal file

@@ -0,0 +1,5 @@
pg_dump: error: query failed: ERROR: deadlock detected
DETAIL: Process 10340 waits for AccessShareLock on relation 17813 of database 16384; blocked by process 10326.
Process 10326 waits for AccessExclusiveLock on relation 17373 of database 16384; blocked by process 10340.
HINT: See server log for query details.
pg_dump: detail: Query was: LOCK TABLE public.document IN ACCESS SHARE MODE

2296
redis.conf Normal file

File diff suppressed because it is too large

204
server_custom/xconfig.ts Normal file

@@ -0,0 +1,204 @@
/* tslint:disable:no-console */
import path from 'path';
import { ConnectionOptions } from 'typeorm';
import { WinstonLogger, RedisSessionCachePlugin, LogLevel, TypeOrmLogger, SystemConfig, DefaultJobQueuePlugin, TypeORMHealthCheckStrategy } from '@phoenix/core';
import { AssetServerPlugin } from '@phoenix/asset-server-plugin';
import { ADMIN_API_PATH, API_PORT, SHOP_API_PATH, SUPER_ADMIN_USER_IDENTIFIER, WORKER_PORT } from '@phoenix/common';
import { EmailPlugin, FileBasedTemplateLoader, defaultEmailHandlers } from '@phoenix/email-plugin';
import { BillBeePlugin } from "@phoenix/bill-bee-plugin";
import { ChannelPilotProPlugin } from "@phoenix/channel-pilot-pro-plugin";
import { ShopifyPlugin } from '@phoenix/shopify-plugin';
//import { BonnAPIPlugin } from '../plugins/bonn-api-plugin/bonn-api-plugin.module';
/**
* Config settings used during development
*/
export const customConfig: SystemConfig = {
apiOptions: {
port: API_PORT,
workerPort: WORKER_PORT,
// sslPort: API_SSL_PORT,
//sslCertPath: path.join(__dirname, '../secrets/certificate.crt'),
//sslKeyPath: path.join(__dirname, '../secrets/certificate.key'),
adminApiPath: ADMIN_API_PATH,
shopApiPath: SHOP_API_PATH,
cors: {
origin: true,
credentials: true,
},
},
authOptions: {
disableAuth: true,
sessionSecret: 'some-secret',
requireVerification: false,
tokenMethod: "bearer",
superadminCredentials: {
identifier: SUPER_ADMIN_USER_IDENTIFIER,
password: process.env.SUPER_ADMIN_USER_PASSWORD || 'superadmin'
}
},
dbConnectionOptions: {
// synchronize: true,
// logging: true,
logger: new TypeOrmLogger(),
...getDbConfig(),
// logging: ["error"]
},
// paymentOptions: {
// // paymentMethodHandlers: [examplePaymentHandler],
// },
customFields: {
Product: [
{
name: 'testo',
type: 'string',
}
],
DocumentLineItem: [
],
PostProductionDetail: [
],
},
searchableFields: {
processResource: [
"scanId"
]
},
logger: new WinstonLogger({ level: LogLevel.Debug }),
workerLogger: new WinstonLogger({ level: LogLevel.Info }),
//importExportOptions: {
// importProductAssetsDir: path.join(__dirname, 'import', 'product-assets'),
//},
defaults: {
defaultTakeNumber: 100,
},
plugins: [
RedisSessionCachePlugin.init({
namespace: 'phx-session',
redisOptions: {
host: process.env.REDIS_HOST || 'redis',
port: process.env.REDIS_PORT ? parseInt(process.env.REDIS_PORT) : 6379,
db: process.env.REDIS_DB ? parseInt(process.env.REDIS_DB) : 0,
password: process.env.REDIS_PASSWORD || 'admin'
}
}),
AssetServerPlugin.init({
route: 'remote-assets',
assetUploadDir: path.join(__dirname, 'assets'),
port: 5002,
assetUrlPrefix: "\\remote-assets\\" // to make it relative for client
}),
DefaultJobQueuePlugin.init({
useDatabaseForBuffer: true
}),
EmailPlugin.init({
sendRealEmails: true,
route: 'mailbox',
handlers: [...defaultEmailHandlers],
// Dynamic Email Templates
templateLoader: new FileBasedTemplateLoader(path.join(__dirname, '..', '../email-plugin/templates')),
globalTemplateVars: {
verifyEmailAddressUrl: 'http://localhost:4201/verify',
passwordResetUrl: 'http://localhost:4201/reset-password',
changeEmailAddressUrl: 'http://localhost:4201/change-email-address',
}
}),
BillBeePlugin.init({
active: process.env.BILL_BEE_ACTIVE === 'true'
}),
ChannelPilotProPlugin.init({
active: process.env.CHANNEL_PILOT_PRO_ACTIVE === 'true',
}),
ShopifyPlugin.init({
active: 1 === 1
}),
// DefaultStoragePlaceRankPlugin.init({})
// new DefaultSearchPlugin(),
// new ElasticsearchPlugin({
// host: 'http://192.168.99.100',
// port: 9200,
// }),
// DocusignPlugin.init({
// devMode:true,
// handlers: defaultDocusignHandlers,
// assetDownloadDir: path.join(__dirname, 'docusign'),
// assetUploadDir: path.join(__dirname, 'docusign'),
// port: API_PORT,
// route: "docusign"
// }),
// new AdminUiPlugin({
// port: 5001,
// }),
],
systemOptions: {
healthChecks: [new TypeORMHealthCheckStrategy(null, { key: 'database', timeout: 1000 })],
errorHandlers: [],
},
// ApolloEngineApiKey: "service:Logic-Bits-2900:5w1aCP5YUtF-1ErRG0KNQw"
};
function getDbConfig(): ConnectionOptions {
const dbType = process.env.DB || 'postgres';
const dbHost = process.env.DB_HOST || 'localhost';
const dbPort = +process.env.DB_PORT || 5432;
const connectionPoolMax = process.env.CONNECTION_POOL_MAX ?? 20;
const dbUsername = process.env.DB_USERNAME || 'postgres';
const password = process.env.DB_PASSWORD || 'admin';
const database = process.env.DB_NAME || 'phoenix'
if (password == "admin")
console.warn("default postgres password is used!");
if (process.env.DB_HOST)
console.log(`using DB Host ${dbHost} from env`);
console.log(`using Database ${database}`);
console.log(`using User ${dbUsername}`);
switch (dbType) {
case 'postgres':
console.log('Using postgres connection at ' + dbHost);
return {
synchronize: true,
type: 'postgres',
//host: '127.0.0.1',
host: dbHost,
port: dbPort,
username: dbUsername,
password: password,
database: database,
// logging: "all",
extra: {
max: connectionPoolMax
}
};
case 'sqlite':
console.log('Using sqlite connection');
return {
type: 'sqlite',
database: path.join(__dirname, 'phoenix.sqlite'),
};
case 'sqljs':
console.log('Using sql.js connection');
return {
type: 'sqljs',
autoSave: true,
database: new Uint8Array([]),
location: path.join(__dirname, 'phoenix.sqlite'),
};
case 'mysql':
default:
console.log('Using mysql connection');
return {
synchronize: true,
type: 'mysql',
host: '192.168.99.100',
port: 3306,
username: 'root',
password: '',
database: 'phoenix-dev',
};
}
}