Update SSL certificate, NGINX configuration, and pgAdmin setup

- Updated the SSL certificate for yuri.phx-erp.de with a new signed certificate.
- Added a new NGINX configuration file for pgAdmin reverse proxy under the subpath /pgadmin4.
- Enhanced the main NGINX configuration with improved logging, security headers, and real IP handling.
- Implemented health check endpoints for both system and worker services with IP whitelisting.
- Created a new entrypoint script for pgAdmin to manage .pgpass and servers.json configuration.
- Removed the redis.conf file and commented out Redis session caching in the configuration.
commit 05f2f8aaa5 (parent 709362b1c0)
Date: 2025-05-27 08:50:14 +00:00
10 changed files with 696 additions and 228 deletions
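A quick way to smoke-test this change set after deployment is to hit the new public entry points once the stack is up. This is only an illustrative sketch: the domain yuri.phx-erp.de comes from the HTTPS-PORTAL configuration below, while the use of curl and the exact paths probed are assumptions, not part of the commit.

    # Run from a host the NGINX geo maps allow (e.g. the monitoring server).
    curl -I https://yuri.phx-erp.de/                  # SPA served by phoenix-app
    curl -I https://yuri.phx-erp.de/pgadmin4/login    # pgAdmin behind the /pgadmin4 subpath proxy
    curl -s https://yuri.phx-erp.de/health/system     # proxied to the phoenix_system_cluster /health endpoint
    curl -s https://yuri.phx-erp.de/health/worker     # proxied to the phoenix_worker_cluster /health endpoint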

.env

@@ -1,50 +1,40 @@
 # ===== Enviroment Stage ======
-ENV_MODE=production
 NODE_ENV=production
 # ====== Database Configuration ======
 POSTGRES_PASSWORD=6MaBQd9ICYeZnCb
 PGADMIN_DEFAULT_PASSWORD=bET23d76xKArCqf
+DB_NAME="phoenix"
+DB_HOST="phoenixDB"
+DB_PORT=5432
+DB_USERNAME="postgres"
+PGADMIN_DEFAULT_EMAIL="info@phx-erp.de"
+MAIL_SERVER="mail.phx-erp.de"
+MAIL_PORT=465
+MAIL_USERNAME="internal@phx-erp.de "
+MAIL_PASSWORD="8Kb2p4!o1"
+SECURITY_EMAIL_SENDER="'No Reply PHX <no-reply@phx-erp.de>'"
 # ====== Phoenix Super Admin Configuration ======
 SUPER_ADMIN_USER_PASSWORD=zrRzSW5pC4cWwroX976oXcGQU
 # ====== Redis Configuration ======
 REDIS_PASSWORD=sBCuVsJkKTSErgF
 # ====== Worker Configuration ======
 RUN_JOB_QUEUE=1
-# ====== Email Configuration ======
-SMTP_FROM="Yuri Lima <yuri.lima@phx-erp.de>"
-SMTP_TYPE='smtp'
-SMTP_NAME='mail.phx-erp.de'
-SMTP_HOST='mail.phx-erp.de'
-SMTP_PORT=465
-SMTP_SECURE='true'
-SMTP_USER='yuri.lima@phx-erp.de'
-SMTP_PASS='0rB0@et68'
-SMTP_LOGGING='true'
-SMTP_DEBUG='true'
-SMTP_TLS_REJECT_UNAUTHORIZED='false' # If true will reject self-signed certificates. It will show: Hostname/IP does not match certificate's altnames: Host: mail.phx-erp.de. is not in the cert's altnames: DNS:*.netcup.net, DNS:netcup.net trace
-SMTP_SECURE_CONNECTION='true' # Not in use
+# ===== Metris Configuration ======
+# Loki API URL -> The IP 5.75.153.161 is the Grafana Server where it has a firewall rule to allow the connection. Please, if you change here, need to be change in NGINX too.
+LOKI_URL=http://grafana.phx-erp.de:3100/loki/api/v1/push
+LOKI_RETRIES=5
+LOKI_BATCH_SIZE=500
+# ===== HTTPS-PORTAL Configuration ======
+HTTPS_PORTAL_DOMAINS='yuri.phx-erp.de -> phoenix-app'
+# ====== PHX-SYSTEM Configuration ======
+PHOENIX_SYSTEM_REPLICAS=1
 # ======= GraphQl =============
 GRAPHQL_DEBUG='true'
 GRAPHQL_TRACING='true'
 # ======= Integrations =============
 # Bill Bee
 BILL_BEE_ACTIVE='true'
-BILL_BEE_API_KEY='200EEBAD-06E8-4184-B430-3428D6447B92'
-BILL_BEE_API_USERNAME="yuri.lima"
-BILL_BEE_API_SECRET='YTB6tgm.dzb0ntf@zqr'
-BILL_BEE_API_URL='https://api.billbee.io/api/v1'
 # Channel Pilot Pro
 CHANNEL_PILOT_PRO_ACTIVE='true'
-CHANNEL_PILOT_PRO_URL='https://capi.channelpilot.com'
-CHANNEL_PILOT_PRO_API_MERCHANT_ID=''
-CHANNEL_PILOT_PRO_API_TOKEN=''
-CHANNEL_PILOT_PRO_ACCESS_TOKEN=''
-CHANNEL_PILOT_PRO_EXPIRED_AT=''
 # Shopify
 SHOPIFY_ACTIVE='true'
-SHOPIFY_HOST_NAME='https://phxerpdev.myshopify.com/admin/api/2024-10/graphql.json'
-SHOPIFY_API_KEY='159142eaee1b747e5cb084cc77564b3e'
-SHOPIFY_API_SECRET='1be9e99cad669092247f8735da3e0570'
-SHOPIFY_TOKEN='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'
-SHOPIFY_HOST_SCHEME='https'
-SHOPIFY_IS_EMBEDDED_APP='true'

.gitignore

@@ -7,3 +7,4 @@ e2e
 https_portal/log
 https_portal/data
 assets
+pgadmin/data

docker-compose.yml
@@ -1,165 +1,287 @@
 ---
 services:
   postgres:
     restart: always
-    image: "postgres:15.6-alpine"
-    container_name: phoenixDB
+    image: "postgres:15.1-alpine"
+    container_name: phoenixDB # Hostname
+    # logging:
+    #   driver: loki
+    #   options:
+    #     loki-url: "${LOKI_URL}"
+    #     loki-retries: "${LOKI_RETRIES}"
+    #     loki-batch-size: "${LOKI_BATCH_SIZE}"
+    #     loki-external-labels: "service=phx-postgres,env=prod"
     networks:
       - backend
-    ports:
-      - "5432:5432"
     environment:
-      - PGUSER=postgres
-      - DEBUG=false
-      - POSTGRES_DB=phoenix
-      - "POSTGRES_PASSWORD=${POSTGRES_PASSWORD}"
+      DEBUG: true
+      POSTGRES_DB: ${DB_NAME}
+      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
     volumes:
       - "./database:/var/lib/postgresql/data"
-      - "./database/pg_hba.conf:/etc/postgresql/pg_hba.conf" # Correct location
-    command: ["postgres", "-c", "hba_file=/etc/postgresql/pg_hba.conf"] # ✅ Tell PostgreSQL where to find it
     healthcheck:
-      test:
-        - CMD-SHELL
-        - pg_isready -U postgres
-      interval: 5s
-      timeout: 2s
-      retries: 5
+      test: [ "CMD-SHELL", "pg_isready -U postgres" ]
+      interval: 5s # Time between each health check
+      timeout: 2s # Number of failures before marking as unhealthy
+      retries: 5 # Grace period before health checks start
   pgadmin:
     image: dpage/pgadmin4
-    container_name: pgadmin_container
+    container_name: pgAdmin4_Ui
+    user: "5050:5050"
+    # logging:
+    #   driver: loki
+    #   options:
+    #     loki-url: "${LOKI_URL}"
+    #     loki-retries: "${LOKI_RETRIES}"
+    #     loki-batch-size: "${LOKI_BATCH_SIZE}"
+    #     loki-external-labels: "service=phx-pgadmin,env=prod"
     networks:
       - backend
       - frontend
     environment:
-      PGADMIN_DEFAULT_EMAIL: "pgadmin4@pgadmin.org"
-      PGADMIN_DEFAULT_PASSWORD: ${PGADMIN_DEFAULT_PASSWORD}
-      PGADMIN_CONFIG_SERVER_MODE: 'False'
-    ports:
-      - "${PGADMIN_PORT:-5050}:80"
+      PGADMIN_DEFAULT_EMAIL: ${PGADMIN_DEFAULT_EMAIL}
+      PGADMIN_DEFAULT_PASSWORD: ${SUPER_ADMIN_USER_PASSWORD}
+      PGADMIN_CONFIG_SERVER_MODE: 'True'
+      PGADMIN_CONFIG_WSGI_SCRIPT_NAME: "'/pgadmin4'"
+      PGADMIN_CONFIG_PROXY_X_PROTO_COUNT: 1
+      PGADMIN_SERVER_JSON_FILE: '/var/lib/pgadmin/servers.json'
+      PGADMIN_REPLACE_SERVERS_ON_STARTUP: 'True'
+      PGADMIN_CONFIG_DATA_DIR: "'/var/lib/pgadmin'"
+      PGADMIN_CONFIG_MASTER_PASSWORD_REQUIRED: 'False'
+      # pgpass dynamic vars
+      PGPASSFILE: /var/lib/pgadmin/pgpass
+      PGPASS_HOST: ${DB_HOST}
+      PGPASS_PORT: ${DB_PORT}
+      PGPASS_DB: ${DB_NAME}
+      PGPASS_USER: ${DB_USERNAME}
+      PGPASS_PASSWORD: ${POSTGRES_PASSWORD}
+      # Other config
+      ALLOW_SAVE_PASSWORD: 'False'
+      MFA_ENABLED: 'True'
+      MFA_FORCE_REGISTRATION: 'False'
+      MFA_SUPPORTED_METHODS: 'email'
+      MFA_EMAIL_SUBJECT: 'Your MFA code by PHX-ERP'
+      MAX_LOGIN_ATTEMPTS: 5
+      ENHANCED_COOKIE_PROTECTION: 'True'
+      SHOW_GRAVATAR_IMAGE: 'True'
+      SECURITY_EMAIL_SENDER: ${SECURITY_EMAIL_SENDER}
+      MAIL_SERVER: ${MAIL_SERVER}
+      MAIL_PORT: ${MAIL_PORT}
+      MAIL_USE_SSL: 'False'
+      MAIL_USE_TLS: 'False'
+      MAIL_USERNAME: ${MAIL_USERNAME}
+      MAIL_PASSWORD: ${MAIL_PASSWORD}
+      MAIL_DEBUG: 'False'
     volumes:
-      - "pgadmin:/var/lib/pgadmin"
+      - ./pgadmin/data:/var/lib/pgadmin
+      - ./pgadmin/pgadmin-entrypoint.sh:/docker-entrypoint.sh:ro
+    entrypoint: ["/bin/sh", "/docker-entrypoint.sh"]
+    depends_on:
+      postgres:
+        condition: service_healthy
+    healthcheck:
+      test: ["CMD", "wget", "-O", "-", "http://localhost:80/misc/ping"]
+      interval: 15s
+      timeout: 10s
+      retries: 5
+      start_period: 60s
   phoenix-app:
     restart: always
     image: "yurimatoslima/phoenix-frontend:alpha"
     container_name: phoenixApp
+    ports:
+      - "3000:3000" # Restrict to only allow access from Grafana Server IP
+    # logging:
+    #   driver: loki
+    #   options:
+    #     loki-url: "${LOKI_URL}"
+    #     loki-retries: "${LOKI_RETRIES}"
+    #     loki-batch-size: "${LOKI_BATCH_SIZE}"
+    #     loki-external-labels: "service=phx-app,env=prod,project=phoenix"
     volumes:
-      - "./app_custom:/usr/share/nginx/html/assets/custom"
+      - ./app_custom:/usr/share/nginx/html/assets/custom
+      - ./nginx/nginx.conf:/etc/nginx/nginx.conf
+      - ./nginx/includes:/etc/nginx/includes:ro
     networks:
-      - backend # primary network [external]
-      - frontend # internal network
+      - backend
+      - frontend
     depends_on:
-      - phoenix-system
+      pgadmin:
+        condition: service_healthy
+    healthcheck:
+      test: ["CMD", "wget", "--spider", "-q", "http://phoenixApp/login"] # localhost checks that the NGINX server inside the container is serving something at the root
+      interval: 10s # check every 10 seconds
+      timeout: 5s # allow 5 seconds per check
+      retries: 5 # mark as unhealthy after 5 failures
+      start_period: 15s # wait 15 seconds after container start before checking
   phoenix-system:
     restart: always
     image: "yurimatoslima/phoenix-backend:alpha"
+    # logging:
+    #   driver: loki
+    #   options:
+    #     loki-url: "${LOKI_URL}"
+    #     loki-retries: "${LOKI_RETRIES}"
+    #     loki-batch-size: "${LOKI_BATCH_SIZE}"
+    #     loki-external-labels: "service=phoenix-system,env=prod"
     environment:
       - "DB_HOST=phoenixDB"
+      - "DB_NAME=${DB_NAME}"
       - "DB_PASSWORD=${POSTGRES_PASSWORD}"
       - "DB_USERNAME=postgres"
      - "SUPER_ADMIN_USER_PASSWORD=${SUPER_ADMIN_USER_PASSWORD}"
       - "REDIS_PASSWORD=${REDIS_PASSWORD}"
       - RUN_JOB_QUEUE=${RUN_JOB_QUEUE}
-      - SMTP_FROM=${SMTP_FROM}
-      - SMTP_TYPE=${SMTP_TYPE}
-      - SMTP_NAME=${SMTP_NAME}
-      - SMTP_HOST=${SMTP_HOST}
-      - SMTP_PORT=${SMTP_PORT}
-      - SMTP_SECURE=${SMTP_SECURE}
-      - SMTP_USER=${SMTP_USER}
-      - SMTP_PASS=${SMTP_PASS}
-      - SMTP_LOGGING=${SMTP_LOGGING}
-      - SMTP_DEBUG=${SMTP_DEBUG}
-      - SMTP_TLS_REJECT_UNAUTHORIZED=${SMTP_TLS_REJECT_UNAUTHORIZED}
-      - SMTP_SECURE_CONNECTION=${SMTP_SECURE_CONNECTION}
-      - ENV_MODE=${ENV_MODE}
       - NODE_ENV=${NODE_ENV}
-      - SMTP_TLS_CIPHERS={SMTP_TLS_CIPHERS}
-      - BILL_BEE_ACTIVE=${BILL_BEE_ACTIVE}
-      - BILL_BEE_API_KEY=${BILL_BEE_API_KEY}
-      - BILL_BEE_API_USERNAME=${BILL_BEE_API_USERNAME}
-      - BILL_BEE_API_SECRET=${BILL_BEE_API_SECRET}
-      - BILL_BEE_API_URL=${BILL_BEE_API_URL}
-      - CHANNEL_PILOT_PRO_ACTIVE=${CHANNEL_PILOT_PRO_ACTIVE}
-      - CHANNEL_PILOT_PRO_URL=${CHANNEL_PILOT_PRO_URL}
-      - CHANNEL_PILOT_PRO_API_MERCHANT_ID=${CHANNEL_PILOT_PRO_API_MERCHANT_ID}
-      - CHANNEL_PILOT_PRO_API_TOKEN=${CHANNEL_PILOT_PRO_API_TOKEN}
-      - CHANNEL_PILOT_PRO_ACCESS_TOKEN=${CHANNEL_PILOT_PRO_ACCESS_TOKEN}
-      - CHANNEL_PILOT_PRO_EXPIRED_AT=${CHANNEL_PILOT_PRO_EXPIRED_AT}
-      - SHOPIFY_ACTIVE=${SHOPIFY_ACTIVE}
-      - SHOPIFY_HOST_NAME=${SHOPIFY_HOST_NAME}
-      - SHOPIFY_API_KEY=${SHOPIFY_API_KEY}
-      - SHOPIFY_API_SECRET=${SHOPIFY_API_SECRET}
-      - SHOPIFY_HOST_SCHEME=${SHOPIFY_HOST_SCHEME}
-      - SHOPIFY_IS_EMBEDDED_APP=${SHOPIFY_IS_EMBEDDED_APP}
     command: ["npm", "run", "start:server"]
     deploy:
-      replicas: 1 #change here if u want to have more replicas. Cant find a way to set via variable right now
+      replicas: ${PHOENIX_SYSTEM_REPLICAS} #change here if u want to have more replicas. Cant find a way to set via variable right now
     networks:
-      - backend
+      backend:
+        aliases:
+          - phoenix-system
     depends_on:
       postgres:
         condition: service_healthy
       phoenix-redis:
         condition: service_healthy
+    healthcheck:
+      test: ["CMD-SHELL", "curl -s http://phoenix-system:3000/health | grep -q '\"admin-api\":{\"status\":\"up\"}' && curl -s http://phoenix-system:3000/health | grep -q '\"database\":{\"status\":\"up\"}'"] # Checks both admin-api and database status
+      interval: 10s # Time between each health check
+      timeout: 6s # Max time to wait for each check
+      retries: 10 # Number of failures before marking as unhealthy
+      start_period: 40s # Grace period before health checks start
     volumes:
-      - "./logs:/usr/src/app/packages/dev-server/logs"
       - "./assets:/usr/src/app/packages/dev-server/assets"
       - "./server_custom:/usr/src/app/packages/dev-server/custom"
+      # - "./logs:/usr/src/app/packages/dev-server/logs"
   phoenix-worker:
     restart: always
     image: "yurimatoslima/phoenix-backend:alpha"
+    container_name: "phoenix-worker"
+    ports:
+      - "3001:3001" # Restrict to only allow access from Grafana Server IP
+    # logging:
+    #   driver: loki
+    #   options:
+    #     loki-url: "${LOKI_URL}"
+    #     loki-retries: "${LOKI_RETRIES}"
+    #     loki-batch-size: "${LOKI_BATCH_SIZE}"
+    #     loki-external-labels: "service=phx-worker,env=prod"
     networks:
       - backend
     environment:
       - DB_HOST=phoenixDB
+      - "DB_NAME=${DB_NAME}"
       - "DB_PASSWORD=${POSTGRES_PASSWORD}"
       - DB_USERNAME=postgres
       - "SUPER_ADMIN_USER_PASSWORD=${SUPER_ADMIN_USER_PASSWORD}"
       - REDIS_PASSWORD=${REDIS_PASSWORD}
-    # command: ["npm", "run", "start:worker"]
+      - NODE_ENV=${NODE_ENV}
+    entrypoint: ./entrypoint-phoenix-worker.sh
     depends_on:
+      phoenix-system:
+        condition: service_healthy
       postgres:
         condition: service_healthy
+    healthcheck:
+      test: [ "CMD-SHELL", "curl -s http://phoenix-worker:3001/health | grep -q '\"status\":\"ok\"'" ] # Check if worker responds with status ok
+      interval: 10s # Time between each health check
+      timeout: 6s # Max time to wait for each check
+      retries: 20 # Grace period before health checks start
+      start_period: 30s # Grace period before health checks start
     volumes:
       - "./assets:/usr/src/app/packages/dev-server/assets"
       - "./server_custom:/usr/src/app/packages/dev-server/custom"
-      - "./logs:/usr/src/app/packages/dev-server/logs"
+      # - "./logs:/usr/src/app/packages/dev-server/logs"
   phoenix-redis:
     image: 'bitnami/redis:latest'
     container_name: redis
-    command: /opt/bitnami/scripts/redis/run.sh --maxmemory 100mb --dir /bitnami/redis/data
-    user: 1001:1001 # Non-root user in Bitnami images The /bitnami/redis/data directory inside the container is already owned by 1001, avoiding permission issues.
+    command: /opt/bitnami/scripts/redis/run.sh --maxmemory 100mb
+    user: root
+    # logging:
+    #   driver: loki
+    #   options:
+    #     loki-url: "${LOKI_URL}"
+    #     loki-retries: "${LOKI_RETRIES}"
+    #     loki-batch-size: "${LOKI_BATCH_SIZE}"
+    #     loki-external-labels: "service=phx-redis,env=prod"
     networks:
       - backend
     restart: always
     environment:
       ALLOW_EMPTY_PASSWORD: "no"
+      REDIS_DISABLE_COMMANDS: FLUSHDB,FLUSHALL,CONFIG
       REDIS_PASSWORD: ${REDIS_PASSWORD}
     healthcheck:
       test: [ "CMD", "redis-cli", "--raw", "incr", "ping" ]
       interval: 5s
       retries: 10 # Increase retries if Redis takes a while to start
       timeout: 5s # Increase timeout if needed
-    volumes:
-      - "./redis/data:/bitnami/redis/data"
-      - /opt/phx/redis/tmp:/opt/bitnami/redis/tmp # ✅ Fix permission issue
-      - /opt/phx/redis/logs:/opt/bitnami/redis/logs # ✅ Fix logs permission issue
-      - ./redis.conf:/opt/bitnami/redis/etc/redis.conf # ✅ Use a writable redis.conf
     depends_on:
       postgres:
         condition: service_healthy
-  node_exporter:
+    volumes:
+      - "./redis/data:/bitnami/redis/data"
+  phoenix-health-exporter:
+    image: phxerp/phoenix-health-exporter:alpha
+    container_name: health_exporter
+    restart: unless-stopped
+    # logging:
+    #   driver: loki
+    #   options:
+    #     loki-url: "${LOKI_URL}"
+    #     loki-retries: "${LOKI_RETRIES}"
+    #     loki-batch-size: "${LOKI_BATCH_SIZE}"
+    #     loki-external-labels: "service=phx-health-exporter,env=prod"
+    ports:
+      - "9800:9800"
+    environment:
+      DB_HOST: ${DB_HOST}
+      DB_NAME: ${DB_NAME}
+      DB_PASSWORD: ${POSTGRES_PASSWORD}
+      DB_USERNAME: ${DB_USERNAME}
+    networks:
+      - frontend
+      - backend
+    volumes:
+      - /etc/hostname:/etc/host_hostname:ro # This ensures the container always uses the real machine hostname, even if restarted or recreated.
+    security_opt:
+      - no-new-privileges:true
+    deploy:
+      resources:
+        limits:
+          cpus: '0.25'
+          memory: 128M
+    depends_on:
+      phoenix-system:
+        condition: service_healthy
+      phoenix-worker:
+        condition: service_healthy
+      postgres:
+        condition: service_healthy
+    healthcheck:
+      test: ["CMD-SHELL", "curl -sf http://localhost:9800/healthz || exit 1"]
+      interval: 1m
+      timeout: 5s
+      retries: 3
+      start_period: 15s
+  node-exporter:
     image: quay.io/prometheus/node-exporter:latest
     container_name: node_exporter
+    # logging:
+    #   driver: loki
+    #   options:
+    #     loki-url: "${LOKI_URL}"
+    #     loki-retries: "${LOKI_RETRIES}"
+    #     loki-batch-size: "${LOKI_BATCH_SIZE}"
+    #     loki-external-labels: "service=phx-node-exporter,env=prod"
     networks:
       - metrics
+      - frontend
     restart: unless-stopped
     ports:
-      - "9100:9100"
+      - "9100:9100" # Restrict to only allow access from Grafana Server IP
     command:
       - "--path.procfs=/host/proc"
      - "--path.sysfs=/host/sys"
@@ -169,13 +291,66 @@ services:
       - "/proc:/host/proc:ro"
       - "/sys:/host/sys:ro"
       - "/:/host:ro,rslave"
+    security_opt:
+      - no-new-privileges:true
+    deploy:
+      resources:
+        limits:
+          cpus: '0.25'
+          memory: 128M
+    healthcheck:
+      test: ["CMD", "wget", "-qO-", "http://localhost:9100/metrics"]
+      interval: 15s
+      timeout: 5s
+      retries: 3
+      start_period: 20s
+  # nginx-exporter:
+  #   image: nginx/nginx-prometheus-exporter:1.4.2
+  #   container_name: nginx_exporter
+  #   restart: unless-stopped
+  #   # logging:
+  #   #   driver: loki
+  #   #   options:
+  #   #     loki-url: "${LOKI_URL}"
+  #   #     loki-retries: "${LOKI_RETRIES}"
+  #   #     loki-batch-size: "${LOKI_BATCH_SIZE}"
+  #   #     loki-external-labels: "service=phx-nginx-exporter,env=prod"
+  #   ports:
+  #     - "9113:9113" # Restrict to only allow access from Grafana Server IP
+  #   command:
+  #     - '--nginx.scrape-uri=http://phoenixApp/stub_status'
+  #   security_opt:
+  #     - no-new-privileges:true
+  #   deploy:
+  #     resources:
+  #       limits:
+  #         cpus: '0.25'
+  #         memory: 128M
+  #   depends_on:
+  #     phoenix-app:
+  #       condition: service_healthy
+  #   networks:
+  #     - frontend
+  #     - metrics
+  #   healthcheck:
+  #     test: ["CMD", "wget", "-qO-", "http://localhost:9113/metrics"] # Not working as expected
+  #     interval: 15s
+  #     timeout: 5s
+  #     retries: 3
+  #     start_period: 10s
   https_portal:
     container_name: https_portal
     image: "steveltn/https-portal:1.21"
     restart: unless-stopped
+    # logging:
+    #   driver: loki
+    #   options:
+    #     loki-url: "${LOKI_URL}"
+    #     loki-retries: "${LOKI_RETRIES}"
+    #     loki-batch-size: "${LOKI_BATCH_SIZE}"
+    #     loki-external-labels: "service=phx-https-portal,env=prod"
     networks:
       - frontend # [ PgAdmin, Phoenix-App ]
-      - external # [ Outside of the World]
     ports:
       - "80:80"
       - "443:443"
@@ -186,18 +361,18 @@ services:
       DEBUG: "true"
       RENEW_MARGIN_DAYS: 30
       CLIENT_MAX_BODY_SIZE: 0
+      SERVER_NAMES_HASH_BUCKET_SIZE: 128 # Increase hash bucket size for server names - good for bigger domains names, if not set correctly, it will throw an error, break the container.
       # FORCE_RENEW: 'true'
-      DOMAINS: 'yuri.phx-erp.de -> phoenix-app'
+      DOMAINS: "${HTTPS_PORTAL_DOMAINS}"
     volumes:
       - ./https_portal/data:/var/lib/https-portal # ssl_certs, vhost.d, htdocs
       - ./https_portal/log:/var/log/nginx # nginx logs
-      - ./https_portal/config/custom_nginx.conf:/opt/custom_nginx.conf:ro # ✅ Mount file in a safe path
+      # - ./https_portal/config/custom_nginx.conf:/opt/custom_nginx.conf:ro # ✅ Mount file in a safe path
     depends_on:
-      - phoenix-app
-      - phoenix-system
-      - pgadmin
-      - phoenix-redis
-      - postgres
+      pgadmin:
+        condition: service_healthy
+      postgres:
+        condition: service_healthy
   fail2ban:
     image: crazymax/fail2ban:latest
     container_name: fail2ban
@@ -210,6 +385,7 @@ services:
       - ./fail2ban/jail.d:/etc/fail2ban/jail.d
       - /var/log:/var/log:ro
     restart: always
 networks:
   backend:
     driver: bridge
@@ -224,17 +400,10 @@ networks:
     ipam:
       config:
        - subnet: 172.20.0.0/16
-  external:
-    driver: bridge
-    external: false
   metrics:
     driver: bridge
     external: false
     ipam:
       config:
         - subnet: 172.22.0.0/16
-volumes:
-  pgadmin: null


@@ -1,31 +1,31 @@
 -----BEGIN CERTIFICATE-----
-MIIE7TCCA9WgAwIBAgISBN9fSOkvS54NMLTgeStNS2oTMA0GCSqGSIb3DQEBCwUA
+MIIE+zCCA+OgAwIBAgISBeKK1knRhuU1oi3S5b7is2lPMA0GCSqGSIb3DQEBCwUA
 MDMxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MQwwCgYDVQQD
-EwNSMTEwHhcNMjUwMTA4MDMyMzU2WhcNMjUwNDA4MDMyMzU1WjAaMRgwFgYDVQQD
+EwNSMTEwHhcNMjUwNTA4MTUxNTE3WhcNMjUwODA2MTUxNTE2WjAaMRgwFgYDVQQD
 Ew95dXJpLnBoeC1lcnAuZGUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
 AQCkoSgHOsFmtqy02FZoJflGQlqx2Lc9WP12L5i5D/hutP8gpMoEfq6WyHD7Gn25
 LR2BTn8ceqMMoArfJs6SsEXT7xdbmWSS9r4pWtbpZLWO/jwtYBbg/lwCTJUbiIvD
 wwRLtjP+xVlwfuslkgcEdPCD9CaigGkhcLQKgzL2hhwYwBMaA94MVX0rhd8w66zA
 cwMcwo8VWDb0PwD2TAJqBXupmjMQ8XIob57rC0drO9175Wp2UX13W3m/NaOylKyU
 Ct7uoClu/LtBXdXG0TmKTSlYdlEkwyWJYBNEnog0QWbsxl4PCaRaMianjgln5BGt
-rmBUApxH0qm+Ct1Svw5HdB9tAgMBAAGjggISMIICDjAOBgNVHQ8BAf8EBAMCBaAw
+rmBUApxH0qm+Ct1Svw5HdB9tAgMBAAGjggIgMIICHDAOBgNVHQ8BAf8EBAMCBaAw
 HQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYD
 VR0OBBYEFEKJ54GyFoST7ssNgV+4Qn/QEubyMB8GA1UdIwQYMBaAFMXPRqTq9MPA
-emyVxC2wXpIvJuO5MFcGCCsGAQUFBwEBBEswSTAiBggrBgEFBQcwAYYWaHR0cDov
-L3IxMS5vLmxlbmNyLm9yZzAjBggrBgEFBQcwAoYXaHR0cDovL3IxMS5pLmxlbmNy
-Lm9yZy8wGgYDVR0RBBMwEYIPeXVyaS5waHgtZXJwLmRlMBMGA1UdIAQMMAowCAYG
-Z4EMAQIBMIIBAwYKKwYBBAHWeQIEAgSB9ASB8QDvAHUAouMK5EXvva2bfjjtR2d3
-U9eCW4SU1yteGyzEuVCkR+cAAAGURCZHUAAABAMARjBEAiAxBCBQSJT8mWTph0yI
-HQsoR6RFyMExrihZhIKpkMoRwwIgftlv5sQPZnto1KMuwrKwQXUdMej2Pb2/QgUM
-eIyplEQAdgDM+w9qhXEJZf6Vm1PO6bJ8IumFXA2XjbapflTA/kwNsAAAAZREJkdh
-AAAEAwBHMEUCIG02M+HEsqp2J7GT9Lkce/1FJKyFOo3lupETe/wvtfzQAiEA7PnP
-MRZ481CHAg6HMYBv/lcSCBBOsjt6NASP8ZIgNvQwDQYJKoZIhvcNAQELBQADggEB
-AFAeYvfig+Eb9IZlLxAXVKpnnIc11D3Tyvfe/c3YNbv2krXWd7n64TMEdE8IqzLl
-Ew/2R7v7Zm8dsgnmWET2TGDT0O6ZAzeYTictTqaYkg0WMGGq1gfovjUt6E3aGhYm
-TaacT0ypXm6zE0JpotXkJESNbfYx+zO0VNTCxYtfcTxeGFvqG41ZljvB5tWx2ODU
-dlYh9omk1OnIgxY6LCdNdhIpNIfcswx0FN6dLc4hNIlZeUwAznao0/DB7M9kKKbL
-JEISF1PD7+qgBCOLnKxylYx+aV3Bmg9jaUzySB2j95MLJirPYqqDa4ObU1UKa9v0
-RWOYa6/PjG44rdUjvU6GsLY=
+emyVxC2wXpIvJuO5MDMGCCsGAQUFBwEBBCcwJTAjBggrBgEFBQcwAoYXaHR0cDov
+L3IxMS5pLmxlbmNyLm9yZy8wGgYDVR0RBBMwEYIPeXVyaS5waHgtZXJwLmRlMBMG
+A1UdIAQMMAowCAYGZ4EMAQIBMC4GA1UdHwQnMCUwI6AhoB+GHWh0dHA6Ly9yMTEu
+Yy5sZW5jci5vcmcvODAuY3JsMIIBBQYKKwYBBAHWeQIEAgSB9gSB8wDxAHYAzPsP
+aoVxCWX+lZtTzumyfCLphVwNl422qX5UwP5MDbAAAAGWsKyn1QAABAMARzBFAiBz
+v6PsRPAwXWcYu6BOQl5QmzzI6BP9Jl/t+teqR7rVgAIhAPrl3JjVjIPBuWh/LvfR
+SSH1bJSLVuZpP/czfG1yZI+XAHcArxgaKNaMo+CpikycZ6sJ+Lu8IrquvLE4o6Gd
+0/m2Aw0AAAGWsKyqywAABAMASDBGAiEArUQdsL6CsB9cBR2ZaXd5yOC3VypYyOmp
+ZQPD/iPIOP8CIQDt0R00Uq3MTybefEXwTo34ixkQyN9vw/xh2OkbYYBP8DANBgkq
+hkiG9w0BAQsFAAOCAQEAGxx7PNuGrMyX530iBmPfUtmRXBLmsVUmj1jalbUzx//i
+oFxJ7DWAau8MZHPF+tSvrbReKjCFgpkOfSdaSMubWHlAPaSMP6NGZUwmLt7jApke
+qzYKgYWhovh/J8uYgQ7KEDPJeXYeDIHbmnyyHtgxI0eXKlpN3hgQiIxC2Q9JZc7+
+enktsKskPWpwHNxVPHYKF9VGbFMdOxBjr6wSRecmzD3lGXv0O0r9e84ULSfmK8KQ
+a+TCZnwVM1tTNMm5TMKRa79nQE0+3R2wsXrqj2PfFtjS2haJgBbSQfSSqWlcft9C
+WPuFNj8uG3ZVjqw7uOXjE6hy1AqdJbDt2gBSEB3gIA==
 -----END CERTIFICATE-----
 -----BEGIN CERTIFICATE-----


@@ -0,0 +1,15 @@
# pgAdmin reverse proxy (under subpath)
location /pgadmin4 {
    proxy_pass http://pgAdmin4_Ui/;
    proxy_set_header X-Script-Name /pgadmin4;
    proxy_set_header X-Scheme $scheme;
    proxy_set_header X-Forwarded-Proto $forwarded_proto;
    # Include headers for proxying
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Host $host;
    proxy_set_header X-Forwarded-Proto $forwarded_proto;
    # End of headers
    proxy_redirect off;
}

nginx/nginx.conf
@@ -1,126 +1,392 @@
-worker_processes 1;
+# Main process configuration
+worker_processes 1;
 events {
     worker_connections 1024;
 }
 http {
+    geo $frontend_whitelist {
+        default 1;
+        127.0.0.1 1;
+        172.20.0.0/16 1; # Frontend Docker subnet
+        5.75.153.161 1; # Grafana or monitoring
+        167.235.254.4 1; # Ansible server IP
+    }
+    geo $backend_whitelist {
+        default 1;
+        127.0.0.1 1;
+        172.19.0.0/16 1; # Backend Docker subnet
+        5.75.153.161 1; # Grafana or monitoring
+        167.235.254.4 1; # Ansible server IP
+    }
+    # These settings ensure that $remote_addr reflects the real client IP forwarded by https-portal, which is needed for your allow rules to work correctly
+    # Recommended for resolving client IP behind proxy
+    # Docker networks where both frontend and backend containers communicate through NGINX.
+    # To avoid potential misclassification of real client IPs from backend routes.
+    # The set_real_ip_from directive doesnt allow access — it just instructs NGINX to trust the X-Forwarded-For header from those IPs.
+    set_real_ip_from 172.20.0.0/16; # Replace with your Docker network subnet (matches your `frontend` network)
+    set_real_ip_from 172.19.0.0/16; # Replace with your Docker network subnet (matches your `backend` network)
+    real_ip_header X-Forwarded-For;
+    real_ip_recursive on;
+    resolver 127.0.0.11 valid=10s;
+    resolver_timeout 5s;
+    upstream phoenix_system_cluster {
+        zone phoenix_system_cluster 64k;
+        least_conn;
+        server phoenix-system:3000 resolve fail_timeout=1s max_fails=0;
+        # ADD_SYSTEM_SERVERS_HERE
+    }
+    upstream phoenix_worker_cluster {
+        zone phoenix_worker_cluster 64k;
+        least_conn;
+        server phoenix-worker:3001 resolve fail_timeout=1s max_fails=0;
+        # ADD_WORKER_SERVERS_HERE
+    }
+    server_tokens off; # Disable NGINX version tokens to avoid leaking NGINX version.
+    # File handling & upload limits
     sendfile on;
     client_max_body_size 64m;
+    #client_body_temp_path /data/temp;
+    # Prevent warning when setting many proxy headers, like we do
+    proxy_headers_hash_max_size 1024;
+    proxy_headers_hash_bucket_size 128;
+    # Gzip compression (for better bandwidth efficiency)
+    gzip on;
+    gzip_min_length 1000;
+    gzip_proxied expired no-cache no-store private auth;
+    gzip_types text/plain text/css application/json application/javascript application/x-javascript text/xml application/xml application/xml+rss text/javascript;
+    # Trust the protocol from upstream proxy/load balancer
+    map $http_x_forwarded_proto $forwarded_proto {
+        default $scheme;
+        https https;
+        http http;
+    }
+    # File types and default mime type
+    include /etc/nginx/mime.types;
+    default_type application/octet-stream;
+    # 🧩 Logs
+    map $request_uri $loggable {
+        default 1;
+        ~^/stub_status 0;
+        ~^/health/system 0;
+        ~^/health/worker 0;
+    }
+    log_format main_with_realip '$remote_addr - $realip_remote_addr [$time_local] '
+                                '"$request" $status $body_bytes_sent '
+                                '"$http_referer" "$http_user_agent"';
+    log_format json_compatible escape=json '{'
+        '"time":"$time_iso8601",'
+        '"remote_addr":"$remote_addr",'
+        '"proxy_addr":"$proxy_protocol_addr",'
+        '"x_forwarded_for":"$http_x_forwarded_for",'
+        '"request_method":"$request_method",'
+        '"request_uri":"$request_uri",'
+        '"status":$status,'
+        '"body_bytes_sent":$body_bytes_sent,'
+        '"request_time":$request_time,'
+        '"upstream_response_time":"$upstream_response_time",'
+        '"http_referer":"$http_referer",'
+        '"http_user_agent":"$http_user_agent",'
+        '"host":"$host",'
+        '"realip":"$realip_remote_addr"'
+        '}';
+    access_log /var/log/nginx/access_json.log json_compatible if=$loggable; # JSON format for Loki
+    access_log /var/log/nginx/access.log main_with_realip if=$loggable;
+    # End of logs
+    ##################################################################
+    # 🧩 HTTP Server Block
+    ##################################################################
     server {
         listen 80;
-        server_name localhost;
-        root /usr/share/nginx/html;
-        index index.html index.htm;
-        include /etc/nginx/mime.types;
-        gzip on;
-        gzip_min_length 1000;
-        gzip_proxied expired no-cache no-store private auth;
-        gzip_types text/plain text/css application/json application/javascript application/x-javascript text/xml application/xml application/xml+rss text/javascript;
-        sendfile on;
-        client_max_body_size 64m;
+        server_name _;
+        # Security headers
+        add_header X-Frame-Options "SAMEORIGIN" always;
+        add_header X-XSS-Protection "1; mode=block" always;
+        add_header X-Content-Type-Options "nosniff" always;
+        add_header Referrer-Policy "no-referrer-when-downgrade" always;
+        root /usr/share/nginx/html;
+        index index.html index.htm;
+        # Frontend SPA fallback
         location / {
             try_files $uri $uri/ /index.html;
         }
-        # https://serverfault.com/questions/379675/nginx-reverse-proxy-url-rewrite
+        # Backend API routes
         location /backend-api/ {
-            #rewrite ^/backend-api(.*) /$1 break;
-            proxy_pass http://phoenix-system:3000/;
+            proxy_pass http://phoenix_system_cluster/;
+            # Include headers for proxying
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
             proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-            proxy_set_header Host $http_host;
+            proxy_set_header X-Forwarded-Host $host;
+            proxy_set_header X-Forwarded-Proto $forwarded_proto;
+            # End of headers
         }
         location /admin-api {
-            proxy_pass http://phoenix-system:3000/admin-api;
+            proxy_pass http://phoenix_system_cluster/admin-api;
+            # Include headers for proxying
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
             proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-            proxy_set_header Host $http_host;
+            proxy_set_header X-Forwarded-Host $host;
+            proxy_set_header X-Forwarded-Proto $forwarded_proto;
+            # End of headers
         }
         location /remote-assets {
-            proxy_pass http://phoenix-system:3000/remote-assets;
+            proxy_pass http://phoenix_system_cluster/remote-assets;
+            # Include headers for proxying
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
             proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-            proxy_set_header Host $http_host;
+            proxy_set_header X-Forwarded-Host $host;
+            proxy_set_header X-Forwarded-Proto $forwarded_proto;
+            # End of headers
         }
         location /sti {
-            proxy_pass http://phoenix-system:3000/sti;
+            proxy_pass http://phoenix_system_cluster/sti;
+            # Include headers for proxying
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
             proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-            proxy_set_header Host $http_host;
+            proxy_set_header X-Forwarded-Host $host;
+            proxy_set_header X-Forwarded-Proto $forwarded_proto;
+            # End of headers
         }
+        # WebSocket support
         location /ws {
-            proxy_pass http://phoenix-system:3000/graphql;
+            proxy_pass http://phoenix_system_cluster/graphql;
             proxy_http_version 1.1;
             proxy_set_header Upgrade $http_upgrade;
             proxy_set_header Connection "upgrade";
+            # Include headers for proxying
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            proxy_set_header X-Forwarded-Host $host;
+            proxy_set_header X-Forwarded-Proto $forwarded_proto;
+            # End of headers
         }
+        # Reverse proxy for pgAdmin (subpath support)
+        include /etc/nginx/includes/*.conf;
+        # Health check endpoints -> used by the health check exporter
+        location /health/system {
+            proxy_pass http://phoenix_system_cluster/health;
+            # Secure the health check endpoint
+            if ($backend_whitelist = 0) {
+                return 403;
+            }
+            # End of security
+            # Include headers for proxying
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            proxy_set_header X-Forwarded-Proto $forwarded_proto;
+            # End of headers
+        }
+        # location /health/system/metrics {
+        #     proxy_pass http://phoenix_system_cluster/health/metrics;
+        #     # Secure the health check endpoint
+        #     # if ($backend_whitelist = 0) {
+        #     #     return 403;
+        #     # }
+        #     # End of security
+        #     # Include headers for proxying
+        #     proxy_set_header Host $host;
+        #     proxy_set_header X-Real-IP $remote_addr;
+        #     proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        #     proxy_set_header X-Forwarded-Proto $forwarded_proto;
+        #     # End of headers
+        # }
+        location /health/worker {
+            proxy_pass http://phoenix_worker_cluster/health;
+            # Secure the health check endpoint
+            if ($backend_whitelist = 0) {
+                return 403;
+            }
+            # End of security
+            # Include headers for proxying
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            proxy_set_header X-Forwarded-Proto $forwarded_proto;
+            # End of headers
+        }
+        # location /health/worker/metrics {
+        #     proxy_pass http://phoenix_worker_cluster/health/metrics;
+        #     # Secure the health check endpoint
+        #     # if ($backend_whitelist = 0) {
+        #     #     return 403;
+        #     # }
+        #     # End of security
+        #     # Include headers for proxying
+        #     proxy_set_header Host $host;
+        #     proxy_set_header X-Real-IP $remote_addr;
+        #     proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        #     proxy_set_header X-Forwarded-Proto $forwarded_proto;
+        #     # End of headers
+        # }
+        location /stub_status {
+            stub_status;
+            # Secure the stub status endpoint
+            if ($frontend_whitelist = 0) {
+                return 403;
+            }
+            # End of security
+        }
     }
-    server { # This new server will watch for traffic on 443
-        listen 443 ssl http2;
-        server_name localhost;
+    ##################################################################
+    # 🔐 HTTPS Server Block
+    ##################################################################
+    server {
+        listen 443 ssl;
+        http2 on;
+        server_name _;
+        # Security headers
+        add_header X-Frame-Options "SAMEORIGIN" always;
+        add_header X-XSS-Protection "1; mode=block" always;
+        add_header X-Content-Type-Options "nosniff" always;
+        add_header Referrer-Policy "no-referrer-when-downgrade" always;
         ssl_certificate /etc/nginx/external-certificate/certificate.crt;
         ssl_certificate_key /etc/nginx/external-certificate/certificate.key;
-        root /usr/share/nginx/html;
-        index index.html index.htm;
-        include /etc/nginx/mime.types;
-        gzip on;
-        gzip_min_length 1000;
-        gzip_proxied expired no-cache no-store private auth;
-        gzip_types text/plain text/css application/json application/javascript application/x-javascript text/xml application/xml application/xml+rss text/javascript;
-        sendfile on;
-        client_max_body_size 64m;
+        root /usr/share/nginx/html;
+        index index.html index.htm;
         location / {
             try_files $uri $uri/ /index.html;
         }
-        # https://serverfault.com/questions/379675/nginx-reverse-proxy-url-rewrite
+        # Secure API routes
        location /backend-api/ {
-            #rewrite ^/backend-api(.*) /$1 break;
-            proxy_pass http://phoenix-system:3000/;
+            proxy_pass http://phoenix_system_cluster/;
+            # Include headers for proxying
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
             proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-            proxy_set_header Host $http_host;
-            proxy_set_header X-Forwarded-Proto https;
+            proxy_set_header X-Forwarded-Host $host;
+            proxy_set_header X-Forwarded-Proto $forwarded_proto;
+            # End of headers
         }
         location /admin-api {
-            proxy_pass http://phoenix-system:3000/admin-api;
+            proxy_pass http://phoenix_system_cluster/admin-api;
+            # Include headers for proxying
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
             proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-            proxy_set_header Host $http_host;
+            proxy_set_header X-Forwarded-Host $host;
+            proxy_set_header X-Forwarded-Proto $forwarded_proto;
+            # End of headers
         }
         location /remote-assets {
-            proxy_pass http://phoenix-system:3000/remote-assets;
+            proxy_pass http://phoenix_system_cluster/remote-assets;
+            # Include headers for proxying
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
             proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-            proxy_set_header Host $http_host;
-            proxy_set_header X-Forwarded-Proto https;
+            proxy_set_header X-Forwarded-Host $host;
+            proxy_set_header X-Forwarded-Proto $forwarded_proto;
+            # End of headers
         }
         location /sti {
-            proxy_pass http://phoenix-system:3000/sti;
+            proxy_pass http://phoenix_system_cluster/sti;
+            # Include headers for proxying
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
             proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-            proxy_set_header Host $http_host;
+            proxy_set_header X-Forwarded-Host $host;
+            proxy_set_header X-Forwarded-Proto $forwarded_proto;
+            # End of headers
        }
         location /ws {
-            proxy_pass http://phoenix-system:3000/graphql;
+            proxy_pass http://phoenix_system_cluster/graphql;
             proxy_http_version 1.1;
             proxy_set_header Upgrade $http_upgrade;
             proxy_set_header Connection "upgrade";
+            # Include headers for proxying
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            proxy_set_header X-Forwarded-Host $host;
+            proxy_set_header X-Forwarded-Proto $forwarded_proto;
+            # End of headers
         }
+        # Reverse proxy for pgAdmin (subpath support)
+        include /etc/nginx/includes/*.conf;
+        location /health/system {
+            proxy_pass http://phoenix_system_cluster/health;
+            # Secure the health check endpoint
+            if ($backend_whitelist = 0) {
+                return 403;
+            }
+            # End of security
+            # Include headers for proxying
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            proxy_set_header X-Forwarded-Proto $forwarded_proto;
+            # End of headers
+        }
+        location /health/worker {
+            proxy_pass http://phoenix_worker_cluster/health;
+            # Secure the health check endpoint
+            if ($backend_whitelist = 0) {
+                return 403;
+            }
+            # End of security
+            # Include headers for proxying
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            proxy_set_header X-Forwarded-Proto $forwarded_proto;
+            # End of headers
+        }
+        location /stub_status {
+            stub_status;
+            # Secure the stub status endpoint
+            if ($frontend_whitelist = 0) {
+                return 403;
+            }
+            # End of security
+        }
     }
 }

pgadmin/pgadmin-entrypoint.sh

@@ -0,0 +1,49 @@
#!/bin/sh
set -e
echo "🔧 Entrypoint: Ensuring .pgpass directory and file"
PGADMIN_HOME="/var/lib/pgadmin"
PGPASS_PATH="${PGADMIN_HOME}/pgpass"
SERVERS_JSON_PATH="/var/lib/pgadmin/servers.json"
# Ensure parent directory exists
mkdir -p "$PGADMIN_HOME"
# Create or overwrite .pgpass file
echo "${PGPASS_HOST}:${PGPASS_PORT}:${PGPASS_DB}:${PGPASS_USER}:${PGPASS_PASSWORD}" > "$PGPASS_PATH"
chmod 600 "$PGPASS_PATH"
chown 5050:5050 "$PGPASS_PATH"
export PGPASSFILE="$PGPASS_PATH"
echo "✅ .pgpass ready at $PGPASS_PATH"
echo "🛠️ Generating servers.json for pgAdmin..."
# Try to ensure /pgadmin4 is owned by 5050 if possible
if [ -d /pgadmin4 ]; then
echo "🔧 Attempting to chown /pgadmin4 to 5050:5050"
chown 5050:5050 /pgadmin4 2>/dev/null || echo "⚠️ Could not chown /pgadmin4 (likely read-only or permission issue)"
fi
cat <<EOF > "$SERVERS_JSON_PATH"
{
  "Servers": {
    "1": {
      "Name": "Phoenix DB",
      "Group": "PHX GROUP",
      "Host": "${PGPASS_HOST}",
      "Port": ${PGPASS_PORT},
      "MaintenanceDB": "${PGPASS_DB}",
      "Username": "${PGPASS_USER}",
      "SSLMode": "prefer",
      "PassFile": "$PGPASSFILE"
    }
  }
}
EOF
chmod 600 "$SERVERS_JSON_PATH"
chown 5050:5050 "$SERVERS_JSON_PATH"
echo "✅ servers.json created at $SERVERS_JSON_PATH"
exec /entrypoint.sh "$@"
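The script writes a standard libpq password file for pgAdmin. With the PGPASS_* values wired through docker-compose.yml (DB_HOST=phoenixDB, DB_PORT=5432, DB_NAME=phoenix, DB_USERNAME=postgres), the generated entry and a quick permission check would look roughly like this; the concrete values shown are taken from .env above and the check itself is only an illustrative sketch:

    # /var/lib/pgadmin/pgpass -- format: hostname:port:database:username:password, must be mode 600
    phoenixDB:5432:phoenix:postgres:<POSTGRES_PASSWORD>

    # sanity check inside the pgadmin container (illustrative)
    ls -l /var/lib/pgadmin/pgpass    # expect -rw------- owned by 5050:5050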

redis.conf

@@ -1,4 +0,0 @@
rename-command FLUSHDB ""
rename-command FLUSHALL ""
rename-command CONFIG ""


@@ -1,8 +1,7 @@
 /* tslint:disable:no-console */
 import path from 'path';
-import { ConnectionOptions } from 'typeorm';
-// import { DataSourceOptions } from 'typeorm';
-import { WinstonLogger, RedisSessionCachePlugin, LogLevel, TypeOrmLogger, SystemConfig, DefaultJobQueuePlugin } from '@phoenix/core';
+import { DataSourceOptions } from 'typeorm';
+import { WinstonLogger, LogLevel, TypeOrmLogger, SystemConfig, DefaultJobQueuePlugin } from '@phoenix/core';
 import { AssetServerPlugin } from '@phoenix/asset-server-plugin';
 import { ADMIN_API_PATH, API_PORT, SHOP_API_PATH, SUPER_ADMIN_USER_IDENTIFIER } from '@phoenix/common';
 import { EmailPlugin, FileBasedTemplateLoader, defaultEmailHandlers } from '@phoenix/email-plugin';
@@ -10,6 +9,8 @@ import { BillBeePlugin } from "@phoenix/bill-bee-plugin";
 import { ChannelPilotProPlugin } from "@phoenix/channel-pilot-pro-plugin";
 import { ShopifyPlugin } from '@phoenix/shopify-plugin';
+// RedisSessionCachePlugin
 /**
  * Config settings used during development
  */
@@ -72,15 +73,15 @@ export const customConfig: SystemConfig = {
         defaultTakeNumber: 100,
     },
     plugins: [
-        RedisSessionCachePlugin.init({
-            namespace: 'phx-session',
-            redisOptions: {
-                host: process.env.REDIS_HOST || 'redis',
-                port: process.env.REDIS_PORT ? parseInt(process.env.REDIS_PORT) : 6379,
-                db: process.env.REDIS_DB ? parseInt(process.env.REDIS_DB) : 0,
-                password: process.env.REDIS_PASSWORD || 'admin'
-            }
-        }),
+        // RedisSessionCachePlugin.init({
+        //     namespace: 'phx-session',
+        //     redisOptions: {
+        //         host: process.env.REDIS_HOST || 'redis',
+        //         port: process.env.REDIS_PORT ? parseInt(process.env.REDIS_PORT) : 6379,
+        //         db: process.env.REDIS_DB ? parseInt(process.env.REDIS_DB) : 0,
+        //         password: process.env.REDIS_PASSWORD || 'admin'
+        //     }
+        // }),
         AssetServerPlugin.init({
             route: 'remote-assets',
             assetUploadDir: path.join(__dirname, 'assets'),
@@ -104,28 +105,9 @@ export const customConfig: SystemConfig = {
         }),
         BillBeePlugin.init({
             active: process.env.BILL_BEE_ACTIVE === 'true',
-            apiUrl: process.env.BILL_BEE_API_URL,
-            apiKey: process.env.BILL_BEE_API_KEY,
-            username: process.env.BILL_BEE_API_USERNAME,
-            password: process.env.BILL_BEE_API_SECRET,
-            header: {
-                'X-Billbee-Api-Key': process.env.BILL_BEE_API_KEY,
-                'Authorization': `Basic ${Buffer.from(`${process.env.BILL_BEE_API_USERNAME}:${process.env.BILL_BEE_API_SECRET}`).toString('base64')}`,
-            }
         }),
         ChannelPilotProPlugin.init({
-            active: process.env.CHANNEL_PILOT_PRO_ACTIVE === 'true',
-            connectionInfo: {
-                url: process.env.CHANNEL_PILOT_PRO_URL,
-                apiMerchantId: process.env.CHANNEL_PILOT_PRO_API_MERCHANT_ID,
-                apiToken: process.env.CHANNEL_PILOT_PRO_API_TOKEN,
-                access_token: process.env.CHANNEL_PILOT_PRO_ACCESS_TOKEN,
-                expiredAt: process.env.CHANNEL_PILOT_PRO_EXPIRED_AT,
-                tokenType: 'Bearer'
-            },
-            header: {
-                'Authorization': `Bearer ${process.env.CHANNEL_PILOT_PRO_ACCESS_TOKEN}`,
-            }
+            active: process.env.CHANNEL_PILOT_PRO_ACTIVE === 'true'
         }),
         ShopifyPlugin.init({
             active: process.env.SHOPIFY_ACTIVE === 'true'
@@ -151,7 +133,7 @@ export const customConfig: SystemConfig = {
     // ApolloEngineApiKey: "service:Logic-Bits-2900:5w1aCP5YUtF-1ErRG0KNQw"
 };
-function getDbConfig(): ConnectionOptions {
+function getDbConfig(): DataSourceOptions {
     const dbType = process.env.DB || 'postgres';
     const dbHost = process.env.DB_HOST || 'localhost';
     const dbPort = +process.env.DB_PORT || 5432;