diff --git a/.env b/.env index c5c30eb..0b84298 100644 --- a/.env +++ b/.env @@ -1,50 +1,40 @@ # ===== Environment Stage ====== -ENV_MODE=production NODE_ENV=production # ====== Database Configuration ====== POSTGRES_PASSWORD=6MaBQd9ICYeZnCb PGADMIN_DEFAULT_PASSWORD=bET23d76xKArCqf +DB_NAME="phoenix" +DB_HOST="phoenixDB" +DB_PORT=5432 +DB_USERNAME="postgres" +PGADMIN_DEFAULT_EMAIL="info@phx-erp.de" +MAIL_SERVER="mail.phx-erp.de" +MAIL_PORT=465 +MAIL_USERNAME="internal@phx-erp.de " +MAIL_PASSWORD="8Kb2p4!o1" +SECURITY_EMAIL_SENDER="'No Reply PHX '" # ====== Phoenix Super Admin Configuration ====== SUPER_ADMIN_USER_PASSWORD=zrRzSW5pC4cWwroX976oXcGQU # ====== Redis Configuration ====== REDIS_PASSWORD=sBCuVsJkKTSErgF # ====== Worker Configuration ====== RUN_JOB_QUEUE=1 -# ====== Email Configuration ====== -SMTP_FROM="Yuri Lima " -SMTP_TYPE='smtp' -SMTP_NAME='mail.phx-erp.de' -SMTP_HOST='mail.phx-erp.de' -SMTP_PORT=465 -SMTP_SECURE='true' -SMTP_USER='yuri.lima@phx-erp.de' -SMTP_PASS='0rB0@et68' -SMTP_LOGGING='true' -SMTP_DEBUG='true' -SMTP_TLS_REJECT_UNAUTHORIZED='false' # If true will reject self-signed certificates. It will show: Hostname/IP does not match certificate's altnames: Host: mail.phx-erp.de. is not in the cert's altnames: DNS:*.netcup.net, DNS:netcup.net trace -SMTP_SECURE_CONNECTION='true' # Not in use +# ===== Metrics Configuration ====== +# Loki API URL -> The IP 5.75.153.161 is the Grafana server, which has a firewall rule allowing this connection. If you change it here, it also needs to be changed in NGINX. +LOKI_URL=http://grafana.phx-erp.de:3100/loki/api/v1/push +LOKI_RETRIES=5 +LOKI_BATCH_SIZE=500 +# ===== HTTPS-PORTAL Configuration ====== +HTTPS_PORTAL_DOMAINS='yuri.phx-erp.de -> phoenix-app' +# ====== PHX-SYSTEM Configuration ====== +PHOENIX_SYSTEM_REPLICAS=1 # ======= GraphQl ============= GRAPHQL_DEBUG='true' GRAPHQL_TRACING='true' # ======= Integrations ============= # Bill Bee BILL_BEE_ACTIVE='true' -BILL_BEE_API_KEY='200EEBAD-06E8-4184-B430-3428D6447B92' -BILL_BEE_API_USERNAME="yuri.lima" -BILL_BEE_API_SECRET='YTB6tgm.dzb0ntf@zqr' -BILL_BEE_API_URL='https://api.billbee.io/api/v1' # Channel Pilot Pro CHANNEL_PILOT_PRO_ACTIVE='true' -CHANNEL_PILOT_PRO_URL='https://capi.channelpilot.com' -CHANNEL_PILOT_PRO_API_MERCHANT_ID='' -CHANNEL_PILOT_PRO_API_TOKEN='' -CHANNEL_PILOT_PRO_ACCESS_TOKEN='' -CHANNEL_PILOT_PRO_EXPIRED_AT='' # Shopify SHOPIFY_ACTIVE='true' -SHOPIFY_HOST_NAME='https://phxerpdev.myshopify.com/admin/api/2024-10/graphql.json' -SHOPIFY_API_KEY='159142eaee1b747e5cb084cc77564b3e' -SHOPIFY_API_SECRET='1be9e99cad669092247f8735da3e0570' -SHOPIFY_TOKEN='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' -SHOPIFY_HOST_SCHEME='https' -SHOPIFY_IS_EMBEDDED_APP='true' diff --git a/.gitignore b/.gitignore index 92c42b2..0dd4cca 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,4 @@ e2e https_portal/log https_portal/data assets +pgadmin/data diff --git a/custom_pg_hba.conf b/custom_pg_hba.conf deleted file mode 100644 index e69de29..0000000 diff --git a/docker-compose.yaml b/docker-compose.yaml index de0cc37..c2b09fb 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -1,165 +1,287 @@ --- - services: postgres: restart: always - image: "postgres:15.6-alpine" - container_name: phoenixDB + image: "postgres:15.1-alpine" + container_name: phoenixDB # Hostname + # logging: + # driver: loki + # options: + # loki-url: "${LOKI_URL}" + # loki-retries: "${LOKI_RETRIES}" + # loki-batch-size: "${LOKI_BATCH_SIZE}" + # loki-external-labels: "service=phx-postgres,env=prod" networks: - backend - ports: - - "5432:5432" environment: - - PGUSER=postgres - - DEBUG=false - - POSTGRES_DB=phoenix - - "POSTGRES_PASSWORD=${POSTGRES_PASSWORD}" + DEBUG: true + POSTGRES_DB: ${DB_NAME} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} volumes: - "./database:/var/lib/postgresql/data" - - "./database/pg_hba.conf:/etc/postgresql/pg_hba.conf" # Correct location - command: ["postgres", "-c", "hba_file=/etc/postgresql/pg_hba.conf"] # ✅ Tell PostgreSQL where to find it healthcheck: - test: - - CMD-SHELL - - pg_isready -U postgres - interval: 5s - timeout: 2s - retries: 5 + test: [ "CMD-SHELL", "pg_isready -U postgres" ] + interval: 5s # Time between each health check + timeout: 2s # Max time to wait for each check + retries: 5 # Number of failures before marking as unhealthy
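The Loki logging options on the postgres service above (and on every service below) are commented out but already wired to the LOKI_URL, LOKI_RETRIES and LOKI_BATCH_SIZE values added to .env. A minimal host-side sketch before re-enabling them, assuming the grafana/loki-docker-driver plugin from the Grafana documentation and that grafana.phx-erp.de:3100 is reachable from this machine:

# Install the Loki log driver on the Docker host (one-time step, per Grafana's docs),
# then confirm it is enabled before un-commenting the `logging:` blocks.
docker plugin install grafana/loki-docker-driver:latest --alias loki --grant-all-permissions
docker plugin ls | grep -i loki
# Quick reachability check of the push endpoint configured in LOKI_URL.
curl -s -o /dev/null -w '%{http_code}\n' "http://grafana.phx-erp.de:3100/loki/api/v1/push"

Any HTTP response (typically 405 for a bare GET against the push endpoint) means the endpoint answers; a timeout points at the firewall rule mentioned in the .env comment above.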
pgadmin: image: dpage/pgadmin4 - container_name: pgadmin_container + container_name: pgAdmin4_Ui + user: "5050:5050" + # logging: + # driver: loki + # options: + # loki-url: "${LOKI_URL}" + # loki-retries: "${LOKI_RETRIES}" + # loki-batch-size: "${LOKI_BATCH_SIZE}" + # loki-external-labels: "service=phx-pgadmin,env=prod" networks: - backend - frontend environment: - PGADMIN_DEFAULT_EMAIL: "pgadmin4@pgadmin.org" - PGADMIN_DEFAULT_PASSWORD: ${PGADMIN_DEFAULT_PASSWORD} - PGADMIN_CONFIG_SERVER_MODE: 'False' - ports: - - "${PGADMIN_PORT:-5050}:80" + PGADMIN_DEFAULT_EMAIL: ${PGADMIN_DEFAULT_EMAIL} + PGADMIN_DEFAULT_PASSWORD: ${SUPER_ADMIN_USER_PASSWORD} + PGADMIN_CONFIG_SERVER_MODE: 'True' + PGADMIN_CONFIG_WSGI_SCRIPT_NAME: "'/pgadmin4'" + PGADMIN_CONFIG_PROXY_X_PROTO_COUNT: 1 + PGADMIN_SERVER_JSON_FILE: '/var/lib/pgadmin/servers.json' + PGADMIN_REPLACE_SERVERS_ON_STARTUP: 'True' + PGADMIN_CONFIG_DATA_DIR: "'/var/lib/pgadmin'" + PGADMIN_CONFIG_MASTER_PASSWORD_REQUIRED: 'False' + + # pgpass dynamic vars + PGPASSFILE: /var/lib/pgadmin/pgpass + PGPASS_HOST: ${DB_HOST} + PGPASS_PORT: ${DB_PORT} + PGPASS_DB: ${DB_NAME} + PGPASS_USER: ${DB_USERNAME} + PGPASS_PASSWORD: ${POSTGRES_PASSWORD} + + # Other config + ALLOW_SAVE_PASSWORD: 'False' + MFA_ENABLED: 'True' + MFA_FORCE_REGISTRATION: 'False' + MFA_SUPPORTED_METHODS: 'email' + MFA_EMAIL_SUBJECT: 'Your MFA code by PHX-ERP' + MAX_LOGIN_ATTEMPTS: 5 + ENHANCED_COOKIE_PROTECTION: 'True' + SHOW_GRAVATAR_IMAGE: 'True' + SECURITY_EMAIL_SENDER: ${SECURITY_EMAIL_SENDER} + MAIL_SERVER: ${MAIL_SERVER} + MAIL_PORT: ${MAIL_PORT} + MAIL_USE_SSL: 'False' + MAIL_USE_TLS: 'False' + MAIL_USERNAME: ${MAIL_USERNAME} + MAIL_PASSWORD: ${MAIL_PASSWORD} + MAIL_DEBUG: 'False' volumes: - - "pgadmin:/var/lib/pgadmin" + - ./pgadmin/data:/var/lib/pgadmin + - ./pgadmin/pgadmin-entrypoint.sh:/docker-entrypoint.sh:ro + entrypoint: ["/bin/sh", "/docker-entrypoint.sh"] + depends_on: + postgres: + condition: service_healthy + healthcheck: + test: ["CMD", "wget", "-O", "-", "http://localhost:80/misc/ping"] + interval: 15s + timeout: 10s + retries: 5 + start_period: 60s phoenix-app: restart: always image: "yurimatoslima/phoenix-frontend:alpha" container_name: phoenixApp + ports: + - "3000:3000" # Restrict to only allow access from Grafana Server IP + # logging: + # driver: loki + # options: + # loki-url: "${LOKI_URL}" + # loki-retries: "${LOKI_RETRIES}" + # loki-batch-size: "${LOKI_BATCH_SIZE}" + # loki-external-labels: "service=phx-app,env=prod,project=phoenix" volumes: - - "./app_custom:/usr/share/nginx/html/assets/custom" + - ./app_custom:/usr/share/nginx/html/assets/custom + - ./nginx/nginx.conf:/etc/nginx/nginx.conf + - ./nginx/includes:/etc/nginx/includes:ro networks: - - backend # primary network [external] - - frontend # internal network + - backend + - frontend depends_on: - - phoenix-system + pgadmin: + condition: service_healthy + healthcheck: + test: ["CMD", "wget", "--spider", "-q", "http://phoenixApp/login"] # checks that the NGINX server inside the container is serving the /login page + interval: 10s # check every 10 seconds + timeout: 5s # allow 5 seconds per check + retries: 5 # mark as unhealthy after 5 failures + start_period: 15s # wait 15 seconds after container start before checking
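The healthcheck blocks above are what the depends_on: condition: service_healthy chain (postgres -> pgadmin -> phoenix-app) keys off: interval is the time between probes, timeout the per-probe limit, retries the number of consecutive failures before "unhealthy", and start_period the grace period before failures count. A quick way to watch them on a running host, sketched under the assumption that the container names from this compose file are in use:

# Show the health status Docker derives from each healthcheck block.
docker inspect --format '{{.Name}} {{.State.Health.Status}}' phoenixDB pgAdmin4_Ui phoenixApp
# Re-run the phoenix-app probe by hand (the healthcheck itself assumes wget is in the image).
docker exec phoenixApp wget --spider -q http://phoenixApp/login && echo "login route reachable"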
phoenix-system: restart: always image: "yurimatoslima/phoenix-backend:alpha" + # logging: + # driver: loki + # options: + # loki-url: "${LOKI_URL}" + # loki-retries: "${LOKI_RETRIES}" + # loki-batch-size: "${LOKI_BATCH_SIZE}" + # loki-external-labels: "service=phoenix-system,env=prod" environment: - "DB_HOST=phoenixDB" + - "DB_NAME=${DB_NAME}" - "DB_PASSWORD=${POSTGRES_PASSWORD}" - "DB_USERNAME=postgres" - "SUPER_ADMIN_USER_PASSWORD=${SUPER_ADMIN_USER_PASSWORD}" - "REDIS_PASSWORD=${REDIS_PASSWORD}" - RUN_JOB_QUEUE=${RUN_JOB_QUEUE} - - SMTP_FROM=${SMTP_FROM} - - SMTP_TYPE=${SMTP_TYPE} - - SMTP_NAME=${SMTP_NAME} - - SMTP_HOST=${SMTP_HOST} - - SMTP_PORT=${SMTP_PORT} - - SMTP_SECURE=${SMTP_SECURE} - - SMTP_USER=${SMTP_USER} - - SMTP_PASS=${SMTP_PASS} - - SMTP_LOGGING=${SMTP_LOGGING} - - SMTP_DEBUG=${SMTP_DEBUG} - - SMTP_TLS_REJECT_UNAUTHORIZED=${SMTP_TLS_REJECT_UNAUTHORIZED} - - SMTP_SECURE_CONNECTION=${SMTP_SECURE_CONNECTION} - - ENV_MODE=${ENV_MODE} - NODE_ENV=${NODE_ENV} - - SMTP_TLS_CIPHERS={SMTP_TLS_CIPHERS} - - BILL_BEE_ACTIVE=${BILL_BEE_ACTIVE} - - BILL_BEE_API_KEY=${BILL_BEE_API_KEY} - - BILL_BEE_API_USERNAME=${BILL_BEE_API_USERNAME} - - BILL_BEE_API_SECRET=${BILL_BEE_API_SECRET} - - BILL_BEE_API_URL=${BILL_BEE_API_URL} - - CHANNEL_PILOT_PRO_ACTIVE=${CHANNEL_PILOT_PRO_ACTIVE} - - CHANNEL_PILOT_PRO_URL=${CHANNEL_PILOT_PRO_URL} - - CHANNEL_PILOT_PRO_API_MERCHANT_ID=${CHANNEL_PILOT_PRO_API_MERCHANT_ID} - - CHANNEL_PILOT_PRO_API_TOKEN=${CHANNEL_PILOT_PRO_API_TOKEN} - - CHANNEL_PILOT_PRO_ACCESS_TOKEN=${CHANNEL_PILOT_PRO_ACCESS_TOKEN} - - CHANNEL_PILOT_PRO_EXPIRED_AT=${CHANNEL_PILOT_PRO_EXPIRED_AT} - - SHOPIFY_ACTIVE=${SHOPIFY_ACTIVE} - - SHOPIFY_HOST_NAME=${SHOPIFY_HOST_NAME} - - SHOPIFY_API_KEY=${SHOPIFY_API_KEY} - - SHOPIFY_API_SECRET=${SHOPIFY_API_SECRET} - - SHOPIFY_HOST_SCHEME=${SHOPIFY_HOST_SCHEME} - - SHOPIFY_IS_EMBEDDED_APP=${SHOPIFY_IS_EMBEDDED_APP} - command: ["npm", "run", "start:server"] deploy: - replicas: 1 #change here if u want to have more replicas. Cant find a way to set via variable right now + replicas: ${PHOENIX_SYSTEM_REPLICAS} # change the number of replicas via PHOENIX_SYSTEM_REPLICAS in .env networks: - - backend + backend: + aliases: + - phoenix-system depends_on: postgres: condition: service_healthy phoenix-redis: condition: service_healthy + healthcheck: + test: ["CMD-SHELL", "curl -s http://phoenix-system:3000/health | grep -q '\"admin-api\":{\"status\":\"up\"}' && curl -s http://phoenix-system:3000/health | grep -q '\"database\":{\"status\":\"up\"}'"] # Checks both admin-api and database status + interval: 10s # Time between each health check + timeout: 6s # Max time to wait for each check + retries: 10 # Number of failures before marking as unhealthy + start_period: 40s # Grace period before health checks start volumes: - - "./logs:/usr/src/app/packages/dev-server/logs" - "./assets:/usr/src/app/packages/dev-server/assets" - "./server_custom:/usr/src/app/packages/dev-server/custom" + # - "./logs:/usr/src/app/packages/dev-server/logs" phoenix-worker: restart: always image: "yurimatoslima/phoenix-backend:alpha" + container_name: "phoenix-worker" + ports: + - "3001:3001" # Restrict to only allow access from Grafana Server IP + # logging: + # driver: loki + # options: + # loki-url: "${LOKI_URL}" + # loki-retries: "${LOKI_RETRIES}" + # loki-batch-size: "${LOKI_BATCH_SIZE}" + # loki-external-labels: "service=phx-worker,env=prod" networks: - backend environment: - DB_HOST=phoenixDB + - "DB_NAME=${DB_NAME}" - "DB_PASSWORD=${POSTGRES_PASSWORD}" - DB_USERNAME=postgres - "SUPER_ADMIN_USER_PASSWORD=${SUPER_ADMIN_USER_PASSWORD}" - REDIS_PASSWORD=${REDIS_PASSWORD} - # command: ["npm", "run", "start:worker"] - entrypoint: ./entrypoint-phoenix-worker.sh + - NODE_ENV=${NODE_ENV} depends_on: + phoenix-system: + condition: service_healthy postgres: condition: service_healthy + healthcheck: + test: [ "CMD-SHELL", "curl -s http://phoenix-worker:3001/health | grep -q '\"status\":\"ok\"'" ] # Check if worker responds with status ok + interval: 10s # Time between each health check + timeout: 6s # Max time to wait for each check + retries: 20 # Number of failures before marking as unhealthy + start_period: 30s # Grace period before health checks start volumes: - "./assets:/usr/src/app/packages/dev-server/assets" - "./server_custom:/usr/src/app/packages/dev-server/custom" - - "./logs:/usr/src/app/packages/dev-server/logs" + # - "./logs:/usr/src/app/packages/dev-server/logs" phoenix-redis: image: 'bitnami/redis:latest' container_name: redis - command: /opt/bitnami/scripts/redis/run.sh --maxmemory 100mb --dir /bitnami/redis/data - user: 1001:1001 # Non-root user in Bitnami images The /bitnami/redis/data directory inside the container is already owned by 1001, avoiding permission issues.
+ command: /opt/bitnami/scripts/redis/run.sh --maxmemory 100mb + user: root + # logging: + # driver: loki + # options: + # loki-url: "${LOKI_URL}" + # loki-retries: "${LOKI_RETRIES}" + # loki-batch-size: "${LOKI_BATCH_SIZE}" + # loki-external-labels: "service=phx-redis,env=prod" networks: - backend restart: always environment: ALLOW_EMPTY_PASSWORD: "no" - REDIS_DISABLE_COMMANDS: FLUSHDB,FLUSHALL,CONFIG REDIS_PASSWORD: ${REDIS_PASSWORD} healthcheck: test: [ "CMD", "redis-cli", "--raw", "incr", "ping" ] interval: 5s retries: 10 # Increase retries if Redis takes a while to start timeout: 5s # Increase timeout if needed - volumes: - - "./redis/data:/bitnami/redis/data" - - /opt/phx/redis/tmp:/opt/bitnami/redis/tmp # ✅ Fix permission issue - - /opt/phx/redis/logs:/opt/bitnami/redis/logs # ✅ Fix logs permission issue - - ./redis.conf:/opt/bitnami/redis/etc/redis.conf # ✅ Use a writable redis.conf depends_on: postgres: condition: service_healthy - node_exporter: + volumes: + - "./redis/data:/bitnami/redis/data" + phoenix-health-exporter: + image: phxerp/phoenix-health-exporter:alpha + container_name: health_exporter + restart: unless-stopped + # logging: + # driver: loki + # options: + # loki-url: "${LOKI_URL}" + # loki-retries: "${LOKI_RETRIES}" + # loki-batch-size: "${LOKI_BATCH_SIZE}" + # loki-external-labels: "service=phx-health-exporter,env=prod" + ports: + - "9800:9800" + environment: + DB_HOST: ${DB_HOST} + DB_NAME: ${DB_NAME} + DB_PASSWORD: ${POSTGRES_PASSWORD} + DB_USERNAME: ${DB_USERNAME} + networks: + - frontend + - backend + volumes: + - /etc/hostname:/etc/host_hostname:ro # This ensures the container always uses the real machine hostname, even if restarted or recreated. + security_opt: + - no-new-privileges:true + deploy: + resources: + limits: + cpus: '0.25' + memory: 128M + depends_on: + phoenix-system: + condition: service_healthy + phoenix-worker: + condition: service_healthy + postgres: + condition: service_healthy + healthcheck: + test: ["CMD-SHELL", "curl -sf http://localhost:9800/healthz || exit 1"] + interval: 1m + timeout: 5s + retries: 3 + start_period: 15s + node-exporter: image: quay.io/prometheus/node-exporter:latest container_name: node_exporter + # logging: + # driver: loki + # options: + # loki-url: "${LOKI_URL}" + # loki-retries: "${LOKI_RETRIES}" + # loki-batch-size: "${LOKI_BATCH_SIZE}" + # loki-external-labels: "service=phx-node-exporter,env=prod" networks: - metrics + - frontend restart: unless-stopped ports: - - "9100:9100" + - "9100:9100" # Restrict to only allow access from Grafana Server IP command: - "--path.procfs=/host/proc" - "--path.sysfs=/host/sys" @@ -169,13 +291,66 @@ services: - "/proc:/host/proc:ro" - "/sys:/host/sys:ro" - "/:/host:ro,rslave" + security_opt: + - no-new-privileges:true + deploy: + resources: + limits: + cpus: '0.25' + memory: 128M + healthcheck: + test: ["CMD", "wget", "-qO-", "http://localhost:9100/metrics"] + interval: 15s + timeout: 5s + retries: 3 + start_period: 20s + # nginx-exporter: + # image: nginx/nginx-prometheus-exporter:1.4.2 + # container_name: nginx_exporter + # restart: unless-stopped + # # logging: + # # driver: loki + # # options: + # # loki-url: "${LOKI_URL}" + # # loki-retries: "${LOKI_RETRIES}" + # # loki-batch-size: "${LOKI_BATCH_SIZE}" + # # loki-external-labels: "service=phx-nginx-exporter,env=prod" + # ports: + # - "9113:9113" # Restrict to only allow access from Grafana Server IP + # command: + # - '--nginx.scrape-uri=http://phoenixApp/stub_status' + # security_opt: + # - no-new-privileges:true + # deploy: 
+ # resources: + # limits: + # cpus: '0.25' + # memory: 128M + # depends_on: + # phoenix-app: + # condition: service_healthy + # networks: + # - frontend + # - metrics + # healthcheck: + # test: ["CMD", "wget", "-qO-", "http://localhost:9113/metrics"] # Not working as expected + # interval: 15s + # timeout: 5s + # retries: 3 + # start_period: 10s https_portal: container_name: https_portal image: "steveltn/https-portal:1.21" restart: unless-stopped + # logging: + # driver: loki + # options: + # loki-url: "${LOKI_URL}" + # loki-retries: "${LOKI_RETRIES}" + # loki-batch-size: "${LOKI_BATCH_SIZE}" + # loki-external-labels: "service=phx-https-portal,env=prod" networks: - frontend # [ PgAdmin, Phoenix-App ] - - external # [ Outside of the World] ports: - "80:80" - "443:443" @@ -186,18 +361,18 @@ services: DEBUG: "true" RENEW_MARGIN_DAYS: 30 CLIENT_MAX_BODY_SIZE: 0 + SERVER_NAMES_HASH_BUCKET_SIZE: 128 # Increase hash bucket size for server names - good for bigger domains names, if not set correctly, it will throw an error, break the container. # FORCE_RENEW: 'true' - DOMAINS: 'yuri.phx-erp.de -> phoenix-app' + DOMAINS: "${HTTPS_PORTAL_DOMAINS}" volumes: - ./https_portal/data:/var/lib/https-portal # ssl_certs, vhost.d, htdocs - ./https_portal/log:/var/log/nginx # nginx logs - - ./https_portal/config/custom_nginx.conf:/opt/custom_nginx.conf:ro # ✅ Mount file in a safe path + # - ./https_portal/config/custom_nginx.conf:/opt/custom_nginx.conf:ro # ✅ Mount file in a safe path depends_on: - - phoenix-app - - phoenix-system - - pgadmin - - phoenix-redis - - postgres + pgadmin: + condition: service_healthy + postgres: + condition: service_healthy fail2ban: image: crazymax/fail2ban:latest container_name: fail2ban @@ -210,6 +385,7 @@ services: - ./fail2ban/jail.d:/etc/fail2ban/jail.d - /var/log:/var/log:ro restart: always + networks: backend: driver: bridge @@ -224,17 +400,10 @@ networks: ipam: config: - subnet: 172.20.0.0/16 - - external: - driver: bridge - external: false - + metrics: driver: bridge external: false ipam: config: - subnet: 172.22.0.0/16 - -volumes: - pgadmin: null diff --git a/https_portal/data/yuri.phx-erp.de/production/signed.crt b/https_portal/data/yuri.phx-erp.de/production/signed.crt index b053f48..a35d71e 100644 --- a/https_portal/data/yuri.phx-erp.de/production/signed.crt +++ b/https_portal/data/yuri.phx-erp.de/production/signed.crt @@ -1,31 +1,31 @@ -----BEGIN CERTIFICATE----- -MIIE7TCCA9WgAwIBAgISBN9fSOkvS54NMLTgeStNS2oTMA0GCSqGSIb3DQEBCwUA +MIIE+zCCA+OgAwIBAgISBeKK1knRhuU1oi3S5b7is2lPMA0GCSqGSIb3DQEBCwUA MDMxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MQwwCgYDVQQD -EwNSMTEwHhcNMjUwMTA4MDMyMzU2WhcNMjUwNDA4MDMyMzU1WjAaMRgwFgYDVQQD +EwNSMTEwHhcNMjUwNTA4MTUxNTE3WhcNMjUwODA2MTUxNTE2WjAaMRgwFgYDVQQD Ew95dXJpLnBoeC1lcnAuZGUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB AQCkoSgHOsFmtqy02FZoJflGQlqx2Lc9WP12L5i5D/hutP8gpMoEfq6WyHD7Gn25 LR2BTn8ceqMMoArfJs6SsEXT7xdbmWSS9r4pWtbpZLWO/jwtYBbg/lwCTJUbiIvD wwRLtjP+xVlwfuslkgcEdPCD9CaigGkhcLQKgzL2hhwYwBMaA94MVX0rhd8w66zA cwMcwo8VWDb0PwD2TAJqBXupmjMQ8XIob57rC0drO9175Wp2UX13W3m/NaOylKyU Ct7uoClu/LtBXdXG0TmKTSlYdlEkwyWJYBNEnog0QWbsxl4PCaRaMianjgln5BGt -rmBUApxH0qm+Ct1Svw5HdB9tAgMBAAGjggISMIICDjAOBgNVHQ8BAf8EBAMCBaAw +rmBUApxH0qm+Ct1Svw5HdB9tAgMBAAGjggIgMIICHDAOBgNVHQ8BAf8EBAMCBaAw HQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYD VR0OBBYEFEKJ54GyFoST7ssNgV+4Qn/QEubyMB8GA1UdIwQYMBaAFMXPRqTq9MPA -emyVxC2wXpIvJuO5MFcGCCsGAQUFBwEBBEswSTAiBggrBgEFBQcwAYYWaHR0cDov -L3IxMS5vLmxlbmNyLm9yZzAjBggrBgEFBQcwAoYXaHR0cDovL3IxMS5pLmxlbmNy 
-Lm9yZy8wGgYDVR0RBBMwEYIPeXVyaS5waHgtZXJwLmRlMBMGA1UdIAQMMAowCAYG -Z4EMAQIBMIIBAwYKKwYBBAHWeQIEAgSB9ASB8QDvAHUAouMK5EXvva2bfjjtR2d3 -U9eCW4SU1yteGyzEuVCkR+cAAAGURCZHUAAABAMARjBEAiAxBCBQSJT8mWTph0yI -HQsoR6RFyMExrihZhIKpkMoRwwIgftlv5sQPZnto1KMuwrKwQXUdMej2Pb2/QgUM -eIyplEQAdgDM+w9qhXEJZf6Vm1PO6bJ8IumFXA2XjbapflTA/kwNsAAAAZREJkdh -AAAEAwBHMEUCIG02M+HEsqp2J7GT9Lkce/1FJKyFOo3lupETe/wvtfzQAiEA7PnP -MRZ481CHAg6HMYBv/lcSCBBOsjt6NASP8ZIgNvQwDQYJKoZIhvcNAQELBQADggEB -AFAeYvfig+Eb9IZlLxAXVKpnnIc11D3Tyvfe/c3YNbv2krXWd7n64TMEdE8IqzLl -Ew/2R7v7Zm8dsgnmWET2TGDT0O6ZAzeYTictTqaYkg0WMGGq1gfovjUt6E3aGhYm -TaacT0ypXm6zE0JpotXkJESNbfYx+zO0VNTCxYtfcTxeGFvqG41ZljvB5tWx2ODU -dlYh9omk1OnIgxY6LCdNdhIpNIfcswx0FN6dLc4hNIlZeUwAznao0/DB7M9kKKbL -JEISF1PD7+qgBCOLnKxylYx+aV3Bmg9jaUzySB2j95MLJirPYqqDa4ObU1UKa9v0 -RWOYa6/PjG44rdUjvU6GsLY= +emyVxC2wXpIvJuO5MDMGCCsGAQUFBwEBBCcwJTAjBggrBgEFBQcwAoYXaHR0cDov +L3IxMS5pLmxlbmNyLm9yZy8wGgYDVR0RBBMwEYIPeXVyaS5waHgtZXJwLmRlMBMG +A1UdIAQMMAowCAYGZ4EMAQIBMC4GA1UdHwQnMCUwI6AhoB+GHWh0dHA6Ly9yMTEu +Yy5sZW5jci5vcmcvODAuY3JsMIIBBQYKKwYBBAHWeQIEAgSB9gSB8wDxAHYAzPsP +aoVxCWX+lZtTzumyfCLphVwNl422qX5UwP5MDbAAAAGWsKyn1QAABAMARzBFAiBz +v6PsRPAwXWcYu6BOQl5QmzzI6BP9Jl/t+teqR7rVgAIhAPrl3JjVjIPBuWh/LvfR +SSH1bJSLVuZpP/czfG1yZI+XAHcArxgaKNaMo+CpikycZ6sJ+Lu8IrquvLE4o6Gd +0/m2Aw0AAAGWsKyqywAABAMASDBGAiEArUQdsL6CsB9cBR2ZaXd5yOC3VypYyOmp +ZQPD/iPIOP8CIQDt0R00Uq3MTybefEXwTo34ixkQyN9vw/xh2OkbYYBP8DANBgkq +hkiG9w0BAQsFAAOCAQEAGxx7PNuGrMyX530iBmPfUtmRXBLmsVUmj1jalbUzx//i +oFxJ7DWAau8MZHPF+tSvrbReKjCFgpkOfSdaSMubWHlAPaSMP6NGZUwmLt7jApke +qzYKgYWhovh/J8uYgQ7KEDPJeXYeDIHbmnyyHtgxI0eXKlpN3hgQiIxC2Q9JZc7+ +enktsKskPWpwHNxVPHYKF9VGbFMdOxBjr6wSRecmzD3lGXv0O0r9e84ULSfmK8KQ +a+TCZnwVM1tTNMm5TMKRa79nQE0+3R2wsXrqj2PfFtjS2haJgBbSQfSSqWlcft9C +WPuFNj8uG3ZVjqw7uOXjE6hy1AqdJbDt2gBSEB3gIA== -----END CERTIFICATE----- -----BEGIN CERTIFICATE----- diff --git a/nginx/includes/pgadmin.conf b/nginx/includes/pgadmin.conf new file mode 100644 index 0000000..8a83459 --- /dev/null +++ b/nginx/includes/pgadmin.conf @@ -0,0 +1,15 @@ +# pgAdmin reverse proxy (under subpath) +location /pgadmin4 { + proxy_pass http://pgAdmin4_Ui/; + proxy_set_header X-Script-Name /pgadmin4; + proxy_set_header X-Scheme $scheme; + proxy_set_header X-Forwarded-Proto $forwarded_proto; + # Include headers for proxying + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Host $host; + proxy_set_header X-Forwarded-Proto $forwarded_proto; + # End of headers + proxy_redirect off; +} \ No newline at end of file diff --git a/nginx/nginx.conf b/nginx/nginx.conf index 7927242..c7b219b 100644 --- a/nginx/nginx.conf +++ b/nginx/nginx.conf @@ -1,126 +1,392 @@ -worker_processes 1; +# Main process configuration +worker_processes 1; events { - worker_connections 1024; + worker_connections 1024; } http { + geo $frontend_whitelist { + default 1; + 127.0.0.1 1; + 172.20.0.0/16 1; # Frontend Docker subnet + 5.75.153.161 1; # Grafana or monitoring + 167.235.254.4 1; # Ansible server IP + } + geo $backend_whitelist { + default 1; + 127.0.0.1 1; + 172.19.0.0/16 1; # Backend Docker subnet + 5.75.153.161 1; # Grafana or monitoring + 167.235.254.4 1; # Ansible server IP + } + + # These settings ensure that $remote_addr reflects the real client IP forwarded by https-portal, which is needed for your allow rules to work correctly + # Recommended for resolving client IP behind proxy + # Docker networks where both frontend and backend containers communicate through NGINX. 
+ # To avoid potential misclassification of real client IPs from backend routes. + # The set_real_ip_from directive doesn’t allow access — it just instructs NGINX to trust the X-Forwarded-For header from those IPs. + set_real_ip_from 172.20.0.0/16; # Replace with your Docker network subnet (matches your `frontend` network) + set_real_ip_from 172.19.0.0/16; # Replace with your Docker network subnet (matches your `backend` network) + real_ip_header X-Forwarded-For; + real_ip_recursive on; + + resolver 127.0.0.11 valid=10s; + resolver_timeout 5s; + + upstream phoenix_system_cluster { + zone phoenix_system_cluster 64k; + least_conn; + server phoenix-system:3000 resolve fail_timeout=1s max_fails=0; + # ADD_SYSTEM_SERVERS_HERE + } + + upstream phoenix_worker_cluster { + zone phoenix_worker_cluster 64k; + least_conn; + server phoenix-worker:3001 resolve fail_timeout=1s max_fails=0; + # ADD_WORKER_SERVERS_HERE + } + + server_tokens off; # Disable NGINX version tokens to avoid leaking NGINX version. + + # File handling & upload limits sendfile on; client_max_body_size 64m; - #client_body_temp_path /data/temp; + # Prevent warning when setting many proxy headers, like we do + proxy_headers_hash_max_size 1024; + proxy_headers_hash_bucket_size 128; + # Gzip compression (for better bandwidth efficiency) + gzip on; + gzip_min_length 1000; + gzip_proxied expired no-cache no-store private auth; + gzip_types text/plain text/css application/json application/javascript application/x-javascript text/xml application/xml application/xml+rss text/javascript; + + # Trust the protocol from upstream proxy/load balancer + map $http_x_forwarded_proto $forwarded_proto { + default $scheme; + https https; + http http; + } + + # File types and default mime type + include /etc/nginx/mime.types; + default_type application/octet-stream; + + # 🧩 Logs + map $request_uri $loggable { + default 1; + ~^/stub_status 0; + ~^/health/system 0; + ~^/health/worker 0; + } + + log_format main_with_realip '$remote_addr - $realip_remote_addr [$time_local] ' + '"$request" $status $body_bytes_sent ' + '"$http_referer" "$http_user_agent"'; + + log_format json_compatible escape=json '{' + '"time":"$time_iso8601",' + '"remote_addr":"$remote_addr",' + '"proxy_addr":"$proxy_protocol_addr",' + '"x_forwarded_for":"$http_x_forwarded_for",' + '"request_method":"$request_method",' + '"request_uri":"$request_uri",' + '"status":$status,' + '"body_bytes_sent":$body_bytes_sent,' + '"request_time":$request_time,' + '"upstream_response_time":"$upstream_response_time",' + '"http_referer":"$http_referer",' + '"http_user_agent":"$http_user_agent",' + '"host":"$host",' + '"realip":"$realip_remote_addr"' + '}'; + + access_log /var/log/nginx/access_json.log json_compatible if=$loggable; # JSON format for Loki + access_log /var/log/nginx/access.log main_with_realip if=$loggable; + # End of logs + + ################################################################## + # 🧩 HTTP Server Block + ################################################################## server { listen 80; - server_name localhost; + server_name _; - root /usr/share/nginx/html; - index index.html index.htm; - include /etc/nginx/mime.types; + # Security headers + add_header X-Frame-Options "SAMEORIGIN" always; + add_header X-XSS-Protection "1; mode=block" always; + add_header X-Content-Type-Options "nosniff" always; + add_header Referrer-Policy "no-referrer-when-downgrade" always; - gzip on; - gzip_min_length 1000; - gzip_proxied expired no-cache no-store private auth; - gzip_types text/plain 
text/css application/json application/javascript application/x-javascript text/xml application/xml application/xml+rss text/javascript; - - sendfile on; - client_max_body_size 64m; + root /usr/share/nginx/html; + index index.html index.htm; + # Frontend SPA fallback location / { try_files $uri $uri/ /index.html; } - # https://serverfault.com/questions/379675/nginx-reverse-proxy-url-rewrite + + # Backend API routes location /backend-api/ { - #rewrite ^/backend-api(.*) /$1 break; - proxy_pass http://phoenix-system:3000/; + proxy_pass http://phoenix_system_cluster/; + # Include headers for proxying + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Host $http_host; + proxy_set_header X-Forwarded-Host $host; + proxy_set_header X-Forwarded-Proto $forwarded_proto; + # End of headers } location /admin-api { - proxy_pass http://phoenix-system:3000/admin-api; + proxy_pass http://phoenix_system_cluster/admin-api; + # Include headers for proxying + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Host $http_host; + proxy_set_header X-Forwarded-Host $host; + proxy_set_header X-Forwarded-Proto $forwarded_proto; + # End of headers } location /remote-assets { - proxy_pass http://phoenix-system:3000/remote-assets; + proxy_pass http://phoenix_system_cluster/remote-assets; + # Include headers for proxying + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Host $http_host; + proxy_set_header X-Forwarded-Host $host; + proxy_set_header X-Forwarded-Proto $forwarded_proto; + # End of headers } location /sti { - proxy_pass http://phoenix-system:3000/sti; + proxy_pass http://phoenix_system_cluster/sti; + # Include headers for proxying + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Host $http_host; + proxy_set_header X-Forwarded-Host $host; + proxy_set_header X-Forwarded-Proto $forwarded_proto; + # End of headers } + # WebSocket support location /ws { - proxy_pass http://phoenix-system:3000/graphql; + proxy_pass http://phoenix_system_cluster/graphql; proxy_http_version 1.1; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "upgrade"; + # Include headers for proxying + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Host $host; + proxy_set_header X-Forwarded-Proto $forwarded_proto; + # End of headers + } + + # Reverse proxy for pgAdmin (subpath support) + include /etc/nginx/includes/*.conf; + + # Health check endpoints -> used by the health check exporter + location /health/system { + proxy_pass http://phoenix_system_cluster/health; + # Secure the health check endpoint + if ($backend_whitelist = 0) { + return 403; + } + # End of security + # Include headers for proxying + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $forwarded_proto; + # End of headers + } + + # location /health/system/metrics { + # proxy_pass http://phoenix_system_cluster/health/metrics; + # # Secure the health check endpoint + # # if ($backend_whitelist = 0) { + # # return 403; + # 
# } + # # End of security + # # Include headers for proxying + # proxy_set_header Host $host; + # proxy_set_header X-Real-IP $remote_addr; + # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + # proxy_set_header X-Forwarded-Proto $forwarded_proto; + # # End of headers + # } + + location /health/worker { + proxy_pass http://phoenix_worker_cluster/health; + # Secure the health check endpoint + if ($backend_whitelist = 0) { + return 403; + } + # End of security + # Include headers for proxying + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $forwarded_proto; + # End of headers + } + + # location /health/worker/metrics { + # proxy_pass http://phoenix_worker_cluster/health/metrics; + # # Secure the health check endpoint + # # if ($backend_whitelist = 0) { + # # return 403; + # # } + # # End of security + # # Include headers for proxying + # proxy_set_header Host $host; + # proxy_set_header X-Real-IP $remote_addr; + # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + # proxy_set_header X-Forwarded-Proto $forwarded_proto; + # # End of headers + # } + + location /stub_status { + stub_status; + # Secure the stub status endpoint + if ($frontend_whitelist = 0) { + return 403; + } + # End of security } } - server { # This new server will watch for traffic on 443 - listen 443 ssl http2; - server_name localhost; + ################################################################## + # 🔐 HTTPS Server Block + ################################################################## + server { + listen 443 ssl; + http2 on; + server_name _; + + # Security headers + add_header X-Frame-Options "SAMEORIGIN" always; + add_header X-XSS-Protection "1; mode=block" always; + add_header X-Content-Type-Options "nosniff" always; + add_header Referrer-Policy "no-referrer-when-downgrade" always; + ssl_certificate /etc/nginx/external-certificate/certificate.crt; ssl_certificate_key /etc/nginx/external-certificate/certificate.key; - root /usr/share/nginx/html; - index index.html index.htm; - include /etc/nginx/mime.types; - gzip on; - gzip_min_length 1000; - gzip_proxied expired no-cache no-store private auth; - gzip_types text/plain text/css application/json application/javascript application/x-javascript text/xml application/xml application/xml+rss text/javascript; - - sendfile on; - client_max_body_size 64m; + root /usr/share/nginx/html; + index index.html index.htm; location / { try_files $uri $uri/ /index.html; } - # https://serverfault.com/questions/379675/nginx-reverse-proxy-url-rewrite + # Secure API routes location /backend-api/ { - #rewrite ^/backend-api(.*) /$1 break; - proxy_pass http://phoenix-system:3000/; + proxy_pass http://phoenix_system_cluster/; + # Include headers for proxying + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Host $http_host; - proxy_set_header X-Forwarded-Proto https; + proxy_set_header X-Forwarded-Host $host; + proxy_set_header X-Forwarded-Proto $forwarded_proto; + # End of headers } location /admin-api { - proxy_pass http://phoenix-system:3000/admin-api; + proxy_pass http://phoenix_system_cluster/admin-api; + # Include headers for proxying + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Host $http_host; + proxy_set_header 
X-Forwarded-Host $host; + proxy_set_header X-Forwarded-Proto $forwarded_proto; + # End of headers } location /remote-assets { - proxy_pass http://phoenix-system:3000/remote-assets; + proxy_pass http://phoenix_system_cluster/remote-assets; + # Include headers for proxying + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Host $http_host; - proxy_set_header X-Forwarded-Proto https; + proxy_set_header X-Forwarded-Host $host; + proxy_set_header X-Forwarded-Proto $forwarded_proto; + # End of headers } location /sti { - proxy_pass http://phoenix-system:3000/sti; + proxy_pass http://phoenix_system_cluster/sti; + # Include headers for proxying + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Host $http_host; + proxy_set_header X-Forwarded-Host $host; + proxy_set_header X-Forwarded-Proto $forwarded_proto; + # End of headers } location /ws { - proxy_pass http://phoenix-system:3000/graphql; + proxy_pass http://phoenix_system_cluster/graphql; proxy_http_version 1.1; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "upgrade"; + # Include headers for proxying + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Host $host; + proxy_set_header X-Forwarded-Proto $forwarded_proto; + # End of headers } + # Reverse proxy for pgAdmin (subpath support) + include /etc/nginx/includes/*.conf; + location /health/system { + proxy_pass http://phoenix_system_cluster/health; + # Secure the health check endpoint + if ($backend_whitelist = 0) { + return 403; + } + # End of security + # Include headers for proxying + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $forwarded_proto; + # End of headers + } + + location /health/worker { + proxy_pass http://phoenix_worker_cluster/health; + # Secure the health check endpoint + if ($backend_whitelist = 0) { + return 403; + } + # End of security + # Include headers for proxying + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $forwarded_proto; + # End of headers + } + + location /stub_status { + stub_status; + # Secure the stub status endpoint + if ($frontend_whitelist = 0) { + return 403; + } + # End of security + } } - } \ No newline at end of file diff --git a/pgadmin/pgadmin-entrypoint.sh b/pgadmin/pgadmin-entrypoint.sh new file mode 100644 index 0000000..72c0aa1 --- /dev/null +++ b/pgadmin/pgadmin-entrypoint.sh @@ -0,0 +1,49 @@ +#!/bin/sh +set -e + +echo "🔧 Entrypoint: Ensuring .pgpass directory and file" + +PGADMIN_HOME="/var/lib/pgadmin" +PGPASS_PATH="${PGADMIN_HOME}/pgpass" +SERVERS_JSON_PATH="/var/lib/pgadmin/servers.json" + +# Ensure parent directory exists +mkdir -p "$PGADMIN_HOME" + +# Create or overwrite .pgpass file +echo "${PGPASS_HOST}:${PGPASS_PORT}:${PGPASS_DB}:${PGPASS_USER}:${PGPASS_PASSWORD}" > "$PGPASS_PATH" +chmod 600 "$PGPASS_PATH" +chown 5050:5050 "$PGPASS_PATH" +export PGPASSFILE="$PGPASS_PATH" + +echo "✅ .pgpass ready at $PGPASS_PATH" +echo "🛠️ Generating servers.json for pgAdmin..." 
+ +# Try to ensure /pgadmin4 is owned by 5050 if possible +if [ -d /pgadmin4 ]; then + echo "🔧 Attempting to chown /pgadmin4 to 5050:5050" + chown 5050:5050 /pgadmin4 2>/dev/null || echo "⚠️ Could not chown /pgadmin4 (likely read-only or permission issue)" +fi + +cat <<EOF > "$SERVERS_JSON_PATH" +{ + "Servers": { + "1": { + "Name": "Phoenix DB", + "Group": "PHX GROUP", + "Host": "${PGPASS_HOST}", + "Port": ${PGPASS_PORT}, + "MaintenanceDB": "${PGPASS_DB}", + "Username": "${PGPASS_USER}", + "SSLMode": "prefer", + "PassFile": "$PGPASSFILE" + } + } +} +EOF + +chmod 600 "$SERVERS_JSON_PATH" +chown 5050:5050 "$SERVERS_JSON_PATH" +echo "✅ servers.json created at $SERVERS_JSON_PATH" + +exec /entrypoint.sh "$@" \ No newline at end of file diff --git a/redis.conf b/redis.conf deleted file mode 100644 index 6ec37a6..0000000 --- a/redis.conf +++ /dev/null @@ -1,4 +0,0 @@ - -rename-command FLUSHDB "" -rename-command FLUSHALL "" -rename-command CONFIG "" diff --git a/server_custom/config.ts b/server_custom/config.ts index ff99a53..1cad081 100644 --- a/server_custom/config.ts +++ b/server_custom/config.ts @@ -1,8 +1,7 @@ /* tslint:disable:no-console */ import path from 'path'; -import { ConnectionOptions } from 'typeorm'; -// import { DataSourceOptions } from 'typeorm'; -import { WinstonLogger, RedisSessionCachePlugin, LogLevel, TypeOrmLogger, SystemConfig, DefaultJobQueuePlugin } from '@phoenix/core'; +import { DataSourceOptions } from 'typeorm'; +import { WinstonLogger, LogLevel, TypeOrmLogger, SystemConfig, DefaultJobQueuePlugin } from '@phoenix/core'; import { AssetServerPlugin } from '@phoenix/asset-server-plugin'; import { ADMIN_API_PATH, API_PORT, SHOP_API_PATH, SUPER_ADMIN_USER_IDENTIFIER } from '@phoenix/common'; import { EmailPlugin, FileBasedTemplateLoader, defaultEmailHandlers } from '@phoenix/email-plugin'; @@ -10,6 +9,8 @@ import { BillBeePlugin } from "@phoenix/bill-bee-plugin"; import { ChannelPilotProPlugin } from "@phoenix/channel-pilot-pro-plugin"; import { ShopifyPlugin } from '@phoenix/shopify-plugin'; +// RedisSessionCachePlugin + /** * Config settings used during development */ @@ -72,15 +73,15 @@ export const customConfig: SystemConfig = { defaultTakeNumber: 100, }, plugins: [ - RedisSessionCachePlugin.init({ - namespace: 'phx-session', - redisOptions: { - host: process.env.REDIS_HOST || 'redis', - port: process.env.REDIS_PORT ? parseInt(process.env.REDIS_PORT) : 6379, - db: process.env.REDIS_DB ? parseInt(process.env.REDIS_DB) : 0, - password: process.env.REDIS_PASSWORD || 'admin' - } - }), + // RedisSessionCachePlugin.init({ + // namespace: 'phx-session', + // redisOptions: { + // host: process.env.REDIS_HOST || 'redis', + // port: process.env.REDIS_PORT ? parseInt(process.env.REDIS_PORT) : 6379, + // db: process.env.REDIS_DB ?
parseInt(process.env.REDIS_DB) : 0, + // password: process.env.REDIS_PASSWORD || 'admin' + // } + // }), AssetServerPlugin.init({ route: 'remote-assets', assetUploadDir: path.join(__dirname, 'assets'), @@ -104,28 +105,9 @@ export const customConfig: SystemConfig = { }), BillBeePlugin.init({ active: process.env.BILL_BEE_ACTIVE === 'true', - apiUrl: process.env.BILL_BEE_API_URL, - apiKey: process.env.BILL_BEE_API_KEY, - username: process.env.BILL_BEE_API_USERNAME, - password: process.env.BILL_BEE_API_SECRET, - header: { - 'X-Billbee-Api-Key': process.env.BILL_BEE_API_KEY, - 'Authorization': `Basic ${Buffer.from(`${process.env.BILL_BEE_API_USERNAME}:${process.env.BILL_BEE_API_SECRET}`).toString('base64')}`, - } }), ChannelPilotProPlugin.init({ - active: process.env.CHANNEL_PILOT_PRO_ACTIVE === 'true', - connectionInfo: { - url: process.env.CHANNEL_PILOT_PRO_URL, - apiMerchantId: process.env.CHANNEL_PILOT_PRO_API_MERCHANT_ID, - apiToken: process.env.CHANNEL_PILOT_PRO_API_TOKEN, - access_token: process.env.CHANNEL_PILOT_PRO_ACCESS_TOKEN, - expiredAt: process.env.CHANNEL_PILOT_PRO_EXPIRED_AT, - tokenType: 'Bearer' - }, - header: { - 'Authorization': `Bearer ${process.env.CHANNEL_PILOT_PRO_ACCESS_TOKEN}`, - } + active: process.env.CHANNEL_PILOT_PRO_ACTIVE === 'true' }), ShopifyPlugin.init({ active: process.env.SHOPIFY_ACTIVE === 'true' @@ -151,7 +133,7 @@ export const customConfig: SystemConfig = { // ApolloEngineApiKey: "service:Logic-Bits-2900:5w1aCP5YUtF-1ErRG0KNQw" }; -function getDbConfig(): ConnectionOptions { +function getDbConfig(): DataSourceOptions { const dbType = process.env.DB || 'postgres'; const dbHost = process.env.DB_HOST || 'localhost'; const dbPort = +process.env.DB_PORT || 5432;
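With the upstream clusters, the /pgadmin4 include and the guarded health locations from nginx.conf in place, an end-to-end smoke test of the proxy layer can be sketched as follows, assuming the stack is up and the yuri.phx-erp.de hostname from HTTPS_PORTAL_DOMAINS still applies:

# Health endpoints proxied to the phoenix_system_cluster / phoenix_worker_cluster upstreams.
curl -sk https://yuri.phx-erp.de/health/system
curl -sk https://yuri.phx-erp.de/health/worker
# stub_status (guarded by the $frontend_whitelist geo block) and pgAdmin under the /pgadmin4 subpath.
curl -sk -o /dev/null -w '%{http_code}\n' https://yuri.phx-erp.de/stub_status
curl -sk -o /dev/null -w '%{http_code}\n' https://yuri.phx-erp.de/pgadmin4/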