refactor: update Nginx configuration and Docker setup
- Change user directive in Nginx configuration from 'nginx' to 'www-data'.
- Update upstream server configurations in Nginx to use 'localhost' instead of service names.
- Modify Nginx server block to redirect HTTP to a status page instead of Grafana.
- Rename Alertmanager location from '/alertmanager/' to '/alerts/' for consistency.
- Remove deprecated status page configuration and related files.
- Adjust Prometheus configuration to reflect the new Docker network settings.
This commit is contained in:
@@ -2,12 +2,12 @@
|
||||
# Proxies requests to Alertmanager
|
||||
|
||||
# Alertmanager location
|
||||
location /alertmanager/ {
|
||||
location /alerts/ {
|
||||
# Rate limiting
|
||||
limit_req zone=api burst=10 nodelay;
|
||||
|
||||
# Remove trailing slash for proxy
|
||||
rewrite ^/alertmanager/(.*)$ /$1 break;
|
||||
rewrite ^/alerts/(.*)$ /$1 break;
|
||||
|
||||
# Proxy to Alertmanager
|
||||
proxy_pass http://alertmanager_backend;
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Grafana proxy configuration
|
||||
location /grafana/ {
|
||||
proxy_pass http://grafana_backend/;
|
||||
proxy_pass http://grafana_backend;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
|
||||
@@ -1,43 +0,0 @@
|
||||
# Status page configuration (Uptime Kuma integration)
|
||||
|
||||
# Rate limiting for status page
|
||||
location /status {
|
||||
# Rate limiting
|
||||
limit_req zone=status burst=5 nodelay;
|
||||
|
||||
# Proxy to Uptime Kuma
|
||||
proxy_pass http://uptime_kuma_backend;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
|
||||
# WebSocket support
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
|
||||
# Timeouts
|
||||
proxy_connect_timeout 30s;
|
||||
proxy_send_timeout 30s;
|
||||
proxy_read_timeout 30s;
|
||||
|
||||
# Buffer settings
|
||||
proxy_buffering on;
|
||||
proxy_buffer_size 4k;
|
||||
proxy_buffers 8 4k;
|
||||
|
||||
# Security headers
|
||||
add_header X-Frame-Options "SAMEORIGIN" always;
|
||||
add_header X-Content-Type-Options "nosniff" always;
|
||||
}
|
||||
|
||||
# Nginx status stub (for monitoring)
|
||||
location /nginx_status {
|
||||
stub_status on;
|
||||
access_log off;
|
||||
allow 127.0.0.1;
|
||||
allow 172.16.0.0/12; # Docker networks
|
||||
allow 192.168.0.0/16; # Private networks
|
||||
deny all;
|
||||
}
|
||||
@@ -1,12 +1,6 @@
|
||||
# Uptime Kuma Nginx Configuration
|
||||
# Proxies requests to Uptime Kuma status page
|
||||
|
||||
# Upstream for Uptime Kuma
|
||||
upstream uptime_kuma_backend {
|
||||
server uptime-kuma:3001;
|
||||
keepalive 32;
|
||||
}
|
||||
|
||||
# Status page location
|
||||
location /status {
|
||||
# Rate limiting
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
user nginx;
|
||||
user www-data;
|
||||
worker_processes auto;
|
||||
error_log /var/log/nginx/error.log warn;
|
||||
pid /var/run/nginx.pid;
|
||||
@@ -65,26 +65,27 @@ http {
|
||||
|
||||
# Upstream configurations
|
||||
upstream grafana_backend {
|
||||
server grafana:3000;
|
||||
server localhost:3000;
|
||||
keepalive 32;
|
||||
}
|
||||
|
||||
upstream prometheus_backend {
|
||||
server prometheus:9090;
|
||||
server localhost:9090;
|
||||
keepalive 32;
|
||||
}
|
||||
|
||||
upstream uptime_kuma_backend {
|
||||
server uptime-kuma:3001;
|
||||
server localhost:3001;
|
||||
keepalive 32;
|
||||
}
|
||||
|
||||
upstream alertmanager_backend {
|
||||
server alertmanager:9093;
|
||||
server localhost:9093;
|
||||
keepalive 32;
|
||||
}
|
||||
|
||||
# Main server block
|
||||
# Redirect HTTP to HTTPS
|
||||
server {
|
||||
listen 80;
|
||||
server_name _;
|
||||
@@ -96,8 +97,8 @@ http {
|
||||
server_name _;
|
||||
|
||||
# SSL configuration (self-signed certificate)
|
||||
ssl_certificate /etc/letsencrypt/live/{{SERVER_IP}}/fullchain.pem;
|
||||
ssl_certificate_key /etc/letsencrypt/live/{{SERVER_IP}}/privkey.pem;
|
||||
ssl_certificate /etc/nginx/ssl/fullchain.pem;
|
||||
ssl_certificate_key /etc/nginx/ssl/privkey.pem;
|
||||
ssl_protocols TLSv1.2 TLSv1.3;
|
||||
ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384;
|
||||
ssl_prefer_server_ciphers off;
|
||||
@@ -108,9 +109,10 @@ http {
|
||||
add_header X-Frame-Options "SAMEORIGIN" always;
|
||||
add_header X-Content-Type-Options "nosniff" always;
|
||||
|
||||
# Redirect root to Grafana
|
||||
# Root page - show simple status
|
||||
location = / {
|
||||
return 301 /grafana/;
|
||||
return 200 "Bot Infrastructure Status\n\nServices:\n- Grafana: /grafana/\n- Prometheus: /prometheus/\n- Uptime Kuma: /status/\n- Alertmanager: /alerts/\n";
|
||||
add_header Content-Type text/plain;
|
||||
}
|
||||
|
||||
# Health check endpoint
|
||||
|
||||
Reference in New Issue
Block a user