refactor: update Nginx configuration and Docker setup
- Change user directive in Nginx configuration from 'nginx' to 'www-data'. - Update upstream server configurations in Nginx to use 'localhost' instead of service names. - Modify Nginx server block so the root location serves a plain-text status page instead of redirecting to Grafana. - Rename Alertmanager location from '/alertmanager/' to '/alerts/' for consistency. - Remove deprecated status page configuration and related files. - Adjust Prometheus configuration to reflect the new Docker network settings.
This commit is contained in:
@@ -94,92 +94,26 @@ receivers:
|
||||
- url: 'http://localhost:5001/'
|
||||
send_resolved: true
|
||||
|
||||
# Critical alerts - immediate notification via multiple channels
|
||||
# Critical alerts - immediate notification via webhook
|
||||
- name: 'critical-alerts'
|
||||
email_configs:
|
||||
- to: 'admin@{{DOMAIN}}'
|
||||
subject: '🚨 CRITICAL ALERT: {{ .GroupLabels.alertname }}'
|
||||
body: |
|
||||
{{ range .Alerts }}
|
||||
Alert: {{ .Annotations.summary }}
|
||||
Description: {{ .Annotations.description }}
|
||||
Severity: {{ .Labels.severity }}
|
||||
Service: {{ .Labels.service }}
|
||||
Instance: {{ .Labels.instance }}
|
||||
Time: {{ .StartsAt }}
|
||||
{{ end }}
|
||||
html: |
|
||||
<h2>🚨 Critical Alert</h2>
|
||||
<table>
|
||||
<tr><td><strong>Alert:</strong></td><td>{{ .GroupLabels.alertname }}</td></tr>
|
||||
<tr><td><strong>Service:</strong></td><td>{{ .GroupLabels.service }}</td></tr>
|
||||
<tr><td><strong>Time:</strong></td><td>{{ .GroupLabels.time }}</td></tr>
|
||||
</table>
|
||||
<h3>Alerts:</h3>
|
||||
<ul>
|
||||
{{ range .Alerts }}
|
||||
<li><strong>{{ .Annotations.summary }}</strong><br/>
|
||||
{{ .Annotations.description }}<br/>
|
||||
<small>Instance: {{ .Labels.instance }} | Time: {{ .StartsAt }}</small>
|
||||
</li>
|
||||
{{ end }}
|
||||
</ul>
|
||||
webhook_configs:
|
||||
- url: 'http://localhost:5001/critical'
|
||||
send_resolved: true
|
||||
|
||||
# Warning alerts - less urgent notification
|
||||
- name: 'warning-alerts'
|
||||
email_configs:
|
||||
- to: 'admin@{{DOMAIN}}'
|
||||
subject: '⚠️ WARNING: {{ .GroupLabels.alertname }}'
|
||||
body: |
|
||||
{{ range .Alerts }}
|
||||
Alert: {{ .Annotations.summary }}
|
||||
Description: {{ .Annotations.description }}
|
||||
Severity: {{ .Labels.severity }}
|
||||
Service: {{ .Labels.service }}
|
||||
Instance: {{ .Labels.instance }}
|
||||
Time: {{ .StartsAt }}
|
||||
{{ end }}
|
||||
webhook_configs:
|
||||
- url: 'http://localhost:5001/warning'
|
||||
send_resolved: true
|
||||
|
||||
# Bot-specific alerts
|
||||
- name: 'bot-alerts'
|
||||
email_configs:
|
||||
- to: 'bot-admin@{{DOMAIN}}'
|
||||
subject: '🤖 Bot Alert: {{ .GroupLabels.alertname }}'
|
||||
body: |
|
||||
Bot Alert: {{ .GroupLabels.alertname }}
|
||||
Service: {{ .GroupLabels.service }}
|
||||
|
||||
{{ range .Alerts }}
|
||||
- {{ .Annotations.summary }}
|
||||
{{ .Annotations.description }}
|
||||
Instance: {{ .Labels.instance }}
|
||||
Time: {{ .StartsAt }}
|
||||
{{ end }}
|
||||
webhook_configs:
|
||||
- url: 'http://localhost:5001/bot'
|
||||
send_resolved: true
|
||||
|
||||
# Infrastructure alerts
|
||||
- name: 'infrastructure-alerts'
|
||||
email_configs:
|
||||
- to: 'infra@{{DOMAIN}}'
|
||||
subject: '🏗️ Infrastructure Alert: {{ .GroupLabels.alertname }}'
|
||||
body: |
|
||||
Infrastructure Alert: {{ .GroupLabels.alertname }}
|
||||
Service: {{ .GroupLabels.service }}
|
||||
|
||||
{{ range .Alerts }}
|
||||
- {{ .Annotations.summary }}
|
||||
{{ .Annotations.description }}
|
||||
Instance: {{ .Labels.instance }}
|
||||
Time: {{ .StartsAt }}
|
||||
{{ end }}
|
||||
webhook_configs:
|
||||
- url: 'http://localhost:5001/infrastructure'
|
||||
send_resolved: true
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -2,12 +2,12 @@
|
||||
# Proxies requests to Alertmanager
|
||||
|
||||
# Alertmanager location
|
||||
location /alertmanager/ {
|
||||
location /alerts/ {
|
||||
# Rate limiting
|
||||
limit_req zone=api burst=10 nodelay;
|
||||
|
||||
# Remove trailing slash for proxy
|
||||
rewrite ^/alertmanager/(.*)$ /$1 break;
|
||||
rewrite ^/alerts/(.*)$ /$1 break;
|
||||
|
||||
# Proxy to Alertmanager
|
||||
proxy_pass http://alertmanager_backend;
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Grafana proxy configuration
|
||||
location /grafana/ {
|
||||
proxy_pass http://grafana_backend/;
|
||||
proxy_pass http://grafana_backend;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
|
||||
@@ -1,43 +0,0 @@
|
||||
# Status page configuration (Uptime Kuma integration)
|
||||
|
||||
# Rate limiting for status page
|
||||
location /status {
|
||||
# Rate limiting
|
||||
limit_req zone=status burst=5 nodelay;
|
||||
|
||||
# Proxy to Uptime Kuma
|
||||
proxy_pass http://uptime_kuma_backend;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
|
||||
# WebSocket support
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
|
||||
# Timeouts
|
||||
proxy_connect_timeout 30s;
|
||||
proxy_send_timeout 30s;
|
||||
proxy_read_timeout 30s;
|
||||
|
||||
# Buffer settings
|
||||
proxy_buffering on;
|
||||
proxy_buffer_size 4k;
|
||||
proxy_buffers 8 4k;
|
||||
|
||||
# Security headers
|
||||
add_header X-Frame-Options "SAMEORIGIN" always;
|
||||
add_header X-Content-Type-Options "nosniff" always;
|
||||
}
|
||||
|
||||
# Nginx status stub (for monitoring)
|
||||
location /nginx_status {
|
||||
stub_status on;
|
||||
access_log off;
|
||||
allow 127.0.0.1;
|
||||
allow 172.16.0.0/12; # Docker networks
|
||||
allow 192.168.0.0/16; # Private networks
|
||||
deny all;
|
||||
}
|
||||
@@ -1,12 +1,6 @@
|
||||
# Uptime Kuma Nginx Configuration
|
||||
# Proxies requests to Uptime Kuma status page
|
||||
|
||||
# Upstream for Uptime Kuma
|
||||
upstream uptime_kuma_backend {
|
||||
server uptime-kuma:3001;
|
||||
keepalive 32;
|
||||
}
|
||||
|
||||
# Status page location
|
||||
location /status {
|
||||
# Rate limiting
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
user nginx;
|
||||
user www-data;
|
||||
worker_processes auto;
|
||||
error_log /var/log/nginx/error.log warn;
|
||||
pid /var/run/nginx.pid;
|
||||
@@ -65,26 +65,27 @@ http {
|
||||
|
||||
# Upstream configurations
|
||||
upstream grafana_backend {
|
||||
server grafana:3000;
|
||||
server localhost:3000;
|
||||
keepalive 32;
|
||||
}
|
||||
|
||||
upstream prometheus_backend {
|
||||
server prometheus:9090;
|
||||
server localhost:9090;
|
||||
keepalive 32;
|
||||
}
|
||||
|
||||
upstream uptime_kuma_backend {
|
||||
server uptime-kuma:3001;
|
||||
server localhost:3001;
|
||||
keepalive 32;
|
||||
}
|
||||
|
||||
upstream alertmanager_backend {
|
||||
server alertmanager:9093;
|
||||
server localhost:9093;
|
||||
keepalive 32;
|
||||
}
|
||||
|
||||
# Main server block
|
||||
# Redirect HTTP to HTTPS
|
||||
server {
|
||||
listen 80;
|
||||
server_name _;
|
||||
@@ -96,8 +97,8 @@ http {
|
||||
server_name _;
|
||||
|
||||
# SSL configuration (self-signed certificate)
|
||||
ssl_certificate /etc/letsencrypt/live/{{SERVER_IP}}/fullchain.pem;
|
||||
ssl_certificate_key /etc/letsencrypt/live/{{SERVER_IP}}/privkey.pem;
|
||||
ssl_certificate /etc/nginx/ssl/fullchain.pem;
|
||||
ssl_certificate_key /etc/nginx/ssl/privkey.pem;
|
||||
ssl_protocols TLSv1.2 TLSv1.3;
|
||||
ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384;
|
||||
ssl_prefer_server_ciphers off;
|
||||
@@ -108,9 +109,10 @@ http {
|
||||
add_header X-Frame-Options "SAMEORIGIN" always;
|
||||
add_header X-Content-Type-Options "nosniff" always;
|
||||
|
||||
# Redirect root to Grafana
|
||||
# Root page - show simple status
|
||||
location = / {
|
||||
return 301 /grafana/;
|
||||
return 200 "Bot Infrastructure Status\n\nServices:\n- Grafana: /grafana/\n- Prometheus: /prometheus/\n- Uptime Kuma: /status/\n- Alertmanager: /alerts/\n";
|
||||
add_header Content-Type text/plain;
|
||||
}
|
||||
|
||||
# Health check endpoint
|
||||
|
||||
@@ -13,7 +13,7 @@ scrape_configs:
|
||||
# Job для мониторинга Node Exporter
|
||||
- job_name: 'node'
|
||||
static_configs:
|
||||
- targets: ['172.17.0.1:9100'] # Специальное имя для доступа к хосту
|
||||
- targets: ['172.20.0.1:9100'] # IP хоста в Docker сети bots_network
|
||||
labels:
|
||||
instance: 'main-server'
|
||||
|
||||
|
||||
Reference in New Issue
Block a user