- Change user directive in Nginx configuration from 'nginx' to 'www-data'. - Update upstream server configurations in Nginx to use 'localhost' instead of service names. - Modify Nginx server block to redirect HTTP to a status page instead of Grafana. - Rename Alertmanager location from '/alertmanager/' to '/alerts/' for consistency. - Remove deprecated status page configuration and related files. - Adjust Prometheus configuration to reflect the new Docker network settings.
# 259 lines · 7.6 KiB · YAML
|
|
services:
|
|
# Prometheus Monitoring
|
|
prometheus:
|
|
image: prom/prometheus:latest
|
|
container_name: bots_prometheus
|
|
restart: unless-stopped
|
|
command:
|
|
- '--config.file=/etc/prometheus/prometheus.yml'
|
|
- '--storage.tsdb.path=/prometheus'
|
|
- '--web.console.libraries=/etc/prometheus/console_libraries'
|
|
- '--web.console.templates=/etc/prometheus/consoles'
|
|
- '--storage.tsdb.retention.time=${PROMETHEUS_RETENTION_DAYS:-30}d'
|
|
- '--web.enable-lifecycle'
|
|
- '--web.external-url=https://${SERVER_IP}/prometheus/'
|
|
ports:
|
|
- "9090:9090"
|
|
volumes:
|
|
- ./infra/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro
|
|
- ./infra/prometheus/alert_rules.yml:/etc/prometheus/alert_rules.yml:ro
|
|
- prometheus_data:/prometheus
|
|
networks:
|
|
- bots_network
|
|
healthcheck:
|
|
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:9090/-/healthy"]
|
|
interval: 30s
|
|
timeout: 10s
|
|
retries: 3
|
|
|
|
# Grafana Dashboard
|
|
grafana:
|
|
image: grafana/grafana:latest
|
|
container_name: bots_grafana
|
|
restart: unless-stopped
|
|
environment:
|
|
- GF_SECURITY_ADMIN_USER=${GRAFANA_ADMIN_USER:-admin}
|
|
- GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASSWORD:-admin}
|
|
- GF_USERS_ALLOW_SIGN_UP=false
|
|
- GF_INSTALL_PLUGINS=grafana-clock-panel,grafana-simple-json-datasource
|
|
- GF_SERVER_ROOT_URL=https://${SERVER_IP}/grafana/
|
|
- GF_SERVER_SERVE_FROM_SUB_PATH=true
|
|
ports:
|
|
- "3000:3000"
|
|
volumes:
|
|
- grafana_data:/var/lib/grafana
|
|
- ./infra/grafana/provisioning:/etc/grafana/provisioning:ro
|
|
networks:
|
|
- bots_network
|
|
depends_on:
|
|
- prometheus
|
|
healthcheck:
|
|
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3000/api/health"]
|
|
interval: 30s
|
|
timeout: 10s
|
|
retries: 3
|
|
|
|
# Uptime Kuma Status Page
|
|
uptime-kuma:
|
|
image: louislam/uptime-kuma:latest
|
|
container_name: bots_uptime_kuma
|
|
restart: unless-stopped
|
|
volumes:
|
|
- uptime_kuma_data:/app/data
|
|
ports:
|
|
- "3001:3001"
|
|
environment:
|
|
- UPTIME_KUMA_PORT=3001
|
|
networks:
|
|
- bots_network
|
|
healthcheck:
|
|
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3001"]
|
|
interval: 30s
|
|
timeout: 10s
|
|
retries: 3
|
|
start_period: 40s
|
|
|
|
# Alertmanager
|
|
alertmanager:
|
|
image: prom/alertmanager:latest
|
|
container_name: bots_alertmanager
|
|
restart: unless-stopped
|
|
command:
|
|
- '--config.file=/etc/alertmanager/alertmanager.yml'
|
|
- '--storage.path=/alertmanager'
|
|
- '--web.external-url=https://${SERVER_IP}/alertmanager/'
|
|
- '--web.route-prefix=/'
|
|
ports:
|
|
- "9093:9093"
|
|
volumes:
|
|
- alertmanager_data:/alertmanager
|
|
- ./infra/alertmanager/alertmanager.yml:/etc/alertmanager/alertmanager.yml:ro
|
|
networks:
|
|
- bots_network
|
|
depends_on:
|
|
- prometheus
|
|
healthcheck:
|
|
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:9093/-/healthy"]
|
|
interval: 30s
|
|
timeout: 10s
|
|
retries: 3
|
|
|
|
# Nginx Reverse Proxy
|
|
nginx:
|
|
image: nginx:alpine
|
|
container_name: bots_nginx
|
|
restart: unless-stopped
|
|
ports:
|
|
- "80:80"
|
|
- "443:443"
|
|
environment:
|
|
- SERVER_IP=${SERVER_IP}
|
|
volumes:
|
|
- ./infra/nginx/nginx.conf:/etc/nginx/nginx.conf:ro
|
|
- ./infra/nginx/conf.d:/etc/nginx/conf.d:ro
|
|
- ./infra/nginx/ssl:/etc/nginx/ssl:ro
|
|
- ./infra/nginx/.htpasswd:/etc/nginx/.htpasswd:ro
|
|
- /etc/letsencrypt:/etc/letsencrypt:ro
|
|
networks:
|
|
- bots_network
|
|
depends_on:
|
|
- grafana
|
|
- prometheus
|
|
- uptime-kuma
|
|
healthcheck:
|
|
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost/nginx-health"]
|
|
interval: 30s
|
|
timeout: 10s
|
|
retries: 3
|
|
|
|
# Telegram Helper Bot
|
|
telegram-bot:
|
|
build:
|
|
context: ./bots/telegram-helper-bot
|
|
dockerfile: Dockerfile
|
|
container_name: bots_telegram_bot
|
|
restart: unless-stopped
|
|
env_file:
|
|
- ./bots/telegram-helper-bot/.env
|
|
ports:
|
|
- "8080:8080"
|
|
environment:
|
|
- PYTHONPATH=/app
|
|
- DOCKER_CONTAINER=true
|
|
- LOG_LEVEL=${LOG_LEVEL:-INFO}
|
|
- LOG_RETENTION_DAYS=${LOG_RETENTION_DAYS:-30}
|
|
- METRICS_HOST=${METRICS_HOST:-0.0.0.0}
|
|
- METRICS_PORT=${METRICS_PORT:-8080}
|
|
# Telegram settings
|
|
- TELEGRAM_BOT_TOKEN=${BOT_TOKEN}
|
|
- TELEGRAM_LISTEN_BOT_TOKEN=${LISTEN_BOT_TOKEN}
|
|
- TELEGRAM_TEST_BOT_TOKEN=${TEST_BOT_TOKEN}
|
|
- TELEGRAM_PREVIEW_LINK=${PREVIEW_LINK:-false}
|
|
- TELEGRAM_MAIN_PUBLIC=${MAIN_PUBLIC}
|
|
- TELEGRAM_GROUP_FOR_POSTS=${GROUP_FOR_POSTS}
|
|
- TELEGRAM_GROUP_FOR_MESSAGE=${GROUP_FOR_MESSAGE}
|
|
- TELEGRAM_GROUP_FOR_LOGS=${GROUP_FOR_LOGS}
|
|
- TELEGRAM_IMPORTANT_LOGS=${IMPORTANT_LOGS}
|
|
- TELEGRAM_ARCHIVE=${ARCHIVE}
|
|
- TELEGRAM_TEST_GROUP=${TEST_GROUP}
|
|
# Bot settings
|
|
- SETTINGS_LOGS=${LOGS:-false}
|
|
- SETTINGS_TEST=${TEST:-false}
|
|
# Database
|
|
- DATABASE_PATH=${DATABASE_PATH:-database/tg-bot-database.db}
|
|
volumes:
|
|
- ./bots/telegram-helper-bot/database:/app/database:rw
|
|
- ./bots/telegram-helper-bot/logs:/app/logs:rw
|
|
- ./bots/telegram-helper-bot/voice_users:/app/voice_users:rw
|
|
- ./bots/telegram-helper-bot/.env:/app/.env:ro
|
|
networks:
|
|
- bots_network
|
|
depends_on:
|
|
- prometheus
|
|
healthcheck:
|
|
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:8080/health"]
|
|
interval: 30s
|
|
timeout: 10s
|
|
retries: 3
|
|
start_period: 40s
|
|
deploy:
|
|
resources:
|
|
limits:
|
|
memory: 256M
|
|
cpus: '0.5'
|
|
reservations:
|
|
memory: 128M
|
|
cpus: '0.25'
|
|
|
|
# AnonBot - Anonymous Q&A Bot
|
|
anon-bot:
|
|
build:
|
|
context: ./bots/AnonBot
|
|
dockerfile: Dockerfile
|
|
container_name: bots_anon_bot
|
|
restart: unless-stopped
|
|
env_file:
|
|
- ./bots/AnonBot/.env
|
|
ports:
|
|
- "8081:8081"
|
|
environment:
|
|
- PYTHONPATH=/app
|
|
- PYTHONUNBUFFERED=1
|
|
- DOCKER_CONTAINER=true
|
|
- LOG_LEVEL=${LOG_LEVEL:-INFO}
|
|
# AnonBot settings
|
|
- ANON_BOT_TOKEN=${BOT_TOKEN}
|
|
- ANON_BOT_ADMINS=${ADMINS}
|
|
- ANON_BOT_DATABASE_PATH=/app/database/anon_qna.db
|
|
- ANON_BOT_DEBUG=${DEBUG:-false}
|
|
- ANON_BOT_MAX_QUESTION_LENGTH=${MAX_QUESTION_LENGTH:-1000}
|
|
- ANON_BOT_MAX_ANSWER_LENGTH=${MAX_ANSWER_LENGTH:-2000}
|
|
# Rate limiting settings
|
|
- RATE_LIMIT_ENV=${RATE_LIMIT_ENV:-production}
|
|
- RATE_LIMIT_MESSAGES_PER_SECOND=${RATE_LIMIT_MESSAGES_PER_SECOND:-0.5}
|
|
- RATE_LIMIT_BURST_LIMIT=${RATE_LIMIT_BURST_LIMIT:-2}
|
|
- RATE_LIMIT_RETRY_MULTIPLIER=${RATE_LIMIT_RETRY_MULTIPLIER:-1.5}
|
|
- RATE_LIMIT_MAX_RETRY_DELAY=${RATE_LIMIT_MAX_RETRY_DELAY:-30.0}
|
|
- RATE_LIMIT_MAX_RETRIES=${RATE_LIMIT_MAX_RETRIES:-3}
|
|
volumes:
|
|
- ./bots/AnonBot/database:/app/database:rw
|
|
- ./bots/AnonBot/logs:/app/logs:rw
|
|
- ./bots/AnonBot/.env:/app/.env:ro
|
|
networks:
|
|
- bots_network
|
|
depends_on:
|
|
- prometheus
|
|
healthcheck:
|
|
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:8081/health"]
|
|
interval: 30s
|
|
timeout: 10s
|
|
retries: 3
|
|
start_period: 40s
|
|
deploy:
|
|
resources:
|
|
limits:
|
|
memory: 256M
|
|
cpus: '0.25'
|
|
reservations:
|
|
memory: 128M
|
|
cpus: '0.1'
|
|
|
|
volumes:
|
|
prometheus_data:
|
|
driver: local
|
|
grafana_data:
|
|
driver: local
|
|
uptime_kuma_data:
|
|
driver: local
|
|
alertmanager_data:
|
|
driver: local
|
|
|
|
networks:
|
|
bots_network:
|
|
driver: bridge
|
|
ipam:
|
|
config:
|
|
- subnet: 172.20.0.0/16
|
|
gateway: 172.20.0.1
|