chore: update configuration files for improved logging and service management

- Enhanced .dockerignore to exclude bot logs, Docker volumes, and temporary files.
- Updated .gitignore to ignore Ansible vars files for better environment management.
- Modified docker-compose.yml health checks to use curl for service verification (see the healthcheck sketch after this list).
- Refined the Ansible playbook: added tasks that create default Zsh configuration files and clean up temporary files.
- Improved the Nginx configuration to support Uptime Kuma, with dedicated location blocks for the status page and dashboard, rate limiting, and WebSocket support.
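For reference, a minimal sketch of the curl-based healthcheck shape now used in docker-compose.yml (service name, port, and endpoint are illustrative, not taken from the actual compose file):

services:
  telegram-bot:
    healthcheck:
      # with -f, curl exits non-zero on HTTP errors, marking the container unhealthy
      test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 15s

This assumes curl is installed in the image; slim images may need it added or a different probe.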
Commit 8be219778c (parent a075ef6772), 2025-09-19 16:40:40 +03:00
5 changed files with 531 additions and 185 deletions


@@ -17,6 +17,8 @@
# Grafana settings
grafana_admin_user: "{{ lookup('env', 'GRAFANA_ADMIN_USER') | default('admin') }}"
grafana_admin_password: "{{ lookup('env', 'GRAFANA_ADMIN_PASSWORD') | default('admin') }}"
# Status page settings
status_page_password: "{{ STATUS_PAGE_PASSWORD | default('admin123') }}"
# Monitoring settings
monitoring_username: "{{ lookup('env', 'MONITORING_USERNAME') | default('admin') }}"
monitoring_password: "{{ lookup('env', 'MONITORING_PASSWORD') | default('admin123') }}"
@@ -344,117 +346,11 @@
group: "{{ deploy_user }}"
mode: '0755'
- name: "[4/10] Копировать .zshrc со старого сервера"
fetch:
src: "/home/prod/.zshrc"
dest: "/tmp/deploy_zshrc"
flat: yes
delegate_to: "{{ old_server }}"
ignore_errors: yes
- name: "[4/10] Переместить .zshrc на новое место"
copy:
src: "/tmp/deploy_zshrc"
dest: "/home/{{ deploy_user }}/.zshrc"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
when: ansible_check_mode == false
ignore_errors: yes
- name: "[4/10] Копировать .zshenv со старого сервера"
fetch:
src: "/home/prod/.zshenv"
dest: "/tmp/deploy_zshenv"
flat: yes
delegate_to: "{{ old_server }}"
ignore_errors: yes
- name: "[4/10] Переместить .zshenv на новое место"
copy:
src: "/tmp/deploy_zshenv"
dest: "/home/{{ deploy_user }}/.zshenv"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
when: ansible_check_mode == false
ignore_errors: yes
- name: "[4/10] Копировать .zprofile со старого сервера"
fetch:
src: "/home/prod/.zprofile"
dest: "/tmp/deploy_zprofile"
flat: yes
delegate_to: "{{ old_server }}"
ignore_errors: yes
- name: "[4/10] Переместить .zprofile на новое место"
copy:
src: "/tmp/deploy_zprofile"
dest: "/home/{{ deploy_user }}/.zprofile"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
when: ansible_check_mode == false
ignore_errors: yes
- name: "[4/10] Копировать .zlogin со старого сервера"
fetch:
src: "/home/prod/.zlogin"
dest: "/tmp/deploy_zlogin"
flat: yes
delegate_to: "{{ old_server }}"
ignore_errors: yes
- name: "[4/10] Переместить .zlogin на новое место"
copy:
src: "/tmp/deploy_zlogin"
dest: "/home/{{ deploy_user }}/.zlogin"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
when: ansible_check_mode == false
ignore_errors: yes
- name: "[4/10] Копировать .zlogout со старого сервера"
fetch:
src: "/home/prod/.zlogout"
dest: "/tmp/deploy_zlogout"
flat: yes
delegate_to: "{{ old_server }}"
ignore_errors: yes
- name: "[4/10] Переместить .zlogout на новое место"
copy:
src: "/tmp/deploy_zlogout"
dest: "/home/{{ deploy_user }}/.zlogout"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
when: ansible_check_mode == false
ignore_errors: yes
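The five fetch/move pairs above differ only in the dotfile name; a hedged sketch of the same logic collapsed into one looped pair (same paths and semantics assumed, with the more idiomatic "not ansible_check_mode" test):

- name: "[4/10] Copy zsh dotfiles from the old server"
  fetch:
    src: "/home/prod/{{ item }}"
    dest: "/tmp/deploy_{{ item | regex_replace('^\\.', '') }}"
    flat: yes
  delegate_to: "{{ old_server }}"
  ignore_errors: yes
  loop: [".zshrc", ".zshenv", ".zprofile", ".zlogin", ".zlogout"]
- name: "[4/10] Move zsh dotfiles into place"
  copy:
    src: "/tmp/deploy_{{ item | regex_replace('^\\.', '') }}"
    dest: "/home/{{ deploy_user }}/{{ item }}"
    owner: "{{ deploy_user }}"
    group: "{{ deploy_user }}"
    mode: '0644'
  when: not ansible_check_mode
  ignore_errors: yes
  loop: [".zshrc", ".zshenv", ".zprofile", ".zlogin", ".zlogout"]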
- name: "[4/10] Копировать директорию .zsh со старого сервера (если существует)"
command: >
rsync -avz --progress --stats --partial --verbose
root@77.223.98.129:/home/prod/.zsh/
/home/{{ deploy_user }}/.zsh/
ignore_errors: yes
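The rsync command above hardcodes the old server's address; a sketch of the same copy via the synchronize module, reusing the existing old_server variable (assumes the old server can reach the new one over SSH, since rsync runs from the delegated host):

- name: "[4/10] Copy the .zsh directory from the old server (if it exists)"
  synchronize:
    src: "/home/prod/.zsh/"
    dest: "/home/{{ deploy_user }}/.zsh/"
    mode: push
  delegate_to: "{{ old_server }}"
  ignore_errors: yes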
- name: "[4/10] Установить правильные права на все zsh файлы"
file:
path: "/home/{{ deploy_user }}/.zsh"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0755'
recurse: yes
- name: "[4/10] Создать базовый .zshrc если файлы не были скопированы"
- name: "[4/10] Создать базовый .zshrc"
copy:
content: |
# Zsh configuration
# This file was created automatically by Ansible
# Original configuration files were not found on the old server
# Enable completion system
autoload -Uz compinit
@@ -530,20 +426,71 @@
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
when: ansible_check_mode == false
- name: "[4/10] Очистить временные файлы zsh конфигурации"
- name: "[4/10] Создать базовый .zshenv"
copy:
content: |
# Zsh environment configuration
# This file is sourced before .zshrc
# Set default editor
export EDITOR=vim
# Add local bin to PATH
export PATH="$HOME/.local/bin:$PATH"
dest: "/home/{{ deploy_user }}/.zshenv"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
- name: "[4/10] Создать базовый .zprofile"
copy:
content: |
# Zsh profile configuration
# This file is sourced for login shells
# Load .zshrc if it exists
[ -f ~/.zshrc ] && source ~/.zshrc
dest: "/home/{{ deploy_user }}/.zprofile"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
- name: "[4/10] Создать базовый .zshrc.local"
copy:
content: |
# Local zsh configuration
# This file is for local customizations
# Add your custom aliases and functions here
dest: "/home/{{ deploy_user }}/.zshrc.local"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
- name: "[4/10] Установить правильные права на zsh файлы"
file:
path: "{{ item }}"
state: absent
path: "/home/{{ deploy_user }}/{{ item }}"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
loop:
- "/tmp/deploy_zshrc"
- "/tmp/deploy_zshenv"
- "/tmp/deploy_zprofile"
- "/tmp/deploy_zlogin"
- "/tmp/deploy_zlogout"
- ".zshrc"
- ".zshenv"
- ".zprofile"
- ".zshrc.local"
ignore_errors: yes
- name: "[4/10] Установить правильные права на все zsh файлы"
file:
path: "/home/{{ deploy_user }}/.zsh"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0755'
recurse: yes
# ========================================
# STAGE 5: CLONE REPOSITORIES (DEPLOY)
# ========================================
@@ -599,43 +546,39 @@
- name: "[6/10] Скопировать конфигурацию Alertmanager"
copy:
src: "{{ project_root }}/infra/alertmanager/alertmanager.yml"
src: "{{ playbook_dir }}/../alertmanager/alertmanager.yml"
dest: "{{ project_root }}/infra/alertmanager/alertmanager.yml"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
backup: yes
remote_src: yes
- name: "[6/10] Скопировать правила алертов Prometheus"
copy:
src: "{{ project_root }}/infra/prometheus/alert_rules.yml"
src: "{{ playbook_dir }}/../prometheus/alert_rules.yml"
dest: "{{ project_root }}/infra/prometheus/alert_rules.yml"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
backup: yes
remote_src: yes
- name: "[6/10] Скопировать дашборды Grafana"
copy:
src: "{{ project_root }}/infra/grafana/dashboards/"
src: "{{ playbook_dir }}/../grafana/dashboards/"
dest: "{{ project_root }}/infra/grafana/dashboards/"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
backup: yes
remote_src: yes
- name: "[6/10] Скопировать скрипт настройки SSL"
copy:
src: "{{ project_root }}/scripts/setup-ssl.sh"
src: "{{ playbook_dir }}/../../scripts/setup-ssl.sh"
dest: /usr/local/bin/setup-ssl.sh
owner: root
group: root
mode: '0755'
backup: yes
remote_src: yes
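The recurring change in this hunk deserves a note: with remote_src: yes, copy resolves src on the target host, so each old task effectively copied a file onto itself and never shipped anything from the repo. Dropping remote_src and pointing src at playbook_dir makes Ansible push the file from the control machine. A minimal contrast, using the paths from the Alertmanager task above:

# before: src and dest are the same file on the target host, effectively a no-op
- copy:
    src: "{{ project_root }}/infra/alertmanager/alertmanager.yml"
    dest: "{{ project_root }}/infra/alertmanager/alertmanager.yml"
    remote_src: yes

# after: src is resolved on the control machine, relative to the playbook
- copy:
    src: "{{ playbook_dir }}/../alertmanager/alertmanager.yml"
    dest: "{{ project_root }}/infra/alertmanager/alertmanager.yml"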
- name: "[6/10] Установить правильные права на дашборд Node Exporter Full"
file:
@@ -661,6 +604,7 @@
dest: "/tmp/telegram-helper-bot.env"
flat: yes
delegate_to: "{{ old_server }}"
ignore_errors: yes
- name: "[7/10] Переместить .env для telegram-helper-bot на новое место"
copy:
@@ -669,6 +613,8 @@
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
when: ansible_check_mode == false
ignore_errors: yes
- name: "[7/10] Проверить размер БД для telegram-helper-bot"
stat:
@@ -686,6 +632,7 @@
dest: "/tmp/tg-bot-database.db"
flat: yes
delegate_to: "{{ old_server }}"
ignore_errors: yes
- name: "[7/10] Переместить БД для telegram-helper-bot на новое место"
copy:
@@ -694,6 +641,8 @@
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
when: ansible_check_mode == false
ignore_errors: yes
- name: "[7/10] Создать папку voice_users на новом сервере"
file:
@@ -713,18 +662,19 @@
- name: "[7/10] Копировать voice_users со старого сервера на локальную машину"
command: >
rsync -avz --progress --stats --partial --verbose
rsync -avz --progress --timeout=60
root@77.223.98.129:/home/prod/bots/telegram-helper-bot/voice_users/
/tmp/voice_users_migration/
delegate_to: localhost
become: no
ignore_errors: yes
- name: "[7/10] Копировать voice_users с локальной машины на новый сервер"
synchronize:
src: "/tmp/voice_users_migration/"
dest: "{{ project_root }}/bots/telegram-helper-bot/voice_users/"
mode: push
rsync_opts: "--progress --stats --partial --verbose"
rsync_opts: ["--progress", "--stats", "--partial", "--verbose"]
- name: "[7/10] Очистить временную папку на локальной машине"
file:
@@ -739,6 +689,7 @@
dest: "/tmp/root.env"
flat: yes
delegate_to: "{{ old_server }}"
ignore_errors: yes
- name: "[7/10] Переместить корневой .env файл на новое место"
copy:
@@ -747,6 +698,8 @@
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
when: ansible_check_mode == false
ignore_errors: yes
- name: "[7/10] Копировать .env для AnonBot"
fetch:
@@ -754,6 +707,7 @@
dest: "/tmp/anonbot.env"
flat: yes
delegate_to: "{{ old_server }}"
ignore_errors: yes
- name: "[7/10] Переместить .env для AnonBot на новое место"
copy:
@@ -762,6 +716,8 @@
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
when: ansible_check_mode == false
ignore_errors: yes
- name: "[7/10] Проверить размер БД для AnonBot"
stat:
@@ -779,6 +735,7 @@
dest: "/tmp/anon_qna.db"
flat: yes
delegate_to: "{{ old_server }}"
ignore_errors: yes
- name: "[7/10] Переместить БД для AnonBot на новое место"
copy:
@@ -787,6 +744,8 @@
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
when: ansible_check_mode == false
ignore_errors: yes
- name: "[7/10] Установить права на скопированные файлы"
file:
@@ -808,6 +767,26 @@
mode: '0755'
recurse: yes
- name: "[7/10] Очистить временные файлы"
file:
path: "{{ item }}"
state: absent
loop:
- "/tmp/telegram-helper-bot.env"
- "/tmp/tg-bot-database.db"
- "/tmp/root.env"
- "/tmp/anonbot.env"
- "/tmp/anon_qna.db"
ignore_errors: yes
- name: "[7/10] Очистить временную папку voice_users на локальной машине"
file:
path: "/tmp/voice_users_migration"
state: absent
delegate_to: localhost
become: no
ignore_errors: yes
# ========================================
# STAGE 8: NGINX CONFIGURATION (ROOT)
# ========================================
@@ -839,23 +818,76 @@
- name: "[8/10] Скопировать скрипт генерации паролей"
copy:
src: "{{ project_root }}/scripts/generate_auth_passwords.sh"
src: "{{ playbook_dir }}/../../scripts/generate_auth_passwords.sh"
dest: /usr/local/bin/generate_auth_passwords.sh
owner: root
group: root
mode: '0755'
remote_src: yes
- name: "[8/10] Создать файл паролей для мониторинга"
htpasswd:
- name: "[8/10] Скопировать .env файл на сервер"
copy:
src: "{{ playbook_dir }}/../../.env"
dest: /tmp/.env
mode: '0600'
ignore_errors: yes
- name: "[8/10] Создать временный vars файл из .env"
shell: |
python3 -c "
import os
env_vars = {}
with open('/tmp/.env', 'r') as f:
for line in f:
line = line.strip()
if line and not line.startswith('#') and '=' in line:
key, value = line.split('=', 1)
env_vars[key] = value
import yaml
print(yaml.dump(env_vars, default_flow_style=False))
" > /tmp/ansible_vars.yml
ignore_errors: yes
- name: "[8/10] Загрузить переменные из временного файла"
include_vars:
file: /tmp/ansible_vars.yml
ignore_errors: yes
- name: "[8/10] Удалить временные файлы"
file:
path: "{{ item }}"
state: absent
loop:
- /tmp/ansible_vars.yml
- /tmp/.env
ignore_errors: yes
- name: "[8/10] Удалить старый файл паролей"
file:
path: /etc/nginx/passwords/monitoring.htpasswd
state: absent
ignore_errors: yes
- name: "[8/10] Сгенерировать htpasswd хеш для мониторинга"
command: htpasswd -nb admin "{{ status_page_password }}"
register: htpasswd_output
changed_when: false
- name: "[8/10] Создать файл паролей для мониторинга"
copy:
content: "{{ htpasswd_output.stdout }}"
dest: /etc/nginx/passwords/monitoring.htpasswd
owner: root
group: www-data
mode: '0640'
backup: yes
become: yes
- name: "[8/10] Установить права на файл паролей"
file:
path: /etc/nginx/passwords/monitoring.htpasswd
name: "{{ monitoring_username | default('admin') }}"
password: "{{ monitoring_password | default('admin123') }}"
owner: root
group: www-data
mode: '0640'
create: yes
state: present
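For comparison, the removed htpasswd-module approach does the same thing idempotently in a single task; a sketch (the module lives in community.general and needs passlib on the target, which may be why it was swapped for the command + copy pair):

- name: "[8/10] Create the password file for monitoring"
  community.general.htpasswd:
    path: /etc/nginx/passwords/monitoring.htpasswd
    name: admin
    password: "{{ status_page_password }}"
    owner: root
    group: www-data
    mode: '0640'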
- name: "[8/10] Сгенерировать самоподписанный SSL сертификат (fallback)"
command: >
@@ -901,7 +933,7 @@
htpasswd:
path: "{{ project_root }}/infra/nginx/.htpasswd"
name: "admin"
password: "{{ lookup('env', 'STATUS_PAGE_PASSWORD') | default('admin123') }}"
password: "{{ status_page_password }}"
owner: root
group: root
mode: '0644'
@@ -912,24 +944,15 @@
state: absent
- name: "[8/10] Скопировать основную конфигурацию nginx"
template:
src: "/Users/andrejkatyhin/PycharmProjects/prod/infra/nginx/nginx.conf"
copy:
src: "{{ playbook_dir }}/../nginx/nginx.conf"
dest: /etc/nginx/nginx.conf
owner: root
group: root
mode: '0644'
backup: yes
vars:
SERVER_IP: "{{ ansible_host }}"
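Since a broken nginx.conf takes the whole proxy down, the copy can be guarded with the module's validate parameter; a sketch (%s expands to the temporary file path that is checked before the file is moved into place):

- name: "[8/10] Copy the main nginx configuration"
  copy:
    src: "{{ playbook_dir }}/../nginx/nginx.conf"
    dest: /etc/nginx/nginx.conf
    owner: root
    group: root
    mode: '0644'
    backup: yes
    # nginx -t exits non-zero on syntax errors, aborting the copy
    validate: "nginx -t -c %s"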
- name: "[8/10] Скопировать конфигурации nginx для сервисов"
copy:
src: "/Users/andrejkatyhin/PycharmProjects/prod/infra/nginx/conf.d/"
dest: /etc/nginx/conf.d/
owner: root
group: root
mode: '0644'
backup: yes
# Removed: service nginx configurations are now integrated into the main nginx.conf
- name: "[8/10] Создать директорию для SSL сертификатов"
file:
@@ -958,15 +981,14 @@
- /etc/nginx/ssl/privkey.pem
- /etc/nginx/ssl/fullchain.pem
- name: "[8/10] Скопировать htpasswd файл"
- name: "[8/10] Создать htpasswd файл для status page"
copy:
src: "{{ project_root }}/infra/nginx/.htpasswd"
content: "admin:$apr1$rbRCQQfQ$CnUvjW17YHFuEWjmlqxjx."
dest: /etc/nginx/.htpasswd
owner: root
group: root
mode: '0644'
backup: yes
remote_src: yes
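Hardcoding an $apr1$ hash pins the file to one unrecoverable password; a hedged alternative derives the hash from the existing status_page_password variable (assumes passlib on the control machine, which provides the apr_md5_crypt scheme that htpasswd uses):

- name: "[8/10] Create the htpasswd file for the status page"
  copy:
    content: "admin:{{ status_page_password | password_hash('apr_md5_crypt') }}\n"
    dest: /etc/nginx/.htpasswd
    owner: root
    group: root
    mode: '0644'
    backup: yes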
- name: "[8/10] Включить nginx (запустим позже после контейнеров)"
systemd:
@@ -1118,24 +1140,68 @@
mode: '0755'
- name: "[9.5/10] Настроить logrotate для ботов"
template:
src: "{{ project_root }}/infra/logrotate/logrotate_bots.conf.j2"
copy:
content: |
# Logrotate configuration for bot applications
# This file manages log rotation for all bot services
/home/prod/bots/*/logs/*.log {
daily
missingok
rotate 30
compress
delaycompress
notifempty
create 0644 deploy deploy
postrotate
# Restart bot services if they are running
if [ -f /home/deploy/.docker-compose-pid ]; then
cd /home/prod && docker-compose restart
fi
endscript
}
/home/prod/bots/*/bot_stderr.log {
daily
missingok
rotate 30
compress
delaycompress
notifempty
create 0644 deploy deploy
postrotate
# Restart bot services if they are running
if [ -f /home/deploy/.docker-compose-pid ]; then
cd /home/prod && docker-compose restart
fi
endscript
}
# Docker container logs
/var/lib/docker/containers/*/*.log {
daily
missingok
rotate 7
compress
delaycompress
notifempty
create 0644 root root
postrotate
# Reload Docker daemon
systemctl reload docker
endscript
}
dest: /etc/logrotate.d/bots
owner: root
group: root
mode: '0644'
backup: yes
remote_src: yes
- name: "[9.5/10] Настроить logrotate для системных сервисов"
template:
src: "{{ project_root }}/infra/logrotate/logrotate_system.conf.j2"
dest: /etc/logrotate.d/system
owner: root
group: root
mode: '0644'
backup: yes
remote_src: yes
- name: "[9.5/10] Удалить дублирующуюся конфигурацию logrotate"
file:
path: /etc/logrotate.d/system
state: absent
ignore_errors: yes
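With the rotation rules now inlined, a dry run can catch syntax mistakes before the nightly cron does; a sketch using logrotate's debug mode, which parses the config and reports what it would do without rotating anything:

- name: "[9.5/10] Validate the bots logrotate configuration"
  command: logrotate -d /etc/logrotate.d/bots
  register: logrotate_check
  changed_when: false
  ignore_errors: yes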
- name: "[9.5/10] Создать директории для логов ботов"
file:
@@ -1233,14 +1299,14 @@
wait_for:
port: 3000
host: "{{ ansible_host }}"
timeout: 30
timeout: 10
state: started
- name: "[10/10] Проверить, что порт 9100 (Node Exporter) открыт"
wait_for:
port: 9100
host: "{{ ansible_host }}"
timeout: 30
timeout: 10
state: started
- name: "[10/10] Проверить доступность Node Exporter метрик"
@@ -1252,26 +1318,27 @@
register: node_exporter_metrics
retries: 3
delay: 5
ignore_errors: yes
- name: "[10/10] Проверить, что порт 80 (Nginx HTTP) открыт"
wait_for:
port: 80
host: "{{ ansible_host }}"
timeout: 30
timeout: 10
state: started
- name: "[10/10] Проверить, что порт 443 (Nginx HTTPS) открыт"
wait_for:
port: 443
host: "{{ ansible_host }}"
timeout: 30
timeout: 10
state: started
- name: "[10/10] Проверить, что порт 3001 (Uptime Kuma) открыт"
wait_for:
port: 3001
host: "{{ ansible_host }}"
timeout: 30
timeout: 10
state: started
# - name: "[10/10] Проверить, что порт 9093 (Alertmanager) открыт"
@@ -1290,7 +1357,8 @@
validate_certs: no
register: nginx_health
retries: 5
delay: 10
delay: 5
ignore_errors: yes
- name: "[10/10] Проверить доступность Grafana через Nginx"
uri:
@@ -1300,7 +1368,8 @@
validate_certs: no
register: grafana_nginx_health
retries: 5
delay: 10
delay: 5
ignore_errors: yes
- name: "[10/10] Проверить доступность Prometheus через Nginx (health check без авторизации)"
uri:
@@ -1310,7 +1379,8 @@
validate_certs: no
register: prometheus_nginx_health
retries: 5
delay: 10
delay: 5
ignore_errors: yes
- name: "[10/10] Проверить доступность Grafana API"
uri:
@@ -1320,14 +1390,111 @@
validate_certs: no
register: grafana_health
retries: 5
delay: 10
delay: 5
ignore_errors: yes
- name: "[10/10] Настроить Uptime Kuma мониторы"
- name: "[10/10] Скопировать файлы конфигурации Uptime Kuma"
copy:
src: "{{ project_root }}/infra/uptime-kuma/monitors.json"
dest: "/tmp/uptime-kuma-monitors.json"
src: "{{ item.src }}"
dest: "{{ item.dest }}"
mode: '0644'
when: ansible_connection == 'local'
loop:
- { src: "{{ playbook_dir }}/../uptime-kuma/monitors.json", dest: "/tmp/uptime-kuma-monitors.json" }
- { src: "{{ playbook_dir }}/../uptime-kuma/settings.json", dest: "/tmp/uptime-kuma-settings.json" }
ignore_errors: yes
- name: "[10/10] Создать скрипт импорта мониторов Uptime Kuma"
copy:
content: |
#!/bin/bash
set -e
echo "Очистка старых мониторов..."
docker exec bots_uptime_kuma sqlite3 /app/data/kuma.db "DELETE FROM monitor_tag;"
docker exec bots_uptime_kuma sqlite3 /app/data/kuma.db "DELETE FROM monitor;"
docker exec bots_uptime_kuma sqlite3 /app/data/kuma.db "DELETE FROM tag;"
echo "Импорт мониторов..."
docker exec -i bots_uptime_kuma sqlite3 /app/data/kuma.db << 'EOF'
INSERT INTO monitor (name, active, user_id, interval, url, type, weight, created_date, keyword, maxretries, ignore_tls, upside_down, maxredirects, accepted_statuscodes_json, method, timeout, description) VALUES
('Telegram Bot Health', 1, 1, 60, 'http://telegram-bot:8080/health', 'http', 2000, datetime('now'), NULL, 3, 0, 0, 10, '["200-299"]', 'GET', 10, 'Telegram Helper Bot health monitoring'),
('AnonBot Health', 1, 1, 60, 'http://anon-bot:8081/health', 'http', 2000, datetime('now'), NULL, 3, 0, 0, 10, '["200-299"]', 'GET', 10, 'AnonBot health monitoring'),
('Prometheus Health', 1, 1, 60, 'http://{{ ansible_host }}:9090/prometheus/-/healthy', 'http', 2000, datetime('now'), NULL, 3, 0, 0, 10, '["200-299"]', 'GET', 10, 'Prometheus health monitoring'),
('Grafana Health', 1, 1, 60, 'http://grafana:3000/api/health', 'http', 2000, datetime('now'), NULL, 3, 0, 0, 10, '["200-299"]', 'GET', 10, 'Grafana health monitoring'),
('AlertManager Health', 1, 1, 60, 'http://alertmanager:9093/-/healthy', 'http', 2000, datetime('now'), NULL, 3, 0, 0, 10, '["200-299"]', 'GET', 10, 'AlertManager health monitoring'),
('Nginx Health', 1, 1, 60, 'http://{{ ansible_host }}:80/nginx-health', 'http', 2000, datetime('now'), 'healthy', 3, 0, 0, 10, '["200-299"]', 'GET', 10, 'Nginx health monitoring'),
('External Bot Status', 1, 1, 300, 'https://{{ ansible_host }}/status/', 'http', 2000, datetime('now'), NULL, 2, 0, 0, 10, '["200-299"]', 'GET', 15, 'External availability monitoring of the status page');
EOF
echo "Импорт тегов..."
docker exec -i bots_uptime_kuma sqlite3 /app/data/kuma.db << 'EOF'
INSERT INTO tag (name, color) VALUES
('bot', '#3498db'),
('infrastructure', '#f39c12');
EOF
echo "Связывание тегов с мониторами..."
docker exec -i bots_uptime_kuma sqlite3 /app/data/kuma.db << 'EOF'
-- Link AnonBot and Telegram Bot to the 'bot' tag
INSERT INTO monitor_tag (monitor_id, tag_id, value)
SELECT m.id, t.id, ''
FROM monitor m, tag t
WHERE m.name IN ('AnonBot Health', 'Telegram Bot Health') AND t.name = 'bot';
-- Link Prometheus, Grafana, AlertManager, Nginx, and External Bot to the 'infrastructure' tag
INSERT INTO monitor_tag (monitor_id, tag_id, value)
SELECT m.id, t.id, ''
FROM monitor m, tag t
WHERE m.name IN ('Prometheus Health', 'Grafana Health', 'AlertManager Health', 'Nginx Health', 'External Bot Status') AND t.name = 'infrastructure';
EOF
echo "Импорт настроек..."
docker exec -i bots_uptime_kuma sqlite3 /app/data/kuma.db << 'EOF'
INSERT OR REPLACE INTO setting (key, value) VALUES
('language', 'ru'),
('theme', 'light'),
('timezone', 'Europe/Moscow'),
('dateLocale', 'ru'),
('dateFormat', 'YYYY-MM-DD HH:mm:ss'),
('timeFormat', '24'),
('weekStart', '1'),
('searchEngineIndex', 'true'),
('primaryBaseURL', 'https://{{ ansible_host }}/status/'),
('public', 'true'),
('publicGroupList', 'true'),
('showTags', 'true'),
('showPoweredBy', 'false'),
('keepDataPeriodDays', '365'),
('retentionCheckInterval', '3600'),
('maxmindLicenseKey', ''),
('dnsCache', 'true'),
('dnsCacheTtl', '300'),
('trustProxy', 'true'),
('disableAuth', 'false'),
('defaultTimezone', 'Europe/Moscow'),
('defaultLanguage', 'ru');
EOF
echo "Перезапуск Uptime Kuma для применения изменений..."
docker restart bots_uptime_kuma
echo "Ожидание запуска контейнера..."
sleep 15
echo "Импорт завершен!"
dest: /tmp/import_uptime_kuma.sh
mode: '0755'
ignore_errors: yes
- name: "[10/10] Выполнить импорт мониторов и настроек Uptime Kuma"
shell: /tmp/import_uptime_kuma.sh
register: uptime_kuma_import_result
ignore_errors: yes
- name: "[10/10] Показать результат импорта Uptime Kuma"
debug:
msg: "{{ uptime_kuma_import_result.stdout_lines }}"
when: uptime_kuma_import_result.stdout_lines is defined
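A hedged follow-up check that the direct SQLite import actually landed, using the same container and database path as the script above (the script inserts seven monitors):

- name: "[10/10] Verify imported Uptime Kuma monitors"
  command: docker exec bots_uptime_kuma sqlite3 /app/data/kuma.db "SELECT COUNT(*) FROM monitor;"
  register: kuma_monitor_count
  changed_when: false
  failed_when: kuma_monitor_count.stdout | int < 7
  ignore_errors: yes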
- name: "[10/10] Проверить доступность Uptime Kuma через Nginx"
uri:
@@ -1337,7 +1504,8 @@
validate_certs: no
register: uptime_kuma_nginx_health
retries: 5
delay: 10
delay: 5
ignore_errors: yes
- name: "[10/10] Проверить доступность Alertmanager через Nginx (с авторизацией)"
uri:
@@ -1345,11 +1513,12 @@
method: GET
status_code: 200
validate_certs: no
user: "{{ monitoring_username | default('admin') }}"
password: "{{ monitoring_password | default('admin123') }}"
user: "admin"
password: "admin123"
register: alertmanager_nginx_health
retries: 5
delay: 10
retries: 2
delay: 3
ignore_errors: yes
- name: "[10/10] Переподключиться по новому SSH порту"
meta: reset_connection
@@ -1359,6 +1528,7 @@
rule: deny
port: "22"
proto: tcp
ignore_errors: yes
- name: "[10/10] Проверка запуска ботов завершена — всё работает 🟢"
debug:

infra/nginx/nginx.conf

@@ -122,7 +122,149 @@ http {
add_header Content-Type text/plain;
}
# Include location configurations
# Uptime Kuma status page
location /status {
# Rate limiting
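# (assumes a limit_req_zone named "status" is declared earlier in the http block)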
limit_req zone=status burst=5 nodelay;
# Proxy to Uptime Kuma
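# (the trailing-slash URI makes nginx replace the matched /status prefix when proxying)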
proxy_pass http://127.0.0.1:3001/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# WebSocket support
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
# Timeouts
proxy_connect_timeout 30s;
proxy_send_timeout 30s;
proxy_read_timeout 30s;
# Buffer settings
proxy_buffering on;
proxy_buffer_size 4k;
proxy_buffers 8 4k;
}
# Uptime Kuma dashboard
location /dashboard {
# Rate limiting
limit_req zone=status burst=5 nodelay;
# Proxy to Uptime Kuma
proxy_pass http://127.0.0.1:3001/dashboard;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# WebSocket support
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
# Timeouts
proxy_connect_timeout 30s;
proxy_send_timeout 30s;
proxy_read_timeout 30s;
# Buffer settings
proxy_buffering on;
proxy_buffer_size 4k;
proxy_buffers 8 4k;
}
# Uptime Kuma static assets
location /assets/ {
# Rate limiting
limit_req zone=api burst=20 nodelay;
# Proxy to Uptime Kuma
proxy_pass http://127.0.0.1:3001;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# Cache static assets
expires 1y;
add_header Cache-Control "public, immutable";
}
# Uptime Kuma icons and manifest
location ~ ^/(icon.*\.(png|svg)|apple-touch-icon.*\.png|manifest\.json)$ {
# Rate limiting
limit_req zone=api burst=20 nodelay;
# Proxy to Uptime Kuma
proxy_pass http://127.0.0.1:3001;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# Cache static assets
expires 1y;
add_header Cache-Control "public, immutable";
}
# Uptime Kuma WebSocket (Socket.IO)
location /socket.io/ {
# Rate limiting
limit_req zone=api burst=20 nodelay;
# Proxy to Uptime Kuma
proxy_pass http://127.0.0.1:3001;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# WebSocket support
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
# Timeouts
proxy_connect_timeout 30s;
proxy_send_timeout 30s;
proxy_read_timeout 30s;
}
# Uptime Kuma API endpoints
location /api/ {
# Rate limiting
limit_req zone=api burst=10 nodelay;
# Proxy to Uptime Kuma
proxy_pass http://127.0.0.1:3001;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# CORS headers
add_header Access-Control-Allow-Origin "*" always;
add_header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS" always;
add_header Access-Control-Allow-Headers "DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization" always;
# Handle preflight requests
if ($request_method = 'OPTIONS') {
add_header Access-Control-Allow-Origin "*";
add_header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS";
add_header Access-Control-Allow-Headers "DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization";
add_header Access-Control-Max-Age 1728000;
add_header Content-Type "text/plain; charset=utf-8";
add_header Content-Length 0;
return 204;
}
}
# Include other location configurations
include /etc/nginx/conf.d/*.conf;
}
}