Files
prod/infra/ansible/playbook.yml
Andrey 8595fc5886 refactor: streamline Ansible playbook and logrotate configurations
- Removed environment variable lookups for logrotate settings in logrotate configuration files, replacing them with hardcoded values.
- Updated the Ansible playbook to simplify project root, deploy user, and old server configurations by removing environment variable dependencies.
- Added tasks to copy Zsh configuration files from an old server to the new server, ensuring proper permissions and cleanup of temporary files.
- Enhanced logrotate configurations for bots and system logs to ensure consistent management of log files.
2025-09-19 13:00:19 +03:00

1377 lines
47 KiB
YAML
Raw Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
---
# Full migration of the bots to a new server.
- name: Полная миграция ботов на новый сервер
  hosts: new_server
  become: true
  vars:
    # Project root directory
    project_root: "/home/prod"
    # Deploy user/group with fixed IDs (must match container UID/GID)
    deploy_user: "deploy"
    uid: 1001
    gid: 1001
    # Old server to copy data from
    old_server: "root@77.223.98.129"
    # Option: recreate /home/prod from scratch (default: no)
    recreate_project: false
    # Grafana credentials.
    # NOTE: the second argument `true` to default() is required — lookup('env')
    # returns an EMPTY STRING (not "undefined") for an unset variable, so a
    # plain default() would never apply and the value would silently be "".
    grafana_admin_user: "{{ lookup('env', 'GRAFANA_ADMIN_USER') | default('admin', true) }}"
    grafana_admin_password: "{{ lookup('env', 'GRAFANA_ADMIN_PASSWORD') | default('admin', true) }}"
    # Monitoring basic-auth credentials
    monitoring_username: "{{ lookup('env', 'MONITORING_USERNAME') | default('admin', true) }}"
    monitoring_password: "{{ lookup('env', 'MONITORING_PASSWORD') | default('admin123', true) }}"
    # SSL settings
    use_letsencrypt: "{{ lookup('env', 'USE_LETSENCRYPT') | default('false', true) | lower == 'true' }}"
tasks:
# ========================================
# ЭТАП 1: ПОДГОТОВКА СИСТЕМЫ (ROOT)
# ========================================
- name: "[1/10] Обновить SSH host key для избежания ошибок при переустановке"
known_hosts:
path: ~/.ssh/known_hosts
name: "{{ ansible_host }}"
key: "{{ lookup('pipe', 'ssh-keyscan -t rsa,ecdsa,ed25519 ' + ansible_host) }}"
state: present
delegate_to: localhost
run_once: true
ignore_errors: yes
- name: "[1/10] Обновить кэш пакетов"
apt:
update_cache: yes
- name: "[1/10] Установить необходимые пакеты"
apt:
name:
- docker.io
- docker-compose
- make
- git
- python3-pip
- curl
- sshpass
- rsync
- vim
- zsh
- ufw
- htop
- iotop
- traceroute
- ncdu
- prometheus-node-exporter
- fail2ban
- tzdata
- nginx
- openssl
- apache2-utils
- certbot
- python3-certbot-nginx
- logrotate
state: present
- name: "[1/10] Установить Python библиотеки для Ansible"
pip:
name:
- passlib
- bcrypt
state: present
- name: "[1/10] Установить часовой пояс Europe/Moscow"
timezone:
name: Europe/Moscow
# ========================================
# ЭТАП 2: НАСТРОЙКА СИСТЕМЫ (ROOT)
# ========================================
- name: "[2/10] Проверить существование swap-файла"
stat:
path: /swapfile
register: swap_file_stat
- name: "[2/10] Создать swap-файл (2GB)"
command: fallocate -l 2G /swapfile
when: not swap_file_stat.stat.exists
- name: "[2/10] Установить правильные права на swap-файл"
file:
path: /swapfile
mode: '0600'
owner: root
group: root
- name: "[2/10] Настроить swap-файл"
command: mkswap /swapfile
when: not swap_file_stat.stat.exists
- name: "[2/10] Включить swap-файл"
command: swapon /swapfile
when: not swap_file_stat.stat.exists
- name: "[2/10] Настроить swappiness = 10 (временно)"
sysctl:
name: vm.swappiness
value: '10'
state: present
reload: yes
- name: "[2/10] Настроить swappiness = 10 (постоянно)"
lineinfile:
path: /etc/sysctl.conf
regexp: '^vm\.swappiness\s*='
line: 'vm.swappiness = 10'
state: present
- name: "[2/10] Добавить swap-файл в /etc/fstab для автоматического монтирования"
lineinfile:
path: /etc/fstab
line: '/swapfile none swap sw 0 0'
state: present
create: yes
- name: "[2/10] Проверить статус swap"
command: swapon --show
register: swap_status
changed_when: false
- name: "[2/10] Показать информацию о swap"
debug:
var: swap_status.stdout_lines
# Настройка параметров безопасности ядра
- name: "[2/10] Настроить параметры безопасности ядра"
sysctl:
name: "{{ item.name }}"
value: "{{ item.value }}"
state: present
reload: yes
loop:
# Защита от DDoS
- { name: "net.ipv4.tcp_syn_retries", value: "2" }
- { name: "net.ipv4.tcp_synack_retries", value: "2" }
- { name: "net.ipv4.tcp_max_syn_backlog", value: "2048" }
- { name: "net.ipv4.tcp_fin_timeout", value: "15" }
- { name: "net.ipv4.tcp_keepalive_time", value: "1200" }
- { name: "net.ipv4.tcp_keepalive_intvl", value: "15" }
- { name: "net.ipv4.tcp_keepalive_probes", value: "5" }
- { name: "net.core.netdev_max_backlog", value: "1000" }
- { name: "net.core.somaxconn", value: "65535" }
# Защита от IP спуфинга
- { name: "net.ipv4.conf.all.accept_source_route", value: "0" }
- { name: "net.ipv4.conf.default.accept_source_route", value: "0" }
- { name: "net.ipv6.conf.all.accept_source_route", value: "0" }
- { name: "net.ipv6.conf.default.accept_source_route", value: "0" }
# Защита от фрагментации
- { name: "net.ipv4.conf.all.log_martians", value: "1" }
- { name: "net.ipv4.conf.default.log_martians", value: "1" }
- { name: "net.ipv4.icmp_echo_ignore_broadcasts", value: "1" }
- { name: "net.ipv4.icmp_ignore_bogus_error_responses", value: "1" }
- { name: "net.ipv4.tcp_syncookies", value: "1" }
- { name: "net.ipv4.conf.all.rp_filter", value: "1" }
- { name: "net.ipv4.conf.default.rp_filter", value: "1" }
# Для Docker
- { name: "kernel.pid_max", value: "65536" }
- { name: "kernel.threads-max", value: "4096" }
- { name: "vm.max_map_count", value: "262144" }
- name: "[2/10] Сохранить параметры безопасности в /etc/sysctl.conf"
lineinfile:
path: /etc/sysctl.conf
regexp: "^{{ item.name }}\\s*="
line: "{{ item.name }} = {{ item.value }}"
state: present
loop:
# Защита от DDoS
- { name: "net.ipv4.tcp_syn_retries", value: "2" }
- { name: "net.ipv4.tcp_synack_retries", value: "2" }
- { name: "net.ipv4.tcp_max_syn_backlog", value: "2048" }
- { name: "net.ipv4.tcp_fin_timeout", value: "15" }
- { name: "net.ipv4.tcp_keepalive_time", value: "1200" }
- { name: "net.ipv4.tcp_keepalive_intvl", value: "15" }
- { name: "net.ipv4.tcp_keepalive_probes", value: "5" }
- { name: "net.core.netdev_max_backlog", value: "1000" }
- { name: "net.core.somaxconn", value: "65535" }
# Защита от IP спуфинга
- { name: "net.ipv4.conf.all.accept_source_route", value: "0" }
- { name: "net.ipv4.conf.default.accept_source_route", value: "0" }
- { name: "net.ipv6.conf.all.accept_source_route", value: "0" }
- { name: "net.ipv6.conf.default.accept_source_route", value: "0" }
# Защита от фрагментации
- { name: "net.ipv4.conf.all.log_martians", value: "1" }
- { name: "net.ipv4.conf.default.log_martians", value: "1" }
- { name: "net.ipv4.icmp_echo_ignore_broadcasts", value: "1" }
- { name: "net.ipv4.icmp_ignore_bogus_error_responses", value: "1" }
- { name: "net.ipv4.tcp_syncookies", value: "1" }
- { name: "net.ipv4.conf.all.rp_filter", value: "1" }
- { name: "net.ipv4.conf.default.rp_filter", value: "1" }
# Для Docker
- { name: "kernel.pid_max", value: "65536" }
- { name: "kernel.threads-max", value: "4096" }
- { name: "vm.max_map_count", value: "262144" }
# ========================================
# ЭТАП 3: СИСТЕМНЫЕ СЕРВИСЫ (ROOT)
# ========================================
- name: "[3/10] Включить и запустить prometheus-node-exporter"
systemd:
name: prometheus-node-exporter
enabled: yes
state: started
- name: "[3/10] Проверить статус prometheus-node-exporter"
command: systemctl status prometheus-node-exporter
register: node_exporter_status
changed_when: false
- name: "[3/10] Показать статус prometheus-node-exporter"
debug:
var: node_exporter_status.stdout_lines
- name: "[3/10] Проверить, что node_exporter слушает на порту 9100"
command: netstat -tulpn | grep 9100
register: node_exporter_port
changed_when: false
- name: "[3/10] Показать информацию о порте 9100"
debug:
var: node_exporter_port.stdout_lines
- name: "[3/10] Обновить Docker Compose до последней версии"
get_url:
url: "https://github.com/docker/compose/releases/latest/download/docker-compose-{{ ansible_system }}-{{ ansible_architecture }}"
dest: /usr/local/bin/docker-compose
mode: '0755'
- name: "[3/10] Включить и запустить Docker"
systemd:
name: docker
enabled: yes
state: started
# ========================================
# ЭТАП 4: ПОЛЬЗОВАТЕЛЬ DEPLOY (ROOT)
# ========================================
- name: "[4/10] Проверить существование пользователя deploy"
getent:
database: passwd
key: "{{ deploy_user }}"
register: user_exists
failed_when: false
- name: "[4/10] Создать группу deploy с GID 1001"
group:
name: "{{ deploy_user }}"
gid: "{{ gid }}"
when: user_exists.ansible_facts.getent_passwd is not defined
- name: "[4/10] Создать пользователя deploy с UID 1001 (если не существует)"
user:
name: "{{ deploy_user }}"
uid: "{{ uid }}"
group: "{{ gid }}"
shell: /bin/zsh
create_home: yes
system: no
groups: docker
append: yes
when: user_exists.ansible_facts.getent_passwd is not defined
- name: "[4/10] Установить zsh как оболочку по умолчанию для существующего пользователя deploy"
user:
name: "{{ deploy_user }}"
shell: /bin/zsh
when: user_exists.ansible_facts.getent_passwd is defined
- name: "[4/10] Скопировать SSH ключ с локальной машины для пользователя deploy"
authorized_key:
user: "{{ deploy_user }}"
key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
state: present
- name: "[4/10] Настроить sudo для deploy (все команды без пароля)"
lineinfile:
path: /etc/sudoers.d/deploy
line: "{{ deploy_user }} ALL=(ALL) NOPASSWD: ALL"
create: yes
mode: '0440'
validate: 'visudo -cf %s'
- name: "[4/10] Удалить /home/prod, если требуется (чистое развертывание)"
file:
path: "{{ project_root }}"
state: absent
when: recreate_project | bool
- name: "[4/10] Создать директорию проекта /home/prod"
file:
path: "{{ project_root }}"
state: directory
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0755'
- name: "[4/10] Скопировать приватный SSH ключ для Git"
copy:
src: "~/.ssh/id_rsa"
dest: "/home/{{ deploy_user }}/.ssh/id_rsa"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0600'
remote_src: no
- name: "[4/10] Настроить SSH config для GitHub"
lineinfile:
path: "/home/{{ deploy_user }}/.ssh/config"
line: "Host github.com\n StrictHostKeyChecking no\n UserKnownHostsFile /dev/null"
create: yes
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0600'
# ========================================
# КОПИРОВАНИЕ КОНФИГУРАЦИИ ZSH (ROOT)
# ========================================
- name: "[4/10] Создать директорию .zsh для пользователя deploy"
file:
path: "/home/{{ deploy_user }}/.zsh"
state: directory
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0755'
- name: "[4/10] Копировать .zshrc со старого сервера"
fetch:
src: "/home/prod/.zshrc"
dest: "/tmp/deploy_zshrc"
flat: yes
delegate_to: "{{ old_server }}"
ignore_errors: yes
- name: "[4/10] Переместить .zshrc на новое место"
copy:
src: "/tmp/deploy_zshrc"
dest: "/home/{{ deploy_user }}/.zshrc"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
when: ansible_check_mode == false
ignore_errors: yes
- name: "[4/10] Копировать .zshenv со старого сервера"
fetch:
src: "/home/prod/.zshenv"
dest: "/tmp/deploy_zshenv"
flat: yes
delegate_to: "{{ old_server }}"
ignore_errors: yes
- name: "[4/10] Переместить .zshenv на новое место"
copy:
src: "/tmp/deploy_zshenv"
dest: "/home/{{ deploy_user }}/.zshenv"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
when: ansible_check_mode == false
ignore_errors: yes
- name: "[4/10] Копировать .zprofile со старого сервера"
fetch:
src: "/home/prod/.zprofile"
dest: "/tmp/deploy_zprofile"
flat: yes
delegate_to: "{{ old_server }}"
ignore_errors: yes
- name: "[4/10] Переместить .zprofile на новое место"
copy:
src: "/tmp/deploy_zprofile"
dest: "/home/{{ deploy_user }}/.zprofile"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
when: ansible_check_mode == false
ignore_errors: yes
- name: "[4/10] Копировать .zlogin со старого сервера"
fetch:
src: "/home/prod/.zlogin"
dest: "/tmp/deploy_zlogin"
flat: yes
delegate_to: "{{ old_server }}"
ignore_errors: yes
- name: "[4/10] Переместить .zlogin на новое место"
copy:
src: "/tmp/deploy_zlogin"
dest: "/home/{{ deploy_user }}/.zlogin"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
when: ansible_check_mode == false
ignore_errors: yes
- name: "[4/10] Копировать .zlogout со старого сервера"
fetch:
src: "/home/prod/.zlogout"
dest: "/tmp/deploy_zlogout"
flat: yes
delegate_to: "{{ old_server }}"
ignore_errors: yes
- name: "[4/10] Переместить .zlogout на новое место"
copy:
src: "/tmp/deploy_zlogout"
dest: "/home/{{ deploy_user }}/.zlogout"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
when: ansible_check_mode == false
ignore_errors: yes
- name: "[4/10] Копировать директорию .zsh со старого сервера (если существует)"
command: >
rsync -avz --progress --stats --partial --verbose
root@77.223.98.129:/home/prod/.zsh/
/home/{{ deploy_user }}/.zsh/
ignore_errors: yes
- name: "[4/10] Установить правильные права на все zsh файлы"
file:
path: "/home/{{ deploy_user }}/.zsh"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0755'
recurse: yes
- name: "[4/10] Создать базовый .zshrc если файлы не были скопированы"
copy:
content: |
# Zsh configuration
# This file was created automatically by Ansible
# Original configuration files were not found on the old server
# Enable completion system
autoload -Uz compinit
compinit
# Enable colors
autoload -Uz colors
colors
# History configuration
HISTFILE=~/.zsh_history
HISTSIZE=10000
SAVEHIST=10000
setopt HIST_IGNORE_DUPS
setopt HIST_IGNORE_ALL_DUPS
setopt HIST_SAVE_NO_DUPS
setopt HIST_FIND_NO_DUPS
setopt SHARE_HISTORY
# Directory navigation
setopt AUTO_CD
setopt AUTO_PUSHD
setopt PUSHD_IGNORE_DUPS
setopt PUSHD_SILENT
# Completion
setopt AUTO_LIST
setopt AUTO_MENU
setopt COMPLETE_IN_WORD
setopt ALWAYS_TO_END
# Prompt
PROMPT='%F{blue}%n@%m%f %F{green}%~%f %# '
# Aliases
alias ll='ls -la'
alias la='ls -A'
alias l='ls -CF'
alias ..='cd ..'
alias ...='cd ../..'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
# Docker aliases
alias d='docker'
alias dc='docker-compose'
alias dps='docker ps'
alias dpsa='docker ps -a'
alias di='docker images'
alias dex='docker exec -it'
# Git aliases
alias gs='git status'
alias ga='git add'
alias gc='git commit'
alias gp='git push'
alias gl='git log --oneline'
alias gd='git diff'
# Project specific
alias prod='cd /home/prod'
alias bots='cd /home/prod/bots'
alias logs='cd /home/prod/bots/*/logs'
# Environment
export PATH="$PATH:/usr/local/bin"
export EDITOR=vim
# Load additional configurations if they exist
[ -f ~/.zshrc.local ] && source ~/.zshrc.local
dest: "/home/{{ deploy_user }}/.zshrc"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
when: ansible_check_mode == false
- name: "[4/10] Очистить временные файлы zsh конфигурации"
file:
path: "{{ item }}"
state: absent
loop:
- "/tmp/deploy_zshrc"
- "/tmp/deploy_zshenv"
- "/tmp/deploy_zprofile"
- "/tmp/deploy_zlogin"
- "/tmp/deploy_zlogout"
ignore_errors: yes
# ========================================
# ЭТАП 5: КЛОНИРОВАНИЕ РЕПОЗИТОРИЕВ (DEPLOY)
# ========================================
- name: "[5/10] Исправить права на директорию проекта перед клонированием"
file:
path: "{{ project_root }}"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0755'
recurse: yes
become: yes
- name: "[5/10] Клонировать основной репозиторий prod"
git:
repo: git@github.com:KerradKerridi/prod.git
dest: "{{ project_root }}"
update: yes
force: yes
become: yes
become_user: "{{ deploy_user }}"
- name: "[5/10] Клонировать AnonBot"
git:
repo: git@github.com:KerradKerridi/AnonBot.git
dest: "{{ project_root }}/bots/AnonBot"
update: yes
force: yes
become: yes
become_user: "{{ deploy_user }}"
- name: "[5/10] Клонировать telegram-helper-bot"
git:
repo: git@github.com:KerradKerridi/telegram-helper-bot.git
dest: "{{ project_root }}/bots/telegram-helper-bot"
version: dev-9
update: yes
force: yes
become: yes
become_user: "{{ deploy_user }}"
- name: "[5/10] Исправить права на все файлы после клонирования"
file:
path: "{{ project_root }}"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0755'
recurse: yes
become: yes
# ========================================
# ЭТАП 6: КОПИРОВАНИЕ КОНФИГУРАЦИЙ (ROOT)
# ========================================
- name: "[6/10] Скопировать конфигурацию Alertmanager"
copy:
src: "{{ project_root }}/infra/alertmanager/alertmanager.yml"
dest: "{{ project_root }}/infra/alertmanager/alertmanager.yml"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
backup: yes
remote_src: yes
- name: "[6/10] Скопировать правила алертов Prometheus"
copy:
src: "{{ project_root }}/infra/prometheus/alert_rules.yml"
dest: "{{ project_root }}/infra/prometheus/alert_rules.yml"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
backup: yes
remote_src: yes
- name: "[6/10] Скопировать дашборды Grafana"
copy:
src: "{{ project_root }}/infra/grafana/dashboards/"
dest: "{{ project_root }}/infra/grafana/dashboards/"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
backup: yes
remote_src: yes
- name: "[6/10] Скопировать скрипт настройки SSL"
copy:
src: "{{ project_root }}/scripts/setup-ssl.sh"
dest: /usr/local/bin/setup-ssl.sh
owner: root
group: root
mode: '0755'
backup: yes
remote_src: yes
- name: "[6/10] Установить правильные права на дашборд Node Exporter Full"
file:
path: "{{ project_root }}/infra/grafana/provisioning/dashboards/node-exporter-full-dashboard.json"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
# ========================================
# ЭТАП 7: КОПИРОВАНИЕ ДАННЫХ СО СТАРОГО СЕРВЕРА (ROOT)
# ========================================
- name: "[7/10] Скопировать SSH ключ на старый сервер для копирования файлов"
authorized_key:
user: root
key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
state: present
delegate_to: "{{ old_server }}"
- name: "[7/10] Копировать .env для telegram-helper-bot со старого сервера"
fetch:
src: "/home/prod/bots/telegram-helper-bot/.env"
dest: "/tmp/telegram-helper-bot.env"
flat: yes
delegate_to: "{{ old_server }}"
- name: "[7/10] Переместить .env для telegram-helper-bot на новое место"
copy:
src: "/tmp/telegram-helper-bot.env"
dest: "{{ project_root }}/bots/telegram-helper-bot/.env"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
- name: "[7/10] Проверить размер БД для telegram-helper-bot"
stat:
path: "/home/prod/bots/telegram-helper-bot/database/tg-bot-database.db"
delegate_to: "{{ old_server }}"
register: db_size
- name: "[7/10] Показать размер БД для telegram-helper-bot"
debug:
msg: "Размер БД: {{ (db_size.stat.size / 1024 / 1024) | round(2) }} MB"
- name: "[7/10] Копировать БД для telegram-helper-bot"
fetch:
src: "/home/prod/bots/telegram-helper-bot/database/tg-bot-database.db"
dest: "/tmp/tg-bot-database.db"
flat: yes
delegate_to: "{{ old_server }}"
- name: "[7/10] Переместить БД для telegram-helper-bot на новое место"
copy:
src: "/tmp/tg-bot-database.db"
dest: "{{ project_root }}/bots/telegram-helper-bot/database/tg-bot-database.db"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
- name: "[7/10] Создать папку voice_users на новом сервере"
file:
path: "{{ project_root }}/bots/telegram-helper-bot/voice_users"
state: directory
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0755'
- name: "[7/10] Создать временную папку для voice_users на локальной машине"
file:
path: "/tmp/voice_users_migration"
state: directory
mode: '0755'
delegate_to: localhost
become: no
- name: "[7/10] Копировать voice_users со старого сервера на локальную машину"
command: >
rsync -avz --progress --stats --partial --verbose
root@77.223.98.129:/home/prod/bots/telegram-helper-bot/voice_users/
/tmp/voice_users_migration/
delegate_to: localhost
become: no
- name: "[7/10] Копировать voice_users с локальной машины на новый сервер"
synchronize:
src: "/tmp/voice_users_migration/"
dest: "{{ project_root }}/bots/telegram-helper-bot/voice_users/"
mode: push
rsync_opts: "--progress --stats --partial --verbose"
- name: "[7/10] Очистить временную папку на локальной машине"
file:
path: "/tmp/voice_users_migration"
state: absent
delegate_to: localhost
become: no
- name: "[7/10] Копировать корневой .env файл"
fetch:
src: "/home/prod/.env"
dest: "/tmp/root.env"
flat: yes
delegate_to: "{{ old_server }}"
- name: "[7/10] Переместить корневой .env файл на новое место"
copy:
src: "/tmp/root.env"
dest: "{{ project_root }}/.env"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
- name: "[7/10] Копировать .env для AnonBot"
fetch:
src: "/home/prod/bots/AnonBot/.env"
dest: "/tmp/anonbot.env"
flat: yes
delegate_to: "{{ old_server }}"
- name: "[7/10] Переместить .env для AnonBot на новое место"
copy:
src: "/tmp/anonbot.env"
dest: "{{ project_root }}/bots/AnonBot/.env"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
- name: "[7/10] Проверить размер БД для AnonBot"
stat:
path: "/home/prod/bots/AnonBot/database/anon_qna.db"
delegate_to: "{{ old_server }}"
register: anon_db_size
- name: "[7/10] Показать размер БД для AnonBot"
debug:
msg: "Размер БД AnonBot: {{ (anon_db_size.stat.size / 1024 / 1024) | round(2) }} MB"
- name: "[7/10] Копировать БД для AnonBot"
fetch:
src: "/home/prod/bots/AnonBot/database/anon_qna.db"
dest: "/tmp/anon_qna.db"
flat: yes
delegate_to: "{{ old_server }}"
- name: "[7/10] Переместить БД для AnonBot на новое место"
copy:
src: "/tmp/anon_qna.db"
dest: "{{ project_root }}/bots/AnonBot/database/anon_qna.db"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
- name: "[7/10] Установить права на скопированные файлы"
file:
path: "{{ item }}"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
loop:
- "{{ project_root }}/bots/telegram-helper-bot/.env"
- "{{ project_root }}/bots/telegram-helper-bot/database/tg-bot-database.db"
- "{{ project_root }}/bots/AnonBot/.env"
- "{{ project_root }}/bots/AnonBot/database/anon_qna.db"
- name: "[7/10] Исправить права доступа для voice_users (рекурсивно)"
file:
path: "{{ project_root }}/bots/telegram-helper-bot/voice_users"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0755'
recurse: yes
# ========================================
# ЭТАП 8: NGINX КОНФИГУРАЦИЯ (ROOT)
# ========================================
- name: "[8/10] Остановить nginx (если запущен)"
systemd:
name: nginx
state: stopped
ignore_errors: yes
- name: "[8/10] Создать директории для nginx конфигураций"
file:
path: "{{ item }}"
state: directory
owner: root
group: root
mode: '0755'
loop:
- "{{ project_root }}/infra/nginx"
- "{{ project_root }}/infra/nginx/ssl"
- "{{ project_root }}/infra/nginx/conf.d"
- "{{ project_root }}/infra/uptime-kuma"
- "{{ project_root }}/infra/uptime-kuma/backup"
- "{{ project_root }}/infra/alertmanager"
- "{{ project_root }}/infra/grafana/dashboards"
- "{{ project_root }}/infra/logrotate"
- "{{ project_root }}/scripts"
- /etc/nginx/passwords
- name: "[8/10] Скопировать скрипт генерации паролей"
copy:
src: "{{ project_root }}/scripts/generate_auth_passwords.sh"
dest: /usr/local/bin/generate_auth_passwords.sh
owner: root
group: root
mode: '0755'
remote_src: yes
- name: "[8/10] Создать файл паролей для мониторинга"
htpasswd:
path: /etc/nginx/passwords/monitoring.htpasswd
name: "{{ monitoring_username | default('admin') }}"
password: "{{ monitoring_password | default('admin123') }}"
owner: root
group: www-data
mode: '0640'
create: yes
state: present
- name: "[8/10] Сгенерировать самоподписанный SSL сертификат (fallback)"
command: >
openssl req -x509 -newkey rsa:4096 -keyout {{ project_root }}/infra/nginx/ssl/key.pem
-out {{ project_root }}/infra/nginx/ssl/cert.pem -days 365 -nodes
-subj "/CN={{ ansible_host }}/O=Monitoring/C=RU"
args:
creates: "{{ project_root }}/infra/nginx/ssl/cert.pem"
when: not use_letsencrypt | default(false)
- name: "[8/10] Создать директории для Let's Encrypt"
file:
path: "{{ item }}"
state: directory
owner: root
group: root
mode: '0755'
loop:
- /etc/letsencrypt
- /etc/letsencrypt/live
- /etc/letsencrypt/archive
- /etc/letsencrypt/renewal
when: use_letsencrypt | default(false)
- name: "[8/10] Настроить cron для автоматического обновления SSL сертификатов"
cron:
name: "SSL Certificate Renewal"
job: "0 2 * * 1 /usr/local/bin/ssl-renewal.sh"
user: root
when: use_letsencrypt | default(false)
- name: "[8/10] Установить права на SSL сертификаты"
file:
path: "{{ item }}"
owner: root
group: root
mode: '0600'
loop:
- "{{ project_root }}/infra/nginx/ssl/cert.pem"
- "{{ project_root }}/infra/nginx/ssl/key.pem"
- name: "[8/10] Создать htpasswd файл для status page"
htpasswd:
path: "{{ project_root }}/infra/nginx/.htpasswd"
name: "admin"
password: "{{ lookup('env', 'STATUS_PAGE_PASSWORD') | default('admin123') }}"
owner: root
group: root
mode: '0644'
- name: "[8/10] Удалить старую конфигурацию nginx"
file:
path: /etc/nginx/nginx.conf
state: absent
- name: "[8/10] Скопировать основную конфигурацию nginx"
template:
src: "/Users/andrejkatyhin/PycharmProjects/prod/infra/nginx/nginx.conf"
dest: /etc/nginx/nginx.conf
owner: root
group: root
mode: '0644'
backup: yes
vars:
SERVER_IP: "{{ ansible_host }}"
- name: "[8/10] Скопировать конфигурации nginx для сервисов"
copy:
src: "/Users/andrejkatyhin/PycharmProjects/prod/infra/nginx/conf.d/"
dest: /etc/nginx/conf.d/
owner: root
group: root
mode: '0644'
backup: yes
- name: "[8/10] Создать директорию для SSL сертификатов"
file:
path: /etc/nginx/ssl
state: directory
owner: root
group: root
mode: '0755'
- name: "[8/10] Сгенерировать самоподписанный SSL сертификат"
command: >
openssl req -x509 -nodes -days 365 -newkey rsa:2048
-keyout /etc/nginx/ssl/privkey.pem
-out /etc/nginx/ssl/fullchain.pem
-subj "/C=RU/ST=Moscow/L=Moscow/O=Bot Infrastructure/OU=IT Department/CN={{ ansible_host }}"
args:
creates: /etc/nginx/ssl/fullchain.pem
- name: "[8/10] Установить права на SSL сертификаты"
file:
path: "{{ item }}"
owner: root
group: root
mode: '0600'
loop:
- /etc/nginx/ssl/privkey.pem
- /etc/nginx/ssl/fullchain.pem
- name: "[8/10] Скопировать htpasswd файл"
copy:
src: "{{ project_root }}/infra/nginx/.htpasswd"
dest: /etc/nginx/.htpasswd
owner: root
group: root
mode: '0644'
backup: yes
remote_src: yes
- name: "[8/10] Включить nginx (запустим позже после контейнеров)"
systemd:
name: nginx
enabled: yes
state: stopped
# ========================================
# ЭТАП 9: БЕЗОПАСНОСТЬ И ФАЙРВОЛ (ROOT)
# ========================================
- name: "[9/10] Разрешить SSH (порт 22) перед включением UFW"
ufw:
rule: allow
port: "22"
proto: tcp
- name: "[9/10] Разрешить новый SSH порт (15722) перед включением UFW"
ufw:
rule: allow
port: "15722"
proto: tcp
- name: "[9/10] Настроить политику UFW по умолчанию"
ufw:
policy: deny
direction: incoming
- name: "[9/10] Включить UFW (файрвол)"
ufw:
state: enabled
- name: "[9/10] Открыть порты для сервисов"
ufw:
rule: allow
port: "{{ item }}"
proto: tcp
loop:
- "8080" # Telegram Bot
- "8081" # AnonBot
- "9090" # Prometheus
- "3000" # Grafana
- "9100" # Node Exporter
- "80" # HTTP
- "443" # HTTPS
- name: "[9/10] Настроить безопасный SSH"
lineinfile:
path: /etc/ssh/sshd_config
regexp: "^{{ item.regexp }}"
line: "{{ item.line }}"
backup: yes
loop:
- { regexp: "Port", line: "Port 15722" }
- { regexp: "PermitRootLogin", line: "PermitRootLogin no" }
- { regexp: "PasswordAuthentication", line: "PasswordAuthentication no" }
- { regexp: "PubkeyAuthentication", line: "PubkeyAuthentication yes" }
- { regexp: "AllowUsers", line: "AllowUsers {{ deploy_user }}" }
notify: reload ssh
- name: "[9/10] Перезагрузить SSH сервис для применения настроек"
systemd:
name: ssh
state: reloaded
- name: "[9/10] Создать конфигурацию Fail2ban для SSH"
copy:
content: |
[sshd]
enabled = true
port = 15722
filter = sshd
logpath = /var/log/auth.log
maxretry = 3
bantime = 3600
findtime = 600
dest: /etc/fail2ban/jail.d/sshd.local
owner: root
group: root
mode: '0644'
- name: "[9/10] Создать конфигурацию Fail2ban для Nginx"
copy:
content: |
[nginx-http-auth]
enabled = true
port = http,https
filter = nginx-http-auth
logpath = /var/log/nginx/error.log
maxretry = 3
bantime = 3600
findtime = 600
[nginx-limit-req]
enabled = true
port = http,https
filter = nginx-limit-req
logpath = /var/log/nginx/error.log
maxretry = 3
bantime = 3600
findtime = 600
dest: /etc/fail2ban/jail.d/nginx.local
owner: root
group: root
mode: '0644'
- name: "[9/10] Создать конфигурацию Fail2ban для Docker"
copy:
content: |
[docker]
enabled = true
port = 2375,2376
filter = docker
logpath = /var/log/syslog
maxretry = 3
bantime = 3600
findtime = 600
dest: /etc/fail2ban/jail.d/docker.local
owner: root
group: root
mode: '0644'
- name: "[9/10] Включить и запустить Fail2ban"
systemd:
name: fail2ban
enabled: yes
state: started
- name: "[9/10] Проверить статус Fail2ban"
command: fail2ban-client status
register: fail2ban_status
changed_when: false
- name: "[9/10] Показать статус Fail2ban"
debug:
var: fail2ban_status.stdout_lines
# ========================================
# ЭТАП 9.5: НАСТРОЙКА LOGROTATE (ROOT)
# ========================================
- name: "[9.5/10] Создать директорию для logrotate конфигураций"
file:
path: /etc/logrotate.d
state: directory
owner: root
group: root
mode: '0755'
- name: "[9.5/10] Настроить logrotate для ботов"
template:
src: "{{ project_root }}/infra/logrotate/logrotate_bots.conf.j2"
dest: /etc/logrotate.d/bots
owner: root
group: root
mode: '0644'
backup: yes
remote_src: yes
- name: "[9.5/10] Настроить logrotate для системных сервисов"
template:
src: "{{ project_root }}/infra/logrotate/logrotate_system.conf.j2"
dest: /etc/logrotate.d/system
owner: root
group: root
mode: '0644'
backup: yes
remote_src: yes
- name: "[9.5/10] Создать директории для логов ботов"
file:
path: "{{ item }}"
state: directory
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0755'
loop:
- "{{ project_root }}/bots/AnonBot/logs"
- "{{ project_root }}/bots/telegram-helper-bot/logs"
- name: "[9.5/10] Проверить конфигурацию logrotate"
command: logrotate -d /etc/logrotate.conf
register: logrotate_test
changed_when: false
- name: "[9.5/10] Показать результат проверки logrotate"
debug:
var: logrotate_test.stdout_lines
- name: "[9.5/10] Включить и запустить logrotate"
systemd:
name: logrotate
enabled: yes
state: started
- name: "[9.5/10] Настроить cron для ежедневного запуска logrotate"
cron:
name: "Logrotate daily"
job: "0 2 * * * /usr/sbin/logrotate /etc/logrotate.conf"
user: root
state: present
# ========================================
# ЭТАП 10: ЗАПУСК ПРИЛОЖЕНИЙ И ПРОВЕРКИ (DEPLOY + ROOT)
# ========================================
- name: "[10/10] Запустить ботов через make up"
command: make up
args:
chdir: "{{ project_root }}"
become: yes
become_user: "{{ deploy_user }}"
- name: "[10/10] Пауза на 45 секунд — дать контейнерам запуститься"
pause:
seconds: 45
- name: "[10/10] Проверить конфигурацию nginx (после запуска контейнеров)"
command: nginx -t
register: nginx_config_test
changed_when: false
- name: "[10/10] Показать результат проверки nginx"
debug:
var: nginx_config_test.stdout_lines
- name: "[10/10] Запустить nginx (после запуска контейнеров)"
systemd:
name: nginx
state: started
- name: "[10/10] Проверить статус nginx"
command: systemctl status nginx
register: nginx_status
changed_when: false
- name: "[10/10] Показать статус nginx"
debug:
var: nginx_status.stdout_lines
- name: "[10/10] Проверить, что порт 8080 (Telegram Bot) открыт"
wait_for:
port: 8080
host: "{{ ansible_host }}"
timeout: 30
state: started
- name: "[10/10] Проверить, что порт 8081 (AnonBot) открыт"
wait_for:
port: 8081
host: "{{ ansible_host }}"
timeout: 30
state: started
- name: "[10/10] Проверить, что порт 9090 (Prometheus) открыт"
wait_for:
port: 9090
host: "{{ ansible_host }}"
timeout: 30
state: started
- name: "[10/10] Проверить, что порт 3000 (Grafana) открыт"
wait_for:
port: 3000
host: "{{ ansible_host }}"
timeout: 30
state: started
- name: "[10/10] Проверить, что порт 9100 (Node Exporter) открыт"
wait_for:
port: 9100
host: "{{ ansible_host }}"
timeout: 30
state: started
- name: "[10/10] Проверить доступность Node Exporter метрик"
uri:
url: "http://{{ ansible_host }}:9100/metrics"
method: GET
status_code: 200
validate_certs: no
register: node_exporter_metrics
retries: 3
delay: 5
- name: "[10/10] Проверить, что порт 80 (Nginx HTTP) открыт"
wait_for:
port: 80
host: "{{ ansible_host }}"
timeout: 30
state: started
- name: "[10/10] Проверить, что порт 443 (Nginx HTTPS) открыт"
wait_for:
port: 443
host: "{{ ansible_host }}"
timeout: 30
state: started
- name: "[10/10] Проверить, что порт 3001 (Uptime Kuma) открыт"
wait_for:
port: 3001
host: "{{ ansible_host }}"
timeout: 30
state: started
# - name: "[10/10] Проверить, что порт 9093 (Alertmanager) открыт"
# wait_for:
# port: 9093
# host: "{{ ansible_host }}"
# timeout: 30
# state: started
# # Пропускаем проверку Alertmanager, так как есть проблемы с конфигурацией
- name: "[10/10] Проверить доступность Nginx"
uri:
url: "http://{{ ansible_host }}/nginx-health"
method: GET
status_code: 200
validate_certs: no
register: nginx_health
retries: 5
delay: 10
- name: "[10/10] Проверить доступность Grafana через Nginx"
uri:
url: "https://{{ ansible_host }}/grafana/api/health"
method: GET
status_code: 200
validate_certs: no
register: grafana_nginx_health
retries: 5
delay: 10
- name: "[10/10] Проверить доступность Prometheus через Nginx (health check без авторизации)"
uri:
url: "https://{{ ansible_host }}/prometheus/-/healthy"
method: GET
status_code: 200
validate_certs: no
register: prometheus_nginx_health
retries: 5
delay: 10
- name: "[10/10] Проверить доступность Grafana API"
uri:
url: "http://{{ ansible_host }}:3000/api/health"
method: GET
status_code: 200
validate_certs: no
register: grafana_health
retries: 5
delay: 10
- name: "[10/10] Настроить Uptime Kuma мониторы"
copy:
src: "{{ project_root }}/infra/uptime-kuma/monitors.json"
dest: "/tmp/uptime-kuma-monitors.json"
mode: '0644'
when: ansible_connection == 'local'
- name: "[10/10] Проверить доступность Uptime Kuma через Nginx"
uri:
url: "https://{{ ansible_host }}/status"
method: GET
status_code: 200
validate_certs: no
register: uptime_kuma_nginx_health
retries: 5
delay: 10
- name: "[10/10] Проверить доступность Alertmanager через Nginx (с авторизацией)"
uri:
url: "https://{{ ansible_host }}/alerts/"
method: GET
status_code: 200
validate_certs: no
user: "{{ monitoring_username | default('admin') }}"
password: "{{ monitoring_password | default('admin123') }}"
register: alertmanager_nginx_health
retries: 5
delay: 10
- name: "[10/10] Переподключиться по новому SSH порту"
meta: reset_connection
- name: "[10/10] Закрыть старый SSH порт 22 в UFW (финальный шаг)"
ufw:
rule: deny
port: "22"
proto: tcp
- name: "[10/10] Проверка запуска ботов завершена — всё работает 🟢"
debug:
msg: "Все сервисы запущены и слушают нужные порты. SSH настроен на порт 15722, Fail2ban активен, параметры безопасности ядра применены. Порт 22 закрыт для безопасности. Добавлены: Uptime Kuma (статусная страница), Alertmanager (мониторинг), Let's Encrypt SSL, Grafana дашборды."
# handlers для перезагрузки сервисов
handlers:
- name: reload ssh
systemd:
name: ssh
state: reloaded
- name: restart ufw
ufw:
state: reloaded