Files
prod/infra/ansible/playbook.yml
Andrey 9e03c1f6f2 chore: optimize resource allocation and memory settings in Docker Compose
- Added memory and CPU limits and reservations for Prometheus, Grafana, and Uptime Kuma services to enhance performance and resource management.
- Updated Prometheus and Grafana configurations with new storage block duration settings for improved memory optimization.
- Revised README to include additional commands for running specific services and restarting containers.
2026-01-23 21:38:48 +03:00

1560 lines
55 KiB
YAML
Raw Permalink Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
---
# Full migration of the Telegram bots to a new server.
- name: Полная миграция ботов на новый сервер
  hosts: new_server
  become: true
  vars:
    # Root directory of the project on the target host
    project_root: "/home/prod"
    # Deployment user and its fixed UID/GID
    deploy_user: "deploy"
    uid: 1001
    gid: 1001
    # Old server the data is migrated from (root SSH access required)
    old_server: "root@77.223.98.129"
    # When true, /home/prod is wiped and re-created (clean deployment)
    recreate_project: false
    # Grafana credentials.
    # BUG FIX: lookup('env') returns an EMPTY STRING (not "undefined") when
    # the variable is unset, so default() never fired; the second argument
    # `true` makes the default also apply to empty values.
    grafana_admin_user: "{{ lookup('env', 'GRAFANA_ADMIN_USER') | default('admin', true) }}"
    grafana_admin_password: "{{ lookup('env', 'GRAFANA_ADMIN_PASSWORD') | default('admin', true) }}"
    # Status page password.
    # NOTE(review): unlike the other secrets this reads an ANSIBLE variable
    # (pass with -e STATUS_PAGE_PASSWORD=...), not an environment variable —
    # confirm this asymmetry is intentional.
    status_page_password: "{{ STATUS_PAGE_PASSWORD | default('admin123') }}"
    # Basic-auth credentials protecting the monitoring endpoints
    monitoring_username: "{{ lookup('env', 'MONITORING_USERNAME') | default('admin', true) }}"
    monitoring_password: "{{ lookup('env', 'MONITORING_PASSWORD') | default('admin123', true) }}"
    # SSL: Let's Encrypt when USE_LETSENCRYPT=true, otherwise self-signed
    use_letsencrypt: "{{ lookup('env', 'USE_LETSENCRYPT') | default('false', true) | lower == 'true' }}"
  tasks:
# ========================================
# ЭТАП 1: ПОДГОТОВКА СИСТЕМЫ (ROOT)
# ========================================
- name: "[1/10] Обновить SSH host key для избежания ошибок при переустановке"
known_hosts:
path: ~/.ssh/known_hosts
name: "{{ ansible_host }}"
key: "{{ lookup('pipe', 'ssh-keyscan -t rsa,ecdsa,ed25519 ' + ansible_host + ' 2>/dev/null') }}"
state: present
delegate_to: localhost
run_once: true
ignore_errors: yes
- name: "[1/10] Обновить кэш пакетов"
apt:
update_cache: yes
- name: "[1/10] Установить необходимые пакеты"
apt:
name:
- docker.io
- docker-compose
- make
- git
- python3-pip
- curl
- sshpass
- rsync
- vim
- zsh
- ufw
- htop
- iotop
- traceroute
- ncdu
- prometheus-node-exporter
- fail2ban
- tzdata
- nginx
- openssl
- apache2-utils
- certbot
- python3-certbot-nginx
- logrotate
- net-tools
- cron
state: present
- name: "[1/10] Установить Python библиотеки для Ansible"
pip:
name:
- passlib
- bcrypt
state: present
- name: "[1/10] Установить часовой пояс Europe/Moscow"
timezone:
name: Europe/Moscow
# ========================================
# ЭТАП 2: НАСТРОЙКА СИСТЕМЫ (ROOT)
# ========================================
- name: "[2/10] Проверить существование swap-файла"
stat:
path: /swapfile
register: swap_file_stat
- name: "[2/10] Создать swap-файл (2GB)"
command: fallocate -l 2G /swapfile
when: not swap_file_stat.stat.exists
- name: "[2/10] Установить правильные права на swap-файл"
file:
path: /swapfile
mode: '0600'
owner: root
group: root
- name: "[2/10] Настроить swap-файл"
command: mkswap /swapfile
when: not swap_file_stat.stat.exists
- name: "[2/10] Включить swap-файл"
command: swapon /swapfile
when: not swap_file_stat.stat.exists
- name: "[2/10] Настроить swappiness = 10 (временно)"
sysctl:
name: vm.swappiness
value: '10'
state: present
reload: yes
- name: "[2/10] Настроить swappiness = 10 (постоянно)"
lineinfile:
path: /etc/sysctl.conf
regexp: '^vm\.swappiness\s*='
line: 'vm.swappiness = 10'
state: present
- name: "[2/10] Добавить swap-файл в /etc/fstab для автоматического монтирования"
lineinfile:
path: /etc/fstab
line: '/swapfile none swap sw 0 0'
state: present
create: yes
- name: "[2/10] Проверить статус swap"
command: swapon --show
register: swap_status
changed_when: false
- name: "[2/10] Показать информацию о swap"
debug:
var: swap_status.stdout_lines
# Настройка параметров безопасности ядра
- name: "[2/10] Настроить параметры безопасности ядра"
sysctl:
name: "{{ item.name }}"
value: "{{ item.value }}"
state: present
reload: yes
loop:
# Защита от DDoS
- { name: "net.ipv4.tcp_syn_retries", value: "2" }
- { name: "net.ipv4.tcp_synack_retries", value: "2" }
- { name: "net.ipv4.tcp_max_syn_backlog", value: "2048" }
- { name: "net.ipv4.tcp_fin_timeout", value: "15" }
- { name: "net.ipv4.tcp_keepalive_time", value: "1200" }
- { name: "net.ipv4.tcp_keepalive_intvl", value: "15" }
- { name: "net.ipv4.tcp_keepalive_probes", value: "5" }
- { name: "net.core.netdev_max_backlog", value: "1000" }
- { name: "net.core.somaxconn", value: "65535" }
# Защита от IP спуфинга
- { name: "net.ipv4.conf.all.accept_source_route", value: "0" }
- { name: "net.ipv4.conf.default.accept_source_route", value: "0" }
- { name: "net.ipv6.conf.all.accept_source_route", value: "0" }
- { name: "net.ipv6.conf.default.accept_source_route", value: "0" }
# Защита от фрагментации
- { name: "net.ipv4.conf.all.log_martians", value: "1" }
- { name: "net.ipv4.conf.default.log_martians", value: "1" }
- { name: "net.ipv4.icmp_echo_ignore_broadcasts", value: "1" }
- { name: "net.ipv4.icmp_ignore_bogus_error_responses", value: "1" }
- { name: "net.ipv4.tcp_syncookies", value: "1" }
- { name: "net.ipv4.conf.all.rp_filter", value: "1" }
- { name: "net.ipv4.conf.default.rp_filter", value: "1" }
# Для Docker
- { name: "kernel.pid_max", value: "65536" }
- { name: "kernel.threads-max", value: "4096" }
- { name: "vm.max_map_count", value: "262144" }
- name: "[2/10] Сохранить параметры безопасности в /etc/sysctl.conf"
lineinfile:
path: /etc/sysctl.conf
regexp: "^{{ item.name }}\\s*="
line: "{{ item.name }} = {{ item.value }}"
state: present
loop:
# Защита от DDoS
- { name: "net.ipv4.tcp_syn_retries", value: "2" }
- { name: "net.ipv4.tcp_synack_retries", value: "2" }
- { name: "net.ipv4.tcp_max_syn_backlog", value: "2048" }
- { name: "net.ipv4.tcp_fin_timeout", value: "15" }
- { name: "net.ipv4.tcp_keepalive_time", value: "1200" }
- { name: "net.ipv4.tcp_keepalive_intvl", value: "15" }
- { name: "net.ipv4.tcp_keepalive_probes", value: "5" }
- { name: "net.core.netdev_max_backlog", value: "1000" }
- { name: "net.core.somaxconn", value: "65535" }
# Защита от IP спуфинга
- { name: "net.ipv4.conf.all.accept_source_route", value: "0" }
- { name: "net.ipv4.conf.default.accept_source_route", value: "0" }
- { name: "net.ipv6.conf.all.accept_source_route", value: "0" }
- { name: "net.ipv6.conf.default.accept_source_route", value: "0" }
# Защита от фрагментации
- { name: "net.ipv4.conf.all.log_martians", value: "1" }
- { name: "net.ipv4.conf.default.log_martians", value: "1" }
- { name: "net.ipv4.icmp_echo_ignore_broadcasts", value: "1" }
- { name: "net.ipv4.icmp_ignore_bogus_error_responses", value: "1" }
- { name: "net.ipv4.tcp_syncookies", value: "1" }
- { name: "net.ipv4.conf.all.rp_filter", value: "1" }
- { name: "net.ipv4.conf.default.rp_filter", value: "1" }
# Для Docker
- { name: "kernel.pid_max", value: "65536" }
- { name: "kernel.threads-max", value: "4096" }
- { name: "vm.max_map_count", value: "262144" }
# ========================================
# ЭТАП 3: СИСТЕМНЫЕ СЕРВИСЫ (ROOT)
# ========================================
- name: "[3/10] Включить и запустить prometheus-node-exporter"
systemd:
name: prometheus-node-exporter
enabled: yes
state: started
- name: "[3/10] Проверить статус prometheus-node-exporter"
command: systemctl status prometheus-node-exporter
register: node_exporter_status
changed_when: false
- name: "[3/10] Показать статус prometheus-node-exporter"
debug:
var: node_exporter_status.stdout_lines
- name: "[3/10] Проверить, что node_exporter слушает на порту 9100"
command: ss -tulpn | grep 9100
register: node_exporter_port
changed_when: false
ignore_errors: yes
- name: "[3/10] Показать информацию о порте 9100"
debug:
var: node_exporter_port.stdout_lines
- name: "[3/10] Обновить Docker Compose до последней версии"
get_url:
url: "https://github.com/docker/compose/releases/latest/download/docker-compose-{{ ansible_system }}-{{ ansible_architecture }}"
dest: /usr/local/bin/docker-compose
mode: '0755'
- name: "[3/10] Включить и запустить Docker"
systemd:
name: docker
enabled: yes
state: started
# ========================================
# ЭТАП 4: ПОЛЬЗОВАТЕЛЬ DEPLOY (ROOT)
# ========================================
- name: "[4/10] Проверить существование пользователя deploy"
getent:
database: passwd
key: "{{ deploy_user }}"
register: user_exists
failed_when: false
- name: "[4/10] Создать группу deploy с GID 1001"
group:
name: "{{ deploy_user }}"
gid: "{{ gid }}"
when: user_exists.ansible_facts.getent_passwd is not defined
- name: "[4/10] Создать пользователя deploy с UID 1001 (если не существует)"
user:
name: "{{ deploy_user }}"
uid: "{{ uid }}"
group: "{{ gid }}"
shell: /bin/zsh
create_home: yes
system: no
groups: docker
append: yes
when: user_exists.ansible_facts.getent_passwd is not defined
- name: "[4/10] Установить zsh как оболочку по умолчанию для существующего пользователя deploy"
user:
name: "{{ deploy_user }}"
shell: /bin/zsh
when: user_exists.ansible_facts.getent_passwd is defined
- name: "[4/10] Скопировать SSH ключ с локальной машины для пользователя deploy"
authorized_key:
user: "{{ deploy_user }}"
key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
state: present
- name: "[4/10] Настроить sudo для deploy (все команды без пароля)"
lineinfile:
path: /etc/sudoers.d/deploy
line: "{{ deploy_user }} ALL=(ALL) NOPASSWD: ALL"
create: yes
mode: '0440'
validate: 'visudo -cf %s'
- name: "[4/10] Удалить /home/prod, если требуется (чистое развертывание)"
file:
path: "{{ project_root }}"
state: absent
when: recreate_project | bool
- name: "[4/10] Создать директорию проекта /home/prod"
file:
path: "{{ project_root }}"
state: directory
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0755'
- name: "[4/10] Скопировать приватный SSH ключ для Git"
copy:
src: "~/.ssh/id_rsa"
dest: "/home/{{ deploy_user }}/.ssh/id_rsa"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0600'
remote_src: no
- name: "[4/10] Настроить SSH config для GitHub"
lineinfile:
path: "/home/{{ deploy_user }}/.ssh/config"
line: "Host github.com\n StrictHostKeyChecking no\n UserKnownHostsFile /dev/null"
create: yes
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0600'
# ========================================
# КОПИРОВАНИЕ КОНФИГУРАЦИИ ZSH (ROOT)
# ========================================
- name: "[4/10] Создать директорию .zsh для пользователя deploy"
file:
path: "/home/{{ deploy_user }}/.zsh"
state: directory
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0755'
- name: "[4/10] Создать базовый .zshrc"
copy:
content: |
# Zsh configuration
# This file was created automatically by Ansible
# Enable completion system
autoload -Uz compinit
compinit
# Enable colors
autoload -Uz colors
colors
# History configuration
HISTFILE=~/.zsh_history
HISTSIZE=10000
SAVEHIST=10000
setopt HIST_IGNORE_DUPS
setopt HIST_IGNORE_ALL_DUPS
setopt HIST_SAVE_NO_DUPS
setopt HIST_FIND_NO_DUPS
setopt SHARE_HISTORY
# Directory navigation
setopt AUTO_CD
setopt AUTO_PUSHD
setopt PUSHD_IGNORE_DUPS
setopt PUSHD_SILENT
# Completion
setopt AUTO_LIST
setopt AUTO_MENU
setopt COMPLETE_IN_WORD
setopt ALWAYS_TO_END
# Prompt
PROMPT='%F{blue}%n@%m%f %F{green}%~%f %# '
# Aliases
alias ll='ls -la'
alias la='ls -A'
alias l='ls -CF'
alias ..='cd ..'
alias ...='cd ../..'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
# Docker aliases
alias d='docker'
alias dc='docker-compose'
alias dps='docker ps'
alias dpsa='docker ps -a'
alias di='docker images'
alias dex='docker exec -it'
# Git aliases
alias gs='git status'
alias ga='git add'
alias gc='git commit'
alias gp='git push'
alias gl='git log --oneline'
alias gd='git diff'
# Project specific
alias prod='cd /home/prod'
alias bots='cd /home/prod/bots'
alias logs='cd /home/prod/bots/*/logs'
# Environment
export PATH="$PATH:/usr/local/bin"
export EDITOR=vim
# Load additional configurations if they exist
[ -f ~/.zshrc.local ] && source ~/.zshrc.local
dest: "/home/{{ deploy_user }}/.zshrc"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
- name: "[4/10] Создать базовый .zshenv"
copy:
content: |
# Zsh environment configuration
# This file is sourced before .zshrc
# Set default editor
export EDITOR=vim
# Add local bin to PATH
export PATH="$HOME/.local/bin:$PATH"
dest: "/home/{{ deploy_user }}/.zshenv"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
- name: "[4/10] Создать базовый .zprofile"
copy:
content: |
# Zsh profile configuration
# This file is sourced for login shells
# Load .zshrc if it exists
[ -f ~/.zshrc ] && source ~/.zshrc
dest: "/home/{{ deploy_user }}/.zprofile"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
- name: "[4/10] Создать базовый .zshrc.local"
copy:
content: |
# Local zsh configuration
# This file is for local customizations
# Add your custom aliases and functions here
dest: "/home/{{ deploy_user }}/.zshrc.local"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
- name: "[4/10] Установить правильные права на zsh файлы"
file:
path: "/home/{{ deploy_user }}/{{ item }}"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
loop:
- ".zshrc"
- ".zshenv"
- ".zprofile"
- ".zshrc.local"
ignore_errors: yes
- name: "[4/10] Установить правильные права на все zsh файлы"
file:
path: "/home/{{ deploy_user }}/.zsh"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0755'
recurse: yes
# ========================================
# ЭТАП 5: КЛОНИРОВАНИЕ РЕПОЗИТОРИЕВ (DEPLOY)
# ========================================
- name: "[5/10] Исправить права на директорию проекта перед клонированием"
file:
path: "{{ project_root }}"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0755'
recurse: yes
become: yes
- name: "[5/10] Клонировать основной репозиторий prod"
git:
repo: git@github.com:KerradKerridi/prod.git
dest: "{{ project_root }}"
update: yes
force: yes
become: yes
become_user: "{{ deploy_user }}"
- name: "[5/10] Клонировать AnonBot"
git:
repo: git@github.com:KerradKerridi/AnonBot.git
dest: "{{ project_root }}/bots/AnonBot"
update: yes
force: yes
become: yes
become_user: "{{ deploy_user }}"
- name: "[5/10] Клонировать telegram-helper-bot"
git:
repo: git@github.com:KerradKerridi/telegram-helper-bot.git
dest: "{{ project_root }}/bots/telegram-helper-bot"
update: yes
force: yes
become: yes
become_user: "{{ deploy_user }}"
- name: "[5/10] Исправить права на все файлы после клонирования"
file:
path: "{{ project_root }}"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0755'
recurse: yes
become: yes
# ========================================
# ЭТАП 6: КОПИРОВАНИЕ КОНФИГУРАЦИЙ (ROOT)
# ========================================
- name: "[6/10] Скопировать конфигурацию Alertmanager"
copy:
src: "{{ playbook_dir }}/../alertmanager/alertmanager.yml"
dest: "{{ project_root }}/infra/alertmanager/alertmanager.yml"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
backup: yes
- name: "[6/10] Скопировать правила алертов Prometheus"
copy:
src: "{{ playbook_dir }}/../prometheus/alert_rules.yml"
dest: "{{ project_root }}/infra/prometheus/alert_rules.yml"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
backup: yes
- name: "[6/10] Скопировать дашборды Grafana"
copy:
src: "{{ playbook_dir }}/../grafana/dashboards/"
dest: "{{ project_root }}/infra/grafana/dashboards/"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
backup: yes
- name: "[6/10] Скопировать скрипт настройки SSL"
copy:
src: "{{ playbook_dir }}/../../scripts/setup-ssl.sh"
dest: /usr/local/bin/setup-ssl.sh
owner: root
group: root
mode: '0755'
backup: yes
- name: "[6/10] Установить правильные права на дашборд Node Exporter Full"
file:
path: "{{ project_root }}/infra/grafana/provisioning/dashboards/node-exporter-full-dashboard.json"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
# ========================================
# ЭТАП 7: КОПИРОВАНИЕ ДАННЫХ СО СТАРОГО СЕРВЕРА (ROOT)
# ========================================
- name: "[7/10] Скопировать SSH ключ на старый сервер для копирования файлов"
authorized_key:
user: root
key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
state: present
delegate_to: "{{ old_server }}"
- name: "[7/10] Копировать .env для telegram-helper-bot со старого сервера"
fetch:
src: "/home/prod/bots/telegram-helper-bot/.env"
dest: "/tmp/telegram-helper-bot.env"
flat: yes
delegate_to: "{{ old_server }}"
ignore_errors: yes
- name: "[7/10] Переместить .env для telegram-helper-bot на новое место"
copy:
src: "/tmp/telegram-helper-bot.env"
dest: "{{ project_root }}/bots/telegram-helper-bot/.env"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
when: ansible_check_mode == false
ignore_errors: yes
- name: "[7/10] Проверить размер БД для telegram-helper-bot"
stat:
path: "/home/prod/bots/telegram-helper-bot/database/tg-bot-database.db"
delegate_to: "{{ old_server }}"
register: db_size
- name: "[7/10] Показать размер БД для telegram-helper-bot"
debug:
msg: "Размер БД: {{ (db_size.stat.size / 1024 / 1024) | round(2) }} MB"
- name: "[7/10] Копировать БД для telegram-helper-bot"
fetch:
src: "/home/prod/bots/telegram-helper-bot/database/tg-bot-database.db"
dest: "/tmp/tg-bot-database.db"
flat: yes
delegate_to: "{{ old_server }}"
ignore_errors: yes
- name: "[7/10] Переместить БД для telegram-helper-bot на новое место"
copy:
src: "/tmp/tg-bot-database.db"
dest: "{{ project_root }}/bots/telegram-helper-bot/database/tg-bot-database.db"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
when: ansible_check_mode == false
ignore_errors: yes
- name: "[7/10] Создать папку voice_users на новом сервере"
file:
path: "{{ project_root }}/bots/telegram-helper-bot/voice_users"
state: directory
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0755'
- name: "[7/10] Создать временную папку для voice_users на локальной машине"
file:
path: "/tmp/voice_users_migration"
state: directory
mode: '0755'
delegate_to: localhost
become: no
- name: "[7/10] Копировать voice_users со старого сервера на локальную машину"
command: >
rsync -avz --progress --timeout=60
root@77.223.98.129:/home/prod/bots/telegram-helper-bot/voice_users/
/tmp/voice_users_migration/
delegate_to: localhost
become: no
ignore_errors: yes
- name: "[7/10] Копировать voice_users с локальной машины на новый сервер"
synchronize:
src: "/tmp/voice_users_migration/"
dest: "{{ project_root }}/bots/telegram-helper-bot/voice_users/"
mode: push
rsync_opts: ["--progress", "--stats", "--partial", "--verbose"]
- name: "[7/10] Очистить временную папку на локальной машине"
file:
path: "/tmp/voice_users_migration"
state: absent
delegate_to: localhost
become: no
- name: "[7/10] Копировать корневой .env файл"
fetch:
src: "/home/prod/.env"
dest: "/tmp/root.env"
flat: yes
delegate_to: "{{ old_server }}"
ignore_errors: yes
- name: "[7/10] Переместить корневой .env файл на новое место"
copy:
src: "/tmp/root.env"
dest: "{{ project_root }}/.env"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
when: ansible_check_mode == false
ignore_errors: yes
- name: "[7/10] Копировать .env для AnonBot"
fetch:
src: "/home/prod/bots/AnonBot/.env"
dest: "/tmp/anonbot.env"
flat: yes
delegate_to: "{{ old_server }}"
ignore_errors: yes
- name: "[7/10] Переместить .env для AnonBot на новое место"
copy:
src: "/tmp/anonbot.env"
dest: "{{ project_root }}/bots/AnonBot/.env"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
when: ansible_check_mode == false
ignore_errors: yes
- name: "[7/10] Проверить размер БД для AnonBot"
stat:
path: "/home/prod/bots/AnonBot/database/anon_qna.db"
delegate_to: "{{ old_server }}"
register: anon_db_size
- name: "[7/10] Показать размер БД для AnonBot"
debug:
msg: "Размер БД AnonBot: {{ (anon_db_size.stat.size / 1024 / 1024) | round(2) }} MB"
- name: "[7/10] Копировать БД для AnonBot"
fetch:
src: "/home/prod/bots/AnonBot/database/anon_qna.db"
dest: "/tmp/anon_qna.db"
flat: yes
delegate_to: "{{ old_server }}"
ignore_errors: yes
- name: "[7/10] Переместить БД для AnonBot на новое место"
copy:
src: "/tmp/anon_qna.db"
dest: "{{ project_root }}/bots/AnonBot/database/anon_qna.db"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
when: ansible_check_mode == false
ignore_errors: yes
- name: "[7/10] Установить права на скопированные файлы"
file:
path: "{{ item }}"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0644'
loop:
- "{{ project_root }}/bots/telegram-helper-bot/.env"
- "{{ project_root }}/bots/telegram-helper-bot/database/tg-bot-database.db"
- "{{ project_root }}/bots/AnonBot/.env"
- "{{ project_root }}/bots/AnonBot/database/anon_qna.db"
- name: "[7/10] Исправить права доступа для voice_users (рекурсивно)"
file:
path: "{{ project_root }}/bots/telegram-helper-bot/voice_users"
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0755'
recurse: yes
- name: "[7/10] Очистить временные файлы"
file:
path: "{{ item }}"
state: absent
loop:
- "/tmp/telegram-helper-bot.env"
- "/tmp/tg-bot-database.db"
- "/tmp/root.env"
- "/tmp/anonbot.env"
- "/tmp/anon_qna.db"
ignore_errors: yes
- name: "[7/10] Очистить временную папку voice_users на локальной машине"
file:
path: "/tmp/voice_users_migration"
state: absent
delegate_to: localhost
become: no
ignore_errors: yes
# ========================================
# ЭТАП 8: NGINX КОНФИГУРАЦИЯ (ROOT)
# ========================================
- name: "[8/10] Остановить nginx (если запущен)"
systemd:
name: nginx
state: stopped
ignore_errors: yes
- name: "[8/10] Создать директории для nginx конфигураций"
file:
path: "{{ item }}"
state: directory
owner: root
group: root
mode: '0755'
loop:
- "{{ project_root }}/infra/nginx"
- "{{ project_root }}/infra/nginx/ssl"
- "{{ project_root }}/infra/nginx/conf.d"
- "{{ project_root }}/infra/uptime-kuma"
- "{{ project_root }}/infra/uptime-kuma/backup"
- "{{ project_root }}/infra/alertmanager"
- "{{ project_root }}/infra/grafana/dashboards"
- "{{ project_root }}/infra/logrotate"
- "{{ project_root }}/scripts"
- /etc/nginx/passwords
- name: "[8/10] Скопировать скрипт генерации паролей"
copy:
src: "{{ playbook_dir }}/../../scripts/generate_auth_passwords.sh"
dest: /usr/local/bin/generate_auth_passwords.sh
owner: root
group: root
mode: '0755'
- name: "[8/10] Скопировать .env файл на сервер"
copy:
src: "{{ playbook_dir }}/../../.env"
dest: /tmp/.env
mode: '0600'
ignore_errors: yes
- name: "[8/10] Создать временный vars файл из .env"
shell: |
python3 -c "
import os
env_vars = {}
with open('/tmp/.env', 'r') as f:
for line in f:
line = line.strip()
if line and not line.startswith('#') and '=' in line:
key, value = line.split('=', 1)
env_vars[key] = value
import yaml
print(yaml.dump(env_vars, default_flow_style=False))
" > /tmp/ansible_vars.yml
ignore_errors: yes
- name: "[8/10] Загрузить переменные из временного файла"
include_vars:
file: /tmp/ansible_vars.yml
ignore_errors: yes
- name: "[8/10] Удалить временные файлы"
file:
path: "{{ item }}"
state: absent
loop:
- /tmp/ansible_vars.yml
- /tmp/.env
ignore_errors: yes
- name: "[8/10] Удалить старый файл паролей"
file:
path: /etc/nginx/passwords/monitoring.htpasswd
state: absent
ignore_errors: yes
- name: "[8/10] Сгенерировать htpasswd хеш для мониторинга"
command: htpasswd -nb admin "{{ status_page_password[:72] }}"
register: htpasswd_output
changed_when: false
- name: "[8/10] Создать файл паролей для мониторинга"
copy:
content: "{{ htpasswd_output.stdout }}"
dest: /etc/nginx/passwords/monitoring.htpasswd
owner: root
group: www-data
mode: '0640'
backup: yes
become: yes
- name: "[8/10] Установить права на файл паролей"
file:
path: /etc/nginx/passwords/monitoring.htpasswd
owner: root
group: www-data
mode: '0640'
- name: "[8/10] Сгенерировать самоподписанный SSL сертификат (fallback)"
command: >
openssl req -x509 -newkey rsa:4096 -keyout {{ project_root }}/infra/nginx/ssl/key.pem
-out {{ project_root }}/infra/nginx/ssl/cert.pem -days 365 -nodes
-subj "/CN={{ ansible_host }}/O=Monitoring/C=RU"
args:
creates: "{{ project_root }}/infra/nginx/ssl/cert.pem"
when: not use_letsencrypt | default(false)
- name: "[8/10] Создать директории для Let's Encrypt"
file:
path: "{{ item }}"
state: directory
owner: root
group: root
mode: '0755'
loop:
- /etc/letsencrypt
- /etc/letsencrypt/live
- /etc/letsencrypt/archive
- /etc/letsencrypt/renewal
when: use_letsencrypt | default(false)
- name: "[8/10] Настроить cron для автоматического обновления SSL сертификатов"
cron:
name: "SSL Certificate Renewal"
job: "0 2 * * 1 /usr/local/bin/ssl-renewal.sh"
user: root
when: use_letsencrypt | default(false)
- name: "[8/10] Установить права на SSL сертификаты"
file:
path: "{{ item }}"
owner: root
group: root
mode: '0600'
loop:
- "{{ project_root }}/infra/nginx/ssl/cert.pem"
- "{{ project_root }}/infra/nginx/ssl/key.pem"
- name: "[8/10] Сгенерировать htpasswd хеш для status page"
command: htpasswd -nb admin "{{ status_page_password[:72] }}"
register: status_page_htpasswd_output
changed_when: false
- name: "[8/10] Создать htpasswd файл для status page"
copy:
content: "{{ status_page_htpasswd_output.stdout }}"
dest: "{{ project_root }}/infra/nginx/.htpasswd"
owner: root
group: root
mode: '0644'
- name: "[8/10] Удалить старую конфигурацию nginx"
file:
path: /etc/nginx/nginx.conf
state: absent
- name: "[8/10] Скопировать основную конфигурацию nginx"
copy:
src: "{{ playbook_dir }}/../nginx/nginx.conf"
dest: /etc/nginx/nginx.conf
owner: root
group: root
mode: '0644'
backup: yes
# Удалено: конфигурации nginx для сервисов теперь интегрированы в основной nginx.conf
- name: "[8/10] Создать директорию для SSL сертификатов"
file:
path: /etc/nginx/ssl
state: directory
owner: root
group: root
mode: '0755'
- name: "[8/10] Сгенерировать самоподписанный SSL сертификат"
command: >
openssl req -x509 -nodes -days 365 -newkey rsa:2048
-keyout /etc/nginx/ssl/privkey.pem
-out /etc/nginx/ssl/fullchain.pem
-subj "/C=RU/ST=Moscow/L=Moscow/O=Bot Infrastructure/OU=IT Department/CN={{ ansible_host }}"
args:
creates: /etc/nginx/ssl/fullchain.pem
- name: "[8/10] Установить права на SSL сертификаты"
file:
path: "{{ item }}"
owner: root
group: root
mode: '0600'
loop:
- /etc/nginx/ssl/privkey.pem
- /etc/nginx/ssl/fullchain.pem
- name: "[8/10] Создать htpasswd файл для status page"
copy:
content: "admin:$apr1$rbRCQQfQ$CnUvjW17YHFuEWjmlqxjx."
dest: /etc/nginx/.htpasswd
owner: root
group: root
mode: '0644'
backup: yes
- name: "[8/10] Включить nginx (запустим позже после контейнеров)"
systemd:
name: nginx
enabled: yes
state: stopped
# ========================================
# ЭТАП 9: БЕЗОПАСНОСТЬ И ФАЙРВОЛ (ROOT)
# ========================================
- name: "[9/10] Разрешить SSH (порт 22) перед включением UFW"
ufw:
rule: allow
port: "22"
proto: tcp
- name: "[9/10] Разрешить новый SSH порт (15722) перед включением UFW"
ufw:
rule: allow
port: "15722"
proto: tcp
- name: "[9/10] Настроить политику UFW по умолчанию"
ufw:
policy: deny
direction: incoming
- name: "[9/10] Включить UFW (файрвол)"
ufw:
state: enabled
- name: "[9/10] Открыть порты для сервисов"
ufw:
rule: allow
port: "{{ item }}"
proto: tcp
loop:
- "8080" # Telegram Bot
- "8081" # AnonBot
- "9090" # Prometheus
- "3000" # Grafana
- "9100" # Node Exporter
- "80" # HTTP
- "443" # HTTPS
- name: "[9/10] Настроить безопасный SSH"
lineinfile:
path: /etc/ssh/sshd_config
regexp: "^{{ item.regexp }}"
line: "{{ item.line }}"
backup: yes
loop:
- { regexp: "Port", line: "Port 15722" }
- { regexp: "PermitRootLogin", line: "PermitRootLogin no" }
- { regexp: "PasswordAuthentication", line: "PasswordAuthentication no" }
- { regexp: "PubkeyAuthentication", line: "PubkeyAuthentication yes" }
- { regexp: "AllowUsers", line: "AllowUsers {{ deploy_user }}" }
notify: reload ssh
- name: "[9/10] Перезагрузить SSH сервис для применения настроек"
systemd:
name: ssh
state: reloaded
- name: "[9/10] Создать конфигурацию Fail2ban для SSH"
copy:
content: |
[sshd]
enabled = true
port = 15722
filter = sshd
logpath = /var/log/auth.log
maxretry = 3
bantime = 3600
findtime = 600
dest: /etc/fail2ban/jail.d/sshd.local
owner: root
group: root
mode: '0644'
- name: "[9/10] Создать конфигурацию Fail2ban для Nginx"
copy:
content: |
[nginx-http-auth]
enabled = true
port = http,https
filter = nginx-http-auth
logpath = /var/log/nginx/error.log
maxretry = 3
bantime = 3600
findtime = 600
[nginx-limit-req]
enabled = true
port = http,https
filter = nginx-limit-req
logpath = /var/log/nginx/error.log
maxretry = 3
bantime = 3600
findtime = 600
dest: /etc/fail2ban/jail.d/nginx.local
owner: root
group: root
mode: '0644'
- name: "[9/10] Создать конфигурацию Fail2ban для Docker"
copy:
content: |
[docker]
enabled = true
port = 2375,2376
filter = docker
logpath = /var/log/syslog
maxretry = 3
bantime = 3600
findtime = 600
dest: /etc/fail2ban/jail.d/docker.local
owner: root
group: root
mode: '0644'
- name: "[9/10] Включить и запустить Fail2ban"
systemd:
name: fail2ban
enabled: yes
state: started
- name: "[9/10] Проверить статус Fail2ban"
command: fail2ban-client status
register: fail2ban_status
changed_when: false
ignore_errors: yes
- name: "[9/10] Показать статус Fail2ban"
debug:
var: fail2ban_status.stdout_lines
# ========================================
# ЭТАП 9.5: НАСТРОЙКА LOGROTATE (ROOT)
# ========================================
- name: "[9.5/10] Создать директорию для logrotate конфигураций"
file:
path: /etc/logrotate.d
state: directory
owner: root
group: root
mode: '0755'
- name: "[9.5/10] Настроить logrotate для ботов"
copy:
content: |
# Logrotate configuration for bot applications
# This file manages log rotation for all bot services
/home/prod/bots/*/logs/*.log {
daily
missingok
rotate 30
compress
delaycompress
notifempty
create 0644 deploy deploy
postrotate
# Restart bot services if they are running
if [ -f /home/deploy/.docker-compose-pid ]; then
cd /home/prod && docker-compose restart
fi
endscript
}
/home/prod/bots/*/bot_stderr.log {
daily
missingok
rotate 30
compress
delaycompress
notifempty
create 0644 deploy deploy
postrotate
# Restart bot services if they are running
if [ -f /home/deploy/.docker-compose-pid ]; then
cd /home/prod && docker-compose restart
fi
endscript
}
# Docker container logs
/var/lib/docker/containers/*/*.log {
daily
missingok
rotate 7
compress
delaycompress
notifempty
create 0644 root root
postrotate
# Reload Docker daemon
systemctl reload docker
endscript
}
dest: /etc/logrotate.d/bots
owner: root
group: root
mode: '0644'
backup: yes
- name: "[9.5/10] Удалить дублирующуюся конфигурацию logrotate"
file:
path: /etc/logrotate.d/system
state: absent
ignore_errors: yes
- name: "[9.5/10] Создать директории для логов ботов"
file:
path: "{{ item }}"
state: directory
owner: "{{ deploy_user }}"
group: "{{ deploy_user }}"
mode: '0755'
loop:
- "{{ project_root }}/bots/AnonBot/logs"
- "{{ project_root }}/bots/telegram-helper-bot/logs"
- name: "[9.5/10] Проверить конфигурацию logrotate"
command: logrotate -d /etc/logrotate.conf
register: logrotate_test
changed_when: false
- name: "[9.5/10] Показать результат проверки logrotate"
debug:
var: logrotate_test.stdout_lines
- name: "[9.5/10] Включить и запустить logrotate"
systemd:
name: logrotate
enabled: yes
state: started
- name: "[9.5/10] Установить cron (если не установлен)"
apt:
name: cron
state: present
when: ansible_pkg_mgr == "apt"
- name: "[9.5/10] Настроить cron для ежедневного запуска logrotate"
cron:
name: "Logrotate daily"
job: "0 2 * * * /usr/sbin/logrotate /etc/logrotate.conf"
user: root
state: present
# ========================================
# ЭТАП 10: ЗАПУСК ПРИЛОЖЕНИЙ И ПРОВЕРКИ (DEPLOY + ROOT)
# ========================================
- name: "[10/10] Запустить ботов через make up"
command: make up
args:
chdir: "{{ project_root }}"
become: yes
become_user: "{{ deploy_user }}"
- name: "[10/10] Пауза на 45 секунд — дать контейнерам запуститься"
pause:
seconds: 45
- name: "[10/10] Проверить конфигурацию nginx (после запуска контейнеров)"
command: nginx -t
register: nginx_config_test
changed_when: false
- name: "[10/10] Показать результат проверки nginx"
debug:
var: nginx_config_test.stdout_lines
- name: "[10/10] Запустить nginx (после запуска контейнеров)"
systemd:
name: nginx
state: started
- name: "[10/10] Проверить статус nginx"
command: systemctl status nginx
register: nginx_status
changed_when: false
- name: "[10/10] Показать статус nginx"
debug:
var: nginx_status.stdout_lines
- name: "[10/10] Проверить, что порт 8080 (Telegram Bot) открыт"
wait_for:
port: 8080
host: "{{ ansible_host }}"
timeout: 30
state: started
- name: "[10/10] Проверить, что порт 8081 (AnonBot) открыт"
wait_for:
port: 8081
host: "{{ ansible_host }}"
timeout: 30
state: started
- name: "[10/10] Проверить, что порт 9090 (Prometheus) открыт"
wait_for:
port: 9090
host: "{{ ansible_host }}"
timeout: 30
state: started
- name: "[10/10] Проверить, что порт 3000 (Grafana) открыт"
wait_for:
port: 3000
host: "{{ ansible_host }}"
timeout: 10
state: started
- name: "[10/10] Проверить, что порт 9100 (Node Exporter) открыт"
wait_for:
port: 9100
host: "{{ ansible_host }}"
timeout: 10
state: started
- name: "[10/10] Проверить доступность Node Exporter метрик"
uri:
url: "http://{{ ansible_host }}:9100/metrics"
method: GET
status_code: 200
validate_certs: no
register: node_exporter_metrics
retries: 3
delay: 5
ignore_errors: yes
- name: "[10/10] Проверить, что порт 80 (Nginx HTTP) открыт"
wait_for:
port: 80
host: "{{ ansible_host }}"
timeout: 10
state: started
- name: "[10/10] Проверить, что порт 443 (Nginx HTTPS) открыт"
wait_for:
port: 443
host: "{{ ansible_host }}"
timeout: 10
state: started
- name: "[10/10] Проверить, что порт 3001 (Uptime Kuma) открыт"
wait_for:
port: 3001
host: "{{ ansible_host }}"
timeout: 10
state: started
# - name: "[10/10] Проверить, что порт 9093 (Alertmanager) открыт"
# wait_for:
# port: 9093
# host: "{{ ansible_host }}"
# timeout: 30
# state: started
# # Пропускаем проверку Alertmanager, так как есть проблемы с конфигурацией
- name: "[10/10] Проверить доступность Nginx"
uri:
url: "http://{{ ansible_host }}/nginx-health"
method: GET
status_code: 200
validate_certs: no
register: nginx_health
retries: 5
delay: 5
ignore_errors: yes
- name: "[10/10] Проверить доступность Grafana через Nginx"
uri:
url: "https://{{ ansible_host }}/grafana/api/health"
method: GET
status_code: 200
validate_certs: no
register: grafana_nginx_health
retries: 5
delay: 5
ignore_errors: yes
- name: "[10/10] Проверить доступность Prometheus через Nginx (health check без авторизации)"
uri:
url: "https://{{ ansible_host }}/prometheus/-/healthy"
method: GET
status_code: 200
validate_certs: no
register: prometheus_nginx_health
retries: 5
delay: 5
ignore_errors: yes
- name: "[10/10] Проверить доступность Grafana API"
uri:
url: "http://{{ ansible_host }}:3000/api/health"
method: GET
status_code: 200
validate_certs: no
register: grafana_health
retries: 5
delay: 5
ignore_errors: yes
- name: "[10/10] Скопировать файлы конфигурации Uptime Kuma"
copy:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
mode: '0644'
loop:
- { src: "{{ playbook_dir }}/../uptime-kuma/monitors.json", dest: "/tmp/uptime-kuma-monitors.json" }
- { src: "{{ playbook_dir }}/../uptime-kuma/settings.json", dest: "/tmp/uptime-kuma-settings.json" }
ignore_errors: yes
- name: "[10/10] Создать скрипт импорта мониторов Uptime Kuma"
copy:
content: |
#!/bin/bash
set -e
echo "Очистка старых мониторов..."
docker exec bots_uptime_kuma sqlite3 /app/data/kuma.db "DELETE FROM monitor_tag;"
docker exec bots_uptime_kuma sqlite3 /app/data/kuma.db "DELETE FROM monitor;"
docker exec bots_uptime_kuma sqlite3 /app/data/kuma.db "DELETE FROM tag;"
echo "Импорт мониторов..."
docker exec -i bots_uptime_kuma sqlite3 /app/data/kuma.db << 'EOF'
INSERT INTO monitor (name, active, user_id, interval, url, type, weight, created_date, keyword, maxretries, ignore_tls, upside_down, maxredirects, accepted_statuscodes_json, method, timeout, description) VALUES
('Telegram Bot Health', 1, 1, 60, 'http://telegram-bot:8080/health', 'http', 2000, datetime('now'), NULL, 3, 0, 0, 10, '["200-299"]', 'GET', 10, 'Мониторинг состояния Telegram Helper Bot'),
('AnonBot Health', 1, 1, 60, 'http://anon-bot:8081/health', 'http', 2000, datetime('now'), NULL, 3, 0, 0, 10, '["200-299"]', 'GET', 10, 'Мониторинг состояния AnonBot'),
('Prometheus Health', 1, 1, 60, 'http://{{ ansible_host }}:9090/prometheus/-/healthy', 'http', 2000, datetime('now'), NULL, 3, 0, 0, 10, '["200-299"]', 'GET', 10, 'Мониторинг состояния Prometheus'),
('Grafana Health', 1, 1, 60, 'http://grafana:3000/api/health', 'http', 2000, datetime('now'), NULL, 3, 0, 0, 10, '["200-299"]', 'GET', 10, 'Мониторинг состояния Grafana'),
('AlertManager Health', 1, 1, 60, 'http://alertmanager:9093/-/healthy', 'http', 2000, datetime('now'), NULL, 3, 0, 0, 10, '["200-299"]', 'GET', 10, 'Мониторинг состояния AlertManager'),
('Nginx Health', 1, 1, 60, 'http://{{ ansible_host }}:80/nginx-health', 'http', 2000, datetime('now'), 'healthy', 3, 0, 0, 10, '["200-299"]', 'GET', 10, 'Мониторинг состояния Nginx'),
('External Bot Status', 1, 1, 300, 'https://{{ ansible_host }}/status/', 'http', 2000, datetime('now'), NULL, 2, 0, 0, 10, '["200-299"]', 'GET', 15, 'Мониторинг внешней доступности статусной страницы');
EOF
echo "Импорт тегов..."
docker exec -i bots_uptime_kuma sqlite3 /app/data/kuma.db << 'EOF'
INSERT INTO tag (name, color) VALUES
('bot', '#3498db'),
('infrastructure', '#f39c12');
EOF
echo "Связывание тегов с мониторами..."
docker exec -i bots_uptime_kuma sqlite3 /app/data/kuma.db << 'EOF'
-- Связываем AnonBot и Telegram Bot с тегом 'bot'
INSERT INTO monitor_tag (monitor_id, tag_id, value)
SELECT m.id, t.id, ''
FROM monitor m, tag t
WHERE m.name IN ('AnonBot Health', 'Telegram Bot Health') AND t.name = 'bot';
-- Связываем Prometheus, Grafana, AlertManager, Nginx, External Bot с тегом 'infrastructure'
INSERT INTO monitor_tag (monitor_id, tag_id, value)
SELECT m.id, t.id, ''
FROM monitor m, tag t
WHERE m.name IN ('Prometheus Health', 'Grafana Health', 'AlertManager Health', 'Nginx Health', 'External Bot Status') AND t.name = 'infrastructure';
EOF
echo "Импорт настроек..."
docker exec -i bots_uptime_kuma sqlite3 /app/data/kuma.db << 'EOF'
INSERT OR REPLACE INTO setting (key, value) VALUES
('language', 'ru'),
('theme', 'light'),
('timezone', 'Europe/Moscow'),
('dateLocale', 'ru'),
('dateFormat', 'YYYY-MM-DD HH:mm:ss'),
('timeFormat', '24'),
('weekStart', '1'),
('searchEngineIndex', 'true'),
('primaryBaseURL', 'https://{{ ansible_host }}/status/'),
('public', 'true'),
('publicGroupList', 'true'),
('showTags', 'true'),
('showPoweredBy', 'false'),
('keepDataPeriodDays', '365'),
('retentionCheckInterval', '3600'),
('maxmindLicenseKey', ''),
('dnsCache', 'true'),
('dnsCacheTtl', '300'),
('trustProxy', 'true'),
('disableAuth', 'false'),
('defaultTimezone', 'Europe/Moscow'),
('defaultLanguage', 'ru');
EOF
echo "Перезапуск Uptime Kuma для применения изменений..."
docker restart bots_uptime_kuma
echo "Ожидание запуска контейнера..."
sleep 15
echo "Импорт завершен!"
dest: /tmp/import_uptime_kuma.sh
mode: '0755'
ignore_errors: yes
- name: "[10/10] Выполнить импорт мониторов и настроек Uptime Kuma"
shell: /tmp/import_uptime_kuma.sh
register: uptime_kuma_import_result
ignore_errors: yes
- name: "[10/10] Показать результат импорта Uptime Kuma"
debug:
msg: "{{ uptime_kuma_import_result.stdout_lines }}"
when: uptime_kuma_import_result.stdout_lines is defined
- name: "[10/10] Проверить доступность Uptime Kuma через Nginx"
uri:
url: "https://{{ ansible_host }}/status"
method: GET
status_code: 200
validate_certs: no
register: uptime_kuma_nginx_health
retries: 5
delay: 5
ignore_errors: yes
- name: "[10/10] Проверить доступность Alertmanager через Nginx (с авторизацией)"
uri:
url: "https://{{ ansible_host }}/alerts/"
method: GET
status_code: 200
validate_certs: no
user: "admin"
password: "admin123"
register: alertmanager_nginx_health
retries: 2
delay: 3
ignore_errors: yes
- name: "[10/10] Переподключиться по новому SSH порту"
meta: reset_connection
- name: "[10/10] Закрыть старый SSH порт 22 в UFW (финальный шаг)"
ufw:
rule: deny
port: "22"
proto: tcp
ignore_errors: yes
- name: "[10/10] Проверка запуска ботов завершена — всё работает 🟢"
debug:
msg: "Все сервисы запущены и слушают нужные порты. SSH настроен на порт 15722, Fail2ban активен, параметры безопасности ядра применены. Порт 22 закрыт для безопасности. Добавлены: Uptime Kuma (статусная страница), Alertmanager (мониторинг), Let's Encrypt SSL, Grafana дашборды."
# Handlers for reloading services after configuration changes
handlers:
  - name: reload ssh
    # Reload (not restart) the SSH daemon so active sessions survive
    # configuration changes.
    systemd:
      name: ssh
      state: reloaded
  - name: restart ufw
    # state=reloaded re-applies the current UFW rule set.
    ufw:
      state: reloaded