Hrankin, Aleksandr (contracted)
2026-02-19 11:34:13 +00:00
commit f243f440c3
191 changed files with 6183 additions and 0 deletions

View File

@@ -0,0 +1,43 @@
---
- name: apt update
ansible.builtin.apt:
update_cache: true
- name: apt upgrade
ansible.builtin.apt:
upgrade: dist
- name: install base packages
ansible.builtin.apt:
name:
- ca-certificates
- curl
- gnupg
- lvm2
- podman
state: present
- name: swapoff
ansible.builtin.command: swapoff -a
changed_when: true
- name: comment swap in /etc/fstab
ansible.builtin.replace:
path: /etc/fstab
regexp: '^([^#].*\s+swap\s+.*)$'
replace: '# \1'
- name: install cephadm and ceph-common
ansible.builtin.apt:
name:
- cephadm
- ceph-common
state: present
- name: cephadm version
ansible.builtin.command: cephadm version
changed_when: false
- name: ceph -v
ansible.builtin.command: ceph -v
changed_when: false

View File

@@ -0,0 +1,9 @@
---
- name: cephadm bootstrap
ansible.builtin.command: >
cephadm bootstrap
--mon-ip 192.168.0.102
--initial-dashboard-user admin
--initial-dashboard-password password
--allow-fqdn-hostname
changed_when: true

View File

@@ -0,0 +1,25 @@
---
- name: get cephadm pub key (run once on ceph01)
ansible.builtin.command: ceph cephadm get-pub-key
register: ceph_pubkey_cmd
changed_when: false
delegate_to: dev-kyiv01-vm-ceph-main-01
run_once: true
- name: set ceph pubkey fact for this play
ansible.builtin.set_fact:
ceph_pubkey: "{{ ceph_pubkey_cmd.stdout }}"
run_once: true
- name: add ceph pub key to root authorized_keys
ansible.posix.authorized_key:
user: root
key: "{{ ceph_pubkey }}"
state: present
when: inventory_hostname in ["dev-kyiv01-vm-ceph-main-02", "dev-kyiv01-vm-ceph-main-03"]
- name: restart ssh
ansible.builtin.service:
name: ssh
state: restarted
when: inventory_hostname in ["dev-kyiv01-vm-ceph-main-02", "dev-kyiv01-vm-ceph-main-03"]

View File

@@ -0,0 +1,40 @@
---
- name: add host ceph02
ansible.builtin.command: >
ceph orch host add dev-kyiv01-vm-ceph-main-02 192.168.0.103
changed_when: true
- name: add host ceph03
ansible.builtin.command: >
ceph orch host add dev-kyiv01-vm-ceph-main-03 192.168.0.104
changed_when: true
- name: add osd ceph01 sdb
ansible.builtin.command: >
ceph orch daemon add osd dev-kyiv01-vm-ceph-main-01:/dev/sdb
changed_when: true
- name: add osd ceph01 sdc
ansible.builtin.command: >
ceph orch daemon add osd dev-kyiv01-vm-ceph-main-01:/dev/sdc
changed_when: true
- name: add osd ceph02 sdb
ansible.builtin.command: >
ceph orch daemon add osd dev-kyiv01-vm-ceph-main-02:/dev/sdb
changed_when: true
- name: add osd ceph02 sdc
ansible.builtin.command: >
ceph orch daemon add osd dev-kyiv01-vm-ceph-main-02:/dev/sdc
changed_when: true
- name: add osd ceph03 sdb
ansible.builtin.command: >
ceph orch daemon add osd dev-kyiv01-vm-ceph-main-03:/dev/sdb
changed_when: true
- name: add osd ceph03 sdc
ansible.builtin.command: >
ceph orch daemon add osd dev-kyiv01-vm-ceph-main-03:/dev/sdc
changed_when: true

View File

@@ -0,0 +1,48 @@
# "[1/8] Проверка кластера"
ceph -s
ceph orch status
# "[2/8] Создаём realm/zonegroup/zone (если уже есть — будет ошибка; можно игнорировать или сначала проверить list)"
radosgw-admin realm create --rgw-realm=default --default || true
radosgw-admin zonegroup create --rgw-zonegroup=default --master --default || true
radosgw-admin zone create \
--rgw-zonegroup=default \
--rgw-zone=default \
--master \
--default || true
# "[3/8] Коммит периода (обновляем конфиг мультисайта)"
radosgw-admin period update --commit
# "[4/8] Проверка realm/zone"
radosgw-admin realm list
radosgw-admin zone list
# "[5/8] Деплой RGW сервисом через cephadm/orchestrator"
ceph orch apply rgw default --placement="1"
# "[6/8] Проверка что RGW поднялся"
ceph orch ls
ceph orch ps --service-name rgw.default
ss -lntp | grep -E 'rgw|civetweb|beast|7480|80|443' || true
# "[7/8] Создаём admin (system) пользователя — ТОЛЬКО для ops"
# Важно: system user не для приложений, а для админских операций/автоматизации ops
radosgw-admin user create \
--uid="admin" \
--display-name="RGW Admin (system)" \
--system || true
# "[8/8] Создаём пользователя для Crunchy pgBackRest + бакет"
# Создаём отдельного юзера под pgBackRest
radosgw-admin user create \
--uid="crunchy-backup" \
--display-name="Crunchy pgBackRest" || true
# Create the bucket and assign the owner (the uid must already exist)
radosgw-admin bucket create \
--bucket="crunchy-pgbackrest" \
--uid="crunchy-backup" || true
# "=== Итог: креды для Crunchy ==="
radosgw-admin user info --uid="crunchy-backup"
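# Optional sanity check (a sketch added here, not part of the original flow):
# inspect the new bucket and the buckets owned by the backup user.
radosgw-admin bucket stats --bucket="crunchy-pgbackrest"
radosgw-admin bucket list --uid="crunchy-backup"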

View File

@@ -0,0 +1,28 @@
```bash
ceph -s
ceph fsid
ceph mon dump | egrep 'mon\.' -n
ceph osd pool ls
# create the pool (pick pg_num to match the cluster size; 64/128 is fine to start)
ceph osd pool create k8s-rbd 128
# enable the "rbd" application (required for CSI)
ceph osd pool application enable k8s-rbd rbd
# (optional) initialize rbd metadata
rbd pool init k8s-rbd
# (optional) set replication size=3 (or whatever your standard is)
ceph osd pool set k8s-rbd size 3
ceph auth get-or-create client.k8s-rbd-csi \
mon 'profile rbd' \
osd "profile rbd pool=k8s-rbd" \
mgr "profile rbd"
# show the key
ceph auth get client.k8s-rbd-csi
```
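A quick smoke test of the new pool; a sketch only, and the `k8s-test` image name is just a placeholder:

```bash
rbd create k8s-rbd/k8s-test --size 1G   # create a 1 GiB test image
rbd ls k8s-rbd                          # it should appear in the pool
rbd info k8s-rbd/k8s-test
rbd rm k8s-rbd/k8s-test                 # clean up
```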

View File

@@ -0,0 +1,85 @@
# STAGE 0. OS preparation (on all Ceph nodes)
## system update
apt update && apt upgrade -y
## base packages (without chrony/dns/hosts)
apt install -y \
ca-certificates \
curl \
gnupg \
lvm2 \
podman
## disable swap (REQUIRED for k8s; not strict for Ceph, but better to do it right away)
swapoff -a
sed -i '/ swap / s/^/#/' /etc/fstab
## check the disks (make sure the OSD disks are empty)
lsblk
# STAGE 1. Install cephadm (on the bootstrap node, then on all nodes)
apt install -y cephadm ceph-common
cephadm version
ceph -v
# STAGE 2. Bootstrap the cluster (first node / mon only)
cephadm bootstrap \
--mon-ip 192.168.0.102 \
--initial-dashboard-user admin \
--initial-dashboard-password password \
--allow-fqdn-hostname
ceph -s
ceph orch ps
# STAGE 3. Add the remaining nodes to the orchestrator
ceph cephadm get-pub-key
systemctl restart ssh
ceph orch host add dev-kyiv01-vm-ceph-main-02 192.168.0.103
ceph orch host add dev-kyiv01-vm-ceph-main-03 192.168.0.104
ceph orch host ls
# STAGE 4. Add OSDs (on each node)
## bootstrap node (local)
ceph orch daemon add osd dev-kyiv01-vm-ceph-main-01:/dev/sdb
ceph orch daemon add osd dev-kyiv01-vm-ceph-main-01:/dev/sdc
## vm-ceph-kyiv-02
ceph orch daemon add osd dev-kyiv01-vm-ceph-main-02:/dev/sdb
ceph orch daemon add osd dev-kyiv01-vm-ceph-main-02:/dev/sdc
## vm-ceph-kyiv-03
ceph orch daemon add osd dev-kyiv01-vm-ceph-main-03:/dev/sdb
ceph orch daemon add osd dev-kyiv01-vm-ceph-main-03:/dev/sdc
## Check:
ceph osd tree
ceph -s
# STAGE 5. Pool for k8s RBD
ceph osd pool create k8s-rbd 64
ceph osd pool application enable k8s-rbd rbd
ceph osd pool ls
ceph osd pool get k8s-rbd all
## Mini checklist
ceph -s
ceph orch host ls
ceph orch ps
ceph osd tree
# Delete broken cluster
cephadm rm-cluster --force --fsid e3b4050a-e8be-11f0-84c2-027a4c119066
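## (sketch) the fsid passed to rm-cluster above can be looked up first instead of hard-coding it
ceph fsid
cephadm ls | grep -m1 fsid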

View File

@@ -0,0 +1,38 @@
# example DNS resolution path on Debian 13
App → glibc resolver → /etc/resolv.conf (127.0.0.53) → systemd-resolved → 192.168.0.1 (Proxmox)
# before running the role
```bash
sudo systemctl disable --now systemd-resolved
sudo rm -f /etc/resolv.conf
echo -e "nameserver 1.1.1.1\nnameserver 8.8.8.8" | sudo tee /etc/resolv.conf
docker compose down
docker compose up -d
```
```bash
# pdns-auth web/api via nginx
curl -i -H 'Host: auth.infra.hran' http://127.0.0.1/
# recursor web/api via nginx
curl -i -H 'Host: recursor.infra.hran' http://127.0.0.1/
# dnsdist web via nginx
curl -i -H 'Host: dnsdist.infra.hran' http://127.0.0.1/
curl -i -u 'admin:CHANGE_ME_DNSDIST_WEB_PASSWORD' -H 'Host: dnsdist.infra.hran' http://127.0.0.1/
# windows
C:\Windows\System32\drivers\etc\hosts
127.0.0.1 auth.infra.hran
127.0.0.1 recursor.infra.hran
127.0.0.1 dnsdist.infra.hran
# check from browser (nginx publishes port 80)
http://dnsdist.infra.hran/
http://auth.infra.hran/
http://recursor.infra.hran/
```
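To exercise the DNS path itself (not only the web UIs), a couple of dig checks against dnsdist on port 53 can help; a sketch assuming dnsutils is installed, where `infra.hran` is the authoritative zone from dnsdist.conf and the second name is just an arbitrary external domain:

```bash
# authoritative zone: should be routed to the pdns-auth pool
dig @127.0.0.1 infra.hran SOA +short
# everything else: should be recursed via pdns-recursor
dig @127.0.0.1 example.com A +short
```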

View File

@@ -0,0 +1,37 @@
- name: ensure directory structure exists
ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: "root"
group: "root"
mode: "0755"
loop:
- "{{ dns_stack_root }}"
- "{{ dns_stack_root }}/postgres/initdb"
- "{{ dns_stack_root }}/pdns-auth"
- "{{ dns_stack_root }}/pdns-recursor"
- "{{ dns_stack_root }}/dnsdist"
- "{{ dns_stack_root }}/nginx"
- name: render stack files
ansible.builtin.template:
src: "{{ item.src }}"
dest: "{{ dns_stack_root }}/{{ item.dest }}"
owner: "root"
group: "root"
mode: "0644"
loop:
- { src: "docker-compose.yml.j2", dest: "docker-compose.yml" }
- { src: ".env.j2", dest: ".env", mode: "0600" }
- {
src: "postgres/initdb/01-pdns-schema.sql.j2",
dest: "postgres/initdb/01-pdns-schema.sql",
}
- { src: "pdns-auth/pdns.conf.j2", dest: "pdns-auth/pdns.conf" }
- {
src: "pdns-recursor/recursor.conf.j2",
dest: "pdns-recursor/recursor.conf",
}
- { src: "dnsdist/dnsdist.conf.j2", dest: "dnsdist/dnsdist.conf" }
- { src: "nginx/nginx.conf.j2", dest: "nginx/nginx.conf" }
register: rendered

View File

@@ -0,0 +1,41 @@
addLocal("0.0.0.0:53")
addLocal("[::]:53")
-- ACL: clients we are willing to answer at all
addACL("127.0.0.0/8") -- IPv4 localhost (the machine talking to itself).
addACL("10.0.0.0/8") -- RFC1918 private networks (often VPN / corporate LAN).
addACL("172.16.0.0/12") -- private 172.16.0.0-172.31.255.255 (this also covers 172.30.x, the docker network).
addACL("192.168.0.0/16") -- typical home LAN.
addACL("::1/128") -- IPv6 localhost.
addACL("fc00::/7") -- IPv6 ULA (the private-range analogue)
addACL("fe80::/10") -- IPv6 link-local (on-link addresses, usually per interface).
newServer({
address="172.30.0.11:5300",
pool="auth",
name="pdns-auth"
})
newServer({
address="172.30.0.12:5301",
pool="recursor",
name="pdns-recursor"
})
-- Authoritative zones -> pool "auth", everything else -> recursor
local authZones = newSuffixMatchNode()
authZones:add("infra.hran.")
pc = newPacketCache(100000, {maxTTL=86400, minTTL=0, temporaryFailureTTL=60})
getPool("recursor"):setCache(pc)
getPool("auth"):setCache(pc)
addAction(SuffixMatchNodeRule(authZones), PoolAction("auth"))
addAction(AllRule(), PoolAction("recursor"))
webserver("0.0.0.0:8084")
setWebserverConfig({
password="CHANGE_ME_DNSDIST_WEB_PASSWORD",
apiKey="CHANGE_ME_DNSDIST_KEY",
acl="127.0.0.0/8, 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16, ::1/128, fc00::/7, fe80::/10"
})

View File

@@ -0,0 +1,142 @@
services:
postgres:
image: postgres:16
container_name: dnsstack-postgres
restart: unless-stopped
environment:
TZ: Europe/Kyiv
POSTGRES_DB: pdns
POSTGRES_USER: pdns
POSTGRES_PASSWORD: CHANGE_ME_POSTGRES_PASSWORD
volumes:
- /opt/dns-stack/postgres/data:/var/lib/postgresql/data
- ./postgres/initdb:/docker-entrypoint-initdb.d:ro
networks:
dnsnet:
ipv4_address: "172.30.0.10"
healthcheck:
test: ["CMD-SHELL", "pg_isready -U $${POSTGRES_USER} -d $${POSTGRES_DB} -h 127.0.0.1 -p 5432"]
interval: 2s
timeout: 3s
retries: 30
start_period: 10s
logging:
driver: "json-file"
options:
tag: "dnsstack.postgres"
max-size: "20m"
max-file: "10"
pdns-auth:
image: powerdns/pdns-auth-50:latest
container_name: dnsstack-pdns-auth
restart: unless-stopped
depends_on:
postgres:
condition: service_healthy
environment:
TZ: Europe/Kyiv
volumes:
- ./pdns-auth/pdns.conf:/etc/powerdns/pdns.conf:ro
networks:
dnsnet:
ipv4_address: "172.30.0.11"
expose:
- "5300"
- "8083"
ulimits:
nofile:
soft: 10064
hard: 10064
logging:
driver: "json-file"
options:
tag: "dnsstack.pdns-auth"
max-size: "20m"
max-file: "10"
pdns-recursor:
image: powerdns/pdns-recursor-53:latest
container_name: dnsstack-pdns-recursor
restart: unless-stopped
environment:
TZ: Europe/Kyiv
volumes:
- ./pdns-recursor/recursor.conf:/etc/powerdns/recursor.conf:ro
networks:
dnsnet:
ipv4_address: "172.30.0.12"
expose:
- "5301"
- "8082"
ulimits:
nofile:
soft: 10064
hard: 10064
logging:
driver: "json-file"
options:
tag: "dnsstack.pdns-recursor"
max-size: "20m"
max-file: "10"
dnsdist:
image: powerdns/dnsdist-20:latest
container_name: dnsstack-dnsdist
restart: unless-stopped
depends_on:
- pdns-auth
- pdns-recursor
environment:
TZ: Europe/Kyiv
volumes:
- ./dnsdist/dnsdist.conf:/etc/dnsdist/dnsdist.conf:ro
networks:
dnsnet:
ipv4_address: "172.30.0.2"
ports:
- "53:53/udp"
- "53:53/tcp"
expose:
- "8084"
ulimits:
nofile:
soft: 65535
hard: 65535
logging:
driver: "json-file"
options:
tag: "dnsstack.dnsdist"
max-size: "50m"
max-file: "10"
nginx:
image: nginx:1.27-alpine
container_name: dnsstack-nginx
restart: unless-stopped
depends_on:
- pdns-auth
- pdns-recursor
- dnsdist
environment:
TZ: Europe/Kyiv
volumes:
- ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
networks:
dnsnet:
ipv4_address: "172.30.0.3"
ports:
- "80:80/tcp"
logging:
driver: "json-file"
options:
tag: "dnsstack.nginx"
max-size: "20m"
max-file: "10"
networks:
dnsnet:
driver: bridge
ipam:
config:
- subnet: "172.30.0.0/24"

View File

@@ -0,0 +1,53 @@
worker_processes auto;
events { worker_connections 1024; }
http {
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log warn;
# auth.infra.hran -> pdns-auth:8083
server {
listen 80;
server_name auth.infra.hran;
location / {
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_pass http://pdns-auth:8083;
}
}
# recursor.infra.hran -> pdns-recursor:8082
server {
listen 80;
server_name recursor.infra.hran;
location / {
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_pass http://pdns-recursor:8082;
}
}
# dnsdist.infra.hran -> dnsdist:8084
server {
listen 80;
server_name dnsdist.infra.hran;
location / {
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_pass http://dnsdist:8084;
}
}
}

View File

@@ -0,0 +1,21 @@
local-address=0.0.0.0,::
local-port=5300
launch=gpgsql
gpgsql-host=postgres
gpgsql-port=5432
gpgsql-dbname=pdns
gpgsql-user=pdns
gpgsql-password=CHANGE_ME_POSTGRES_PASSWORD
api=yes
api-key=CHANGE_ME_PDNS_API_KEY
webserver=yes
webserver-address=0.0.0.0
webserver-port=8083
webserver-allow-from=127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16
disable-axfr=yes
version-string=anonymous
loglevel=4

View File

@@ -0,0 +1,46 @@
# PowerDNS Recursor 5.1+ YAML config
incoming:
listen:
- "0.0.0.0:5301"
- "[::]:5301"
allow_from:
- "127.0.0.0/8"
- "10.0.0.0/8"
- "172.16.0.0/12"
- "192.168.0.0/16"
- "::1/128"
- "fc00::/7"
- "fe80::/10"
outgoing:
source_address:
- "0.0.0.0"
- "::"
webservice:
webserver: true
address: "0.0.0.0"
port: 8082
api_key: "CHANGE_ME_RECURSOR_API_KEY"
allow_from:
- "127.0.0.0/8"
- "10.0.0.0/8"
- "172.16.0.0/12"
- "192.168.0.0/16"
- "::1/128"
- "fc00::/7"
- "fe80::/10"
logging:
loglevel: 6
quiet: false
recursor:
version_string: "anonymous"
forward_zones_recurse:
- zone: "."
forwarders:
- "1.1.1.1"
- "8.8.8.8"

View File

@@ -0,0 +1,103 @@
-- PowerDNS Generic PostgreSQL schema (gpgsql)
-- Source: PowerDNS pdns/modules/gpgsqlbackend/schema.pgsql.sql
CREATE TABLE domains (
id SERIAL PRIMARY KEY,
name VARCHAR(255) NOT NULL,
master VARCHAR(128) DEFAULT NULL,
last_check INT DEFAULT NULL,
type TEXT NOT NULL,
notified_serial INT DEFAULT NULL,
account VARCHAR(40) DEFAULT NULL,
options TEXT DEFAULT NULL,
catalog VARCHAR(255) DEFAULT NULL
);
CREATE UNIQUE INDEX name_index ON domains(name);
CREATE INDEX catalog_idx ON domains(catalog);
CREATE TABLE records (
id BIGSERIAL PRIMARY KEY,
domain_id INT DEFAULT NULL,
name VARCHAR(255) DEFAULT NULL,
type VARCHAR(10) DEFAULT NULL,
content VARCHAR(65535) DEFAULT NULL,
ttl INT DEFAULT NULL,
prio INT DEFAULT NULL,
disabled BOOL DEFAULT 'f',
ordername VARCHAR(255),
auth BOOL DEFAULT 't'
);
CREATE INDEX rec_name_index ON records(name);
CREATE INDEX nametype_index ON records(name, type);
CREATE INDEX domain_id ON records(domain_id);
CREATE INDEX ordername ON records(ordername);
CREATE TABLE supermasters (
ip INET NOT NULL,
nameserver VARCHAR(255) NOT NULL,
account VARCHAR(40) NOT NULL,
PRIMARY KEY (ip, nameserver)
);
CREATE TABLE comments (
id SERIAL PRIMARY KEY,
domain_id INT NOT NULL,
name VARCHAR(255) NOT NULL,
type VARCHAR(10) NOT NULL,
modified_at INT NOT NULL,
account VARCHAR(40) DEFAULT NULL,
comment VARCHAR(65535) NOT NULL
);
CREATE INDEX comments_domain_id_idx ON comments(domain_id);
CREATE INDEX comments_name_type_idx ON comments(name, type);
CREATE INDEX comments_order_idx ON comments(domain_id, modified_at);
CREATE TABLE domainmetadata (
id SERIAL PRIMARY KEY,
domain_id INT NOT NULL,
kind VARCHAR(32),
content TEXT
);
CREATE INDEX domainmetadata_idx ON domainmetadata(domain_id, kind);
CREATE TABLE cryptokeys (
id SERIAL PRIMARY KEY,
domain_id INT NOT NULL,
flags INT NOT NULL,
active BOOL,
published BOOL DEFAULT TRUE,
content TEXT
);
CREATE INDEX domainidindex ON cryptokeys(domain_id);
CREATE TABLE tsigkeys (
id SERIAL PRIMARY KEY,
name VARCHAR(255),
algorithm VARCHAR(50),
secret VARCHAR(255)
);
CREATE UNIQUE INDEX namealgoindex ON tsigkeys(name, algorithm);
CREATE TABLE luarecords (
id SERIAL PRIMARY KEY,
domain_id INT NOT NULL,
name VARCHAR(255) NOT NULL,
type VARCHAR(10) NOT NULL,
content VARCHAR(65535) NOT NULL,
ttl INT NOT NULL,
prio INT DEFAULT NULL,
disabled BOOL DEFAULT 'f',
ordername VARCHAR(255),
auth BOOL DEFAULT 't'
);
CREATE INDEX luarecord_name_index ON luarecords(name);
CREATE INDEX luarecord_nametype_index ON luarecords(name, type);
CREATE INDEX luarecord_domain_id ON luarecords(domain_id);
CREATE INDEX luarecord_ordername ON luarecords(ordername);

View File

@@ -0,0 +1,9 @@
---
- name: restart dhcpcd
ansible.builtin.shell: |
set -euo pipefail
dhcpcd -k eth0 || true
sleep 1
dhcpcd -f /etc/dhcpcd.conf eth0
args:
executable: /bin/bash

View File

@@ -0,0 +1,4 @@
```bash
cat /etc/resolv.conf
getent hosts ntp-edge.infra.hran
```
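If `getent` comes back empty, querying the DNS server configured in dhcpcd.conf (192.168.0.100 here) directly helps separate a resolv.conf problem from a server-side one; a minimal sketch, assuming dnsutils is installed:

```bash
dig @192.168.0.100 ntp-edge.infra.hran +short
grep domain_name_servers /etc/dhcpcd.conf
```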

View File

@@ -0,0 +1,9 @@
---
- name: render dhcpcd.conf (DNS override)
ansible.builtin.template:
src: dhcpcd.conf.j2
dest: /etc/dhcpcd.conf
owner: root
group: root
mode: "0644"
notify: restart dhcpcd

View File

@@ -0,0 +1,45 @@
# A sample configuration for dhcpcd.
# See dhcpcd.conf(5) for details.
# Allow users of this group to interact with dhcpcd via the control socket.
#controlgroup wheel
# Inform the DHCP server of our hostname for DDNS.
hostname
# Use the hardware address of the interface for the Client ID.
#clientid
# or
# Use the same DUID + IAID as set in DHCPv6 for DHCPv4 ClientID as per RFC4361.
# Some non-RFC compliant DHCP servers do not reply with this set.
# In this case, comment out duid and enable clientid above.
duid
# Persist interface configuration when dhcpcd exits.
persistent
# vendorclassid is set to blank to avoid sending the default of
# dhcpcd-<version>:<os>:<machine>:<platform>
vendorclassid
# A list of options to request from the DHCP server.
option domain_name_servers, domain_name, domain_search
option classless_static_routes
# Respect the network MTU. This is applied to DHCP routes.
option interface_mtu
# Request a hostname from the network
option host_name
# Most distributions have NTP support.
#option ntp_servers
# A ServerID is required by RFC2131.
require dhcp_server_identifier
# Generate SLAAC address using the Hardware Address of the interface
#slaac hwaddr
# OR generate Stable Private IPv6 Addresses based from the DUID
slaac private
static domain_name_servers=192.168.0.100 1.1.1.1 8.8.8.8

View File

@@ -0,0 +1,4 @@
---
- name: update apt cache
apt:
update_cache: yes

View File

@@ -0,0 +1,74 @@
---
# 1) Clean up a potentially broken repo file (same as before)
- name: remove broken docker repo if exists
file:
path: /etc/apt/sources.list.d/docker.list
state: absent
# 2) Minimal set of required packages
- name: install prerequisites
apt:
name:
- ca-certificates
- curl
- gnupg
state: present
update_cache: yes
# 3) Keyring + key
- name: ensure keyrings dir exists
file:
path: /etc/apt/keyrings
state: directory
mode: "0755"
- name: download docker GPG key
get_url:
url: https://download.docker.com/linux/debian/gpg
dest: /etc/apt/keyrings/docker.gpg
mode: "0644"
# 4) Repo (architecture mapped via ansible_architecture -> amd64)
- name: add docker apt repository
copy:
dest: /etc/apt/sources.list.d/docker.list
content: |
deb [arch={{ 'amd64' if ansible_architecture in ['x86_64','amd64'] else ansible_architecture }} signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian {{ ansible_lsb.codename }} stable
# 5) Try containerd.io versions one by one until one installs (and hold it right away)
- name: install first working containerd.io (skip broken versions) and hold
shell: |
set -euo pipefail
apt-get update
mapfile -t versions < <(apt-cache madison containerd.io | awk '{print $3}' | sort -V | tac)
for v in "${versions[@]}"; do
echo "Trying containerd.io=$v"
if apt-get install -y "containerd.io=$v"; then
apt-mark hold containerd.io
exit 0
fi
done
echo "No working containerd.io version found in repo"
exit 1
args:
executable: /bin/bash
changed_when: true
# 6) Docker packages (containerd.io is already installed/held)
- name: install docker packages
apt:
name:
- docker-ce
- docker-ce-cli
- docker-buildx-plugin
- docker-compose-plugin
state: present
update_cache: yes
- name: enable & start docker service
service:
name: docker
state: started
enabled: yes

View File

@@ -0,0 +1,109 @@
# Gitea Setup Notes
## 1. Adding an HTTPS certificate (Let's Encrypt + Nginx)
### Installing certbot
Install certbot on the host (NOT inside the container):
``` bash
sudo apt update
sudo apt install certbot python3-certbot-nginx -y
```
### Basic nginx config (HTTP → proxy to Gitea)
File: `./nginx/nginx.conf`
``` nginx
server {
listen 80;
server_name gitea.quietblock.net;
location / {
proxy_pass http://gitea:3000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
}
```
### Obtaining the certificate
``` bash
sudo certbot certonly --standalone -d gitea.quietblock.net
```
Requests an SSL certificate for the domain in standalone mode.
On success the certificates will be at:
/etc/letsencrypt/live/gitea.quietblock.net/fullchain.pem
/etc/letsencrypt/live/gitea.quietblock.net/privkey.pem
### Docker nginx service
``` yaml
nginx:
image: nginx:stable
container_name: nginx
restart: always
ports:
- "80:80"
- "443:443"
volumes:
- ./nginx:/etc/nginx/conf.d
- /etc/letsencrypt:/etc/letsencrypt:ro
depends_on:
- gitea
```
### Final nginx config (HTTP → HTTPS + SSL)
``` nginx
server {
listen 80;
server_name gitea.quietblock.net;
return 301 https://$host$request_uri;
}
server {
listen 443 ssl;
server_name gitea.quietblock.net;
ssl_certificate /etc/letsencrypt/live/gitea.quietblock.net/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/gitea.quietblock.net/privkey.pem;
location / {
proxy_pass http://gitea:3000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-Proto https;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
}
```
What happens:
- HTTP is redirected to HTTPS
- nginx uses the SSL certificates
- HTTPS traffic is proxied to the gitea container
------------------------------------------------------------------------
## 2. Creating an administrator in Gitea
### Enter the container
``` bash
docker exec -it --user git gitea /bin/bash
```
Opens a shell inside the gitea container as the git user.
### Create the administrator
``` bash
gitea admin user create --username adminuser --password CHANGE_ME_ADMIN_PASSWORD --email you@mail.com --admin
```
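Let's Encrypt certificates expire after roughly 90 days. A hedged sketch of a manual renewal: standalone renewal needs port 80 free, so the nginx container from the compose file above is stopped for the duration (a cron entry or a certbot deploy hook can automate this):

``` bash
docker stop nginx
sudo certbot renew
docker start nginx
```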

View File

@@ -0,0 +1,23 @@
- name: ensure directory structure exists
ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: "root"
group: "root"
mode: "0755"
loop:
- "/opt/gitea"
- "/opt/gitea/nginx"
- name: render stack files
ansible.builtin.template:
src: "{{ item.src }}"
dest: "/opt/gitea/{{ item.dest }}"
owner: "root"
group: "root"
mode: "0644"
loop:
- { src: "docker-compose.yml.j2", dest: "docker-compose.yml" }
- { src: ".env.j2", dest: ".env", mode: "0600" }
- { src: "nginx/nginx.conf.j2", dest: "nginx/nginx.conf" }
register: rendered

View File

@@ -0,0 +1,78 @@
version: "3.9"
services:
postgres:
image: postgres:15
container_name: postgres
restart: always
environment:
POSTGRES_DB: ${POSTGRES_DB}
POSTGRES_USER: ${POSTGRES_USER}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
volumes:
- ./data/postgres:/var/lib/postgresql/data
networks:
- gitea_net
gitea:
image: gitea/gitea:latest
container_name: gitea
restart: always
environment:
- USER_UID=1000
- USER_GID=1000
# DB
- GITEA__database__DB_TYPE=postgres
- GITEA__database__HOST=postgres:5432
- GITEA__database__NAME=${POSTGRES_DB}
- GITEA__database__USER=${POSTGRES_USER}
- GITEA__database__PASSWD=${POSTGRES_PASSWORD}
# basic
- GITEA__server__DOMAIN=${GITEA_URL}
- GITEA__server__ROOT_URL=https://${GITEA_URL}/
- GITEA__server__SSH_DOMAIN=${GITEA_URL}
- GITEA__server__HTTP_PORT=3000
- GITEA__server__SSH_PORT=2222
# security
- GITEA__security__INSTALL_LOCK=true
- GITEA__service__DISABLE_REGISTRATION=true
volumes:
- ./data/gitea:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
depends_on:
- postgres
networks:
- gitea_net
nginx:
image: nginx:stable
container_name: nginx
restart: always
ports:
- "80:80"
- "443:443"
volumes:
- ./nginx:/etc/nginx/conf.d
- /etc/letsencrypt:/etc/letsencrypt:ro
depends_on:
- gitea
networks:
- gitea_net
networks:
gitea_net:

View File

@@ -0,0 +1,23 @@
server {
listen 80;
server_name gitea.quietblock.net;
return 301 https://$host$request_uri;
}
server {
listen 443 ssl;
server_name gitea.quietblock.net;
ssl_certificate /etc/letsencrypt/live/gitea.quietblock.net/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/gitea.quietblock.net/privkey.pem;
location / {
proxy_pass http://gitea:3000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-Proto https;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
client_max_body_size 50M;
}

View File

@@ -0,0 +1,14 @@
---
- name: validate fail2ban config
listen: "validate and restart fail2ban"
become: true
ansible.builtin.command: fail2ban-client -t
register: f2b_validate
changed_when: false
- name: restart fail2ban
listen: "validate and restart fail2ban"
become: true
ansible.builtin.systemd:
name: fail2ban
state: restarted

View File

@@ -0,0 +1,58 @@
---
- name: install fail2ban + deps
ansible.builtin.apt:
name:
- fail2ban
- python3
- python3-systemd
- nftables
state: present
update_cache: true
become: true
- name: enable & start nftables
ansible.builtin.systemd:
name: nftables
enabled: true
state: started
become: true
- name: ensure fail2ban directories exist
ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: root
group: root
mode: "0755"
loop:
- /etc/fail2ban
- /etc/fail2ban/jail.d
- /etc/fail2ban/filter.d
become: true
- name: deploy /etc/fail2ban/fail2ban.local
ansible.builtin.template:
src: fail2ban.local.j2
dest: /etc/fail2ban/fail2ban.local
owner: root
group: root
mode: "0644"
notify: validate and restart fail2ban
become: true
- name: deploy /etc/fail2ban/jail.local
ansible.builtin.template:
src: jail.local.j2
dest: /etc/fail2ban/jail.local
owner: root
group: root
mode: "0644"
notify: validate and restart fail2ban
become: true
- name: ensure fail2ban enabled and started
ansible.builtin.systemd:
name: fail2ban
enabled: true
state: started
become: true

View File

@@ -0,0 +1,6 @@
[Definition]
loglevel = INFO
logtarget = /var/log/fail2ban.log
socket = /run/fail2ban/fail2ban.sock
pidfile = /run/fail2ban/fail2ban.pid
dbpurgeage = 86400

View File

@@ -0,0 +1,18 @@
[DEFAULT]
ignoreip = 127.0.0.1/8 ::1
findtime = 600
maxretry = 5
bantime = 1h
backend = systemd
banaction = nftables[type=multiport]
[sshd]
enabled = true
port = 25105
filter = sshd
maxretry = 5
findtime = 600
bantime = 1h
mode = aggressive

View File

@@ -0,0 +1,12 @@
---
- name: validate nftables config
ansible.builtin.command:
cmd: nft -c -f /etc/nftables.conf
listen: apply nftables
changed_when: false
- name: reload nftables
ansible.builtin.systemd:
name: nftables
state: reloaded
listen: apply nftables

View File

@@ -0,0 +1,22 @@
---
- name: install nftables
ansible.builtin.apt:
name: nftables
state: present
update_cache: true
notify: apply nftables
- name: deploy nftables config
ansible.builtin.template:
src: "{{ nftables_conf_name }}"
dest: /etc/nftables.conf
owner: root
group: root
mode: "0644"
notify: apply nftables
- name: enable and start nftables service
ansible.builtin.systemd:
name: nftables
enabled: true
state: started

View File

@@ -0,0 +1,36 @@
#!/usr/sbin/nft -f
flush ruleset
table inet filter {
chain input {
type filter hook input priority 0;
policy drop;
iif "lo" accept
ct state established,related accept
# SSH
tcp dport {{ ssh_port }} accept
# ICMP
ip protocol icmp accept
ip6 nexthdr icmpv6 accept
# Proxmox Web/API (LAN only)
ip saddr 192.168.0.0/24 tcp dport 8006 accept
# NTP
ip saddr 192.168.0.0/24 udp dport {{ ntp_port }} accept
}
chain forward {
type filter hook forward priority 0;
policy drop;
}
chain output {
type filter hook output priority 0;
policy accept;
}
}

View File

@@ -0,0 +1,32 @@
#!/usr/sbin/nft -f
flush ruleset
table inet filter {
chain input {
type filter hook input priority 0;
policy drop;
iif "lo" accept
ct state established,related accept
# SSH
tcp dport {{ ssh_port }} accept
# udp dport {{ ntp_port }} accept
# ICMP
ip protocol icmp accept
ip6 nexthdr icmpv6 accept
}
chain forward {
type filter hook forward priority 0;
policy drop;
}
chain output {
type filter hook output priority 0;
policy accept;
}
}

View File

@@ -0,0 +1,25 @@
---
- name: ensure sshd_config.d directory exists
become: true
file:
path: "/etc/ssh/sshd_config.d"
state: directory
owner: root
group: root
mode: "0755"
- name: deploy sshd config file
become: true
template:
src: "00-sshd_config-hardening.conf.j2"
dest: "/etc/ssh/sshd_config.d/00-sshd_config-hardening.conf"
owner: root
group: root
mode: "0644"
validate: "sshd -t -f %s"
- name: restart SSH service
become: true
service:
name: ssh
state: restarted

View File

@@ -0,0 +1,107 @@
# --- MAIN ---
# Change default port 22 → {{ ssh_port }} (reduces noise from scanners)
Port {{ ssh_port }}
# Optionally limit interfaces (default is all)
# ListenAddress 0.0.0.0 # IPv4
# ListenAddress :: # IPv6
# Allow only SSH protocol version 2 (v1 is insecure)
Protocol 2
# --- AUTHENTICATION ---
# Restrict root login to key-based auth only (no root password logins)
PermitRootLogin prohibit-password
# Disable password login (keys only)
PasswordAuthentication no
# Disable interactive keyboard auth (OTP, TOTP, etc.)
KbdInteractiveAuthentication no
# Disable challenge-response auth (legacy)
ChallengeResponseAuthentication no
# Enable public key authentication (main method)
PubkeyAuthentication yes
# --- ACCESS ---
# Allow only specific user
# AllowUsers adminuser
# Or alternatively allow a group:
# AllowGroups sshusers
# --- FUNCTION RESTRICTIONS ---
# Disallow empty passwords
PermitEmptyPasswords no
# Disallow user environment modification (~/.ssh/environment)
PermitUserEnvironment no
# Disable X11 forwarding (no GUI sessions)
X11Forwarding no
# TCP forwarding left enabled (set to "no" to block SSH tunnels)
AllowTcpForwarding yes
# Disable gateway ports (no external binding)
GatewayPorts no
# Disable VPN tunnels via SSH
PermitTunnel no
# SSH agent forwarding left enabled (set to "no" to disable it)
AllowAgentForwarding yes
# --- ANTI-BRUTEFORCE & STABILITY ---
# Login timeout (20 seconds)
LoginGraceTime 20
# Max 3 auth attempts per connection
MaxAuthTries 3
# Limit simultaneous connections
# Allow 10 new, start dropping at 30, max 60 queued
MaxStartups 10:30:60
# --- SESSION ACTIVITY ---
# Ping client every 300s (5 minutes)
ClientAliveInterval 300
# Disconnect if no response twice
ClientAliveCountMax 2
# Disable TCP keepalive
TCPKeepAlive no
# Skip DNS checks for faster login
UseDNS no
# --- SFTP ---
# Use internal SFTP subsystem
Subsystem sftp internal-sftp
# --- CRYPTOGRAPHY (optional) ---
# Modern key exchange algorithms (if supported)
# KexAlgorithms sntrup761x25519-sha512@openssh.com,curve25519-sha256
# Modern ciphers
# Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr
# Modern MAC algorithms
# MACs umac-128-etm@openssh.com,hmac-sha2-256-etm@openssh.com

View File

@@ -0,0 +1,15 @@
---
- name: restart unattended-upgrades
ansible.builtin.service:
name: unattended-upgrades
state: restarted
enabled: true
- name: restart apt timers
ansible.builtin.systemd:
name: "{{ item }}"
state: restarted
enabled: true
loop:
- apt-daily.timer
- apt-daily-upgrade.timer

View File

@@ -0,0 +1,17 @@
```bash
## Check that the timers are enabled and ticking
systemctl status apt-daily.timer apt-daily-upgrade.timer
systemctl list-timers --all | egrep 'apt-daily|apt-daily-upgrade'
## Check that unattended-upgrades has actually run
systemctl status unattended-upgrades.service
journalctl -u unattended-upgrades --no-pager -n 200
## Check the logs and what was actually done
ls -l /var/log/unattended-upgrades/
tail -n 200 /var/log/unattended-upgrades/unattended-upgrades.log
tail -n 200 /var/log/unattended-upgrades/unattended-upgrades-dpkg.log
## Quick self-test (dry run)
unattended-upgrade --dry-run --debug
```
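One more check worth having: confirm that the APT::Periodic values from 20auto-upgrades and the origins pattern are what apt actually sees (a sketch):

```bash
apt-config dump | grep -E 'APT::Periodic|Unattended-Upgrade::Origins-Pattern'
```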

View File

@@ -0,0 +1,49 @@
---
- name: ensure required packages are present
ansible.builtin.apt:
name:
- unattended-upgrades
- apt-listchanges
- gpg
state: present
update_cache: true
- name: ensure debian-security repo is present
ansible.builtin.apt_repository:
repo: >-
deb http://deb.debian.org/debian-security
{{ ansible_facts.lsb.codename | default(ansible_facts.distribution_release) }}-security
main contrib non-free non-free-firmware
state: present
filename: debian-security
update_cache: true
notify: restart apt timers
- name: deploy /etc/apt/apt.conf.d/50unattended-upgrades
ansible.builtin.template:
src: 50unattended-upgrades.j2
dest: /etc/apt/apt.conf.d/50unattended-upgrades
owner: root
group: root
mode: "0644"
notify: restart unattended-upgrades
- name: deploy /etc/apt/apt.conf.d/20auto-upgrades
ansible.builtin.template:
src: 20auto-upgrades.j2
dest: /etc/apt/apt.conf.d/20auto-upgrades
owner: root
group: root
mode: "0644"
notify:
- restart unattended-upgrades
- restart apt timers
- name: enable & start apt timers
ansible.builtin.systemd:
name: "{{ item }}"
state: started
enabled: true
loop:
- apt-daily.timer
- apt-daily-upgrade.timer

View File

@@ -0,0 +1,4 @@
APT::Periodic::Update-Package-Lists "1";
APT::Periodic::Download-Upgradeable-Packages "1";
APT::Periodic::Unattended-Upgrade "1";
APT::Periodic::AutocleanInterval "7";

View File

@@ -0,0 +1,10 @@
Unattended-Upgrade::Origins-Pattern {
"origin=Debian,codename=${distro_codename}-security";
};
Unattended-Upgrade::Automatic-Reboot "false";
Unattended-Upgrade::Automatic-Reboot-Time "03:30";
Unattended-Upgrade::Automatic-Reboot-WithUsers "false";
Unattended-Upgrade::Remove-Unused-Dependencies "true";
Unattended-Upgrade::MinimalSteps "true";

View File

@@ -0,0 +1,8 @@
---
- name: Ensure required Python libraries are installed
ansible.builtin.apt:
name:
- python3-pip
- python3-kubernetes
state: present
update_cache: yes

View File

@@ -0,0 +1,3 @@
```bash
curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
```
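A quick check that the script left a working binary on PATH (a sketch):

```bash
helm version --short
helm repo list || true   # errors harmlessly if no repos are configured yet
```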

View File

@@ -0,0 +1,20 @@
---
- name: Download Helm install script
ansible.builtin.get_url:
url: https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
dest: /tmp/get-helm-3.sh
mode: '0755'
- name: Install Helm
ansible.builtin.command: /tmp/get-helm-3.sh
args:
creates: /usr/local/bin/helm
- name: Verify Helm installation
ansible.builtin.command: helm version
register: helm_version_output
changed_when: false
- name: Show Helm version
ansible.builtin.debug:
var: helm_version_output.stdout

View File

@@ -0,0 +1,172 @@
# roles/k8s/k8scommon/tasks/main.yml
---
# === 1. Package updates and base utilities ===
- name: Install base packages
ansible.builtin.apt:
update_cache: yes
name:
- apt-transport-https
- ca-certificates
- curl
- gnupg
- lsb-release
state: present
# === 2. Disable swap ===
- name: Disable swap immediately
ansible.builtin.command: swapoff -a
changed_when: false
- name: Backup fstab
ansible.builtin.copy:
src: /etc/fstab
dest: /etc/fstab.bak
remote_src: yes
force: no
- name: Comment out swap entries in fstab
ansible.builtin.replace:
path: /etc/fstab
regexp: '^\s*([^#].*\s+swap\s+.*)$'
replace: '# \1'
# === 3. Kernel modules ===
- name: Write kernel modules config for Kubernetes
ansible.builtin.copy:
dest: /etc/modules-load.d/k8s.conf
content: |
overlay
br_netfilter
- name: Load overlay module
ansible.builtin.command: modprobe overlay
changed_when: false
- name: Load br_netfilter module
ansible.builtin.command: modprobe br_netfilter
changed_when: false
# === 4. sysctl for Kubernetes / containerd ===
- name: Configure Kubernetes sysctl params
ansible.builtin.copy:
dest: /etc/sysctl.d/99-kubernetes-cri.conf
content: |
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
- name: Apply sysctl settings
ansible.builtin.command: sysctl --system
changed_when: false
# === 5. Install containerd ===
- name: Install containerd
ansible.builtin.apt:
update_cache: yes
name: containerd
state: present
- name: Ensure containerd config directory exists
ansible.builtin.file:
path: /etc/containerd
state: directory
mode: '0755'
# IMPORTANT: always regenerate config.toml, same as in the manual script
- name: Generate default containerd config (overwrite)
ansible.builtin.shell: |
set -o errexit
containerd config default > /etc/containerd/config.toml
args:
executable: /bin/bash
- name: Enable SystemdCgroup in containerd config
ansible.builtin.replace:
path: /etc/containerd/config.toml
regexp: 'SystemdCgroup = false'
replace: 'SystemdCgroup = true'
- name: Set correct CNI bin_dir in containerd config
ansible.builtin.replace:
path: /etc/containerd/config.toml
regexp: 'bin_dir = .*'
replace: 'bin_dir = "/opt/cni/bin"'
- name: Set correct CNI conf_dir in containerd config
ansible.builtin.replace:
path: /etc/containerd/config.toml
regexp: 'conf_dir = .*'
replace: 'conf_dir = "/etc/cni/net.d"'
- name: Enable and restart containerd
ansible.builtin.systemd:
name: containerd
enabled: true
state: restarted
# === 6. Prepare CNI directories ===
- name: Ensure CNI directories exist
ansible.builtin.file:
path: "{{ item }}"
state: directory
mode: '0755'
loop:
- /opt/cni/bin
- /etc/cni/net.d
# /usr/lib/cni → /opt/cni/bin, only if /usr/lib/cni does not already exist
- name: Check if /usr/lib/cni exists
ansible.builtin.stat:
path: /usr/lib/cni
register: cni_usr_lib
- name: Create symlink /usr/lib/cni -> /opt/cni/bin (if not exists)
ansible.builtin.file:
src: /opt/cni/bin
dest: /usr/lib/cni
state: link
when: not cni_usr_lib.stat.exists
# === 7. Kubernetes v1.34 repository ===
- name: Ensure apt keyrings directory exists
ansible.builtin.file:
path: /etc/apt/keyrings
state: directory
mode: '0755'
- name: Download Kubernetes repo key
ansible.builtin.shell: |
set -o errexit
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.34/deb/Release.key \
| gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
args:
executable: /bin/bash
creates: /etc/apt/keyrings/kubernetes-apt-keyring.gpg
- name: Add Kubernetes apt repository
ansible.builtin.copy:
dest: /etc/apt/sources.list.d/kubernetes.list
content: |
deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.34/deb/ /
- name: Update apt cache after adding Kubernetes repo
ansible.builtin.apt:
update_cache: yes
# === 8. Install kubelet, kubeadm, kubectl and pin the versions ===
- name: Install kubelet, kubeadm, kubectl
ansible.builtin.apt:
name:
- kubelet
- kubeadm
- kubectl
state: present
update_cache: yes
- name: Hold Kubernetes packages
ansible.builtin.command: apt-mark hold kubelet kubeadm kubectl
register: hold_result
changed_when: >-
'hold' in hold_result.stdout
or 'marked' in hold_result.stdout
or hold_result.rc == 0

View File

@@ -0,0 +1,136 @@
# roles/k8s/k8smaster/tasks/main.yml
---
# === 9. kubeadm init (same as step 14 in the manual notes) ===
- name: Initialize Kubernetes control plane (kubeadm init)
ansible.builtin.command: >
kubeadm init
--apiserver-advertise-address={{ ansible_default_ipv4.address }}
--pod-network-cidr=10.244.0.0/16
args:
creates: /etc/kubernetes/admin.conf
# === 10. kubeconfig for root and for the user ===
- name: Ensure kubeconfig directory for root exists
ansible.builtin.file:
path: /root/.kube
state: directory
mode: "0700"
- name: Copy admin kubeconfig for root
ansible.builtin.copy:
src: /etc/kubernetes/admin.conf
dest: /root/.kube/config
owner: root
group: root
mode: "0600"
remote_src: yes
- name: Ensure kubeconfig directory for user exists
ansible.builtin.file:
path: "/home/adminuser/.kube"
state: directory
owner: "adminuser"
group: "adminuser"
mode: "0700"
- name: Copy admin kubeconfig to user home
ansible.builtin.copy:
src: /etc/kubernetes/admin.conf
dest: "/home/adminuser/.kube/config"
owner: "adminuser"
group: "adminuser"
mode: "0600"
remote_src: yes
# === 11. Wait for the API server ===
- name: Wait for Kubernetes API to become reachable
ansible.builtin.command: kubectl get --raw=/healthz
register: api_health
until: api_health.rc == 0
retries: 30
delay: 10
environment:
KUBECONFIG: /etc/kubernetes/admin.conf
# === 12. Install Flannel CNI (do NOT wait for the node to be Ready before this) ===
- name: Install Flannel CNI
ansible.builtin.command: >
kubectl apply --validate=false
-f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
register: flannel_result
until: flannel_result.rc == 0
retries: 10
delay: 6
environment:
KUBECONFIG: /etc/kubernetes/admin.conf
- name: Wait for flannel DaemonSet to be Ready
ansible.builtin.command: >
kubectl -n kube-flannel rollout status daemonset/kube-flannel-ds --timeout=300s
register: flannel_rollout
until: flannel_rollout.rc == 0
retries: 5
delay: 15
environment:
KUBECONFIG: /etc/kubernetes/admin.conf
# === 13. Now wait for the node to become Ready ===
- name: Wait for control-plane node to become Ready
ansible.builtin.shell: |
kubectl get node "$(hostname -s)" \
-o jsonpath='{.status.conditions[?(@.type=="Ready")].status}'
register: node_ready
until: node_ready.stdout == "True"
retries: 30
delay: 10
environment:
KUBECONFIG: /etc/kubernetes/admin.conf
# === 14. Wait for CoreDNS ===
- name: Wait for CoreDNS deployment to be Ready
ansible.builtin.command: >
kubectl -n kube-system rollout status deployment/coredns --timeout=300s
register: coredns_rollout
until: coredns_rollout.rc == 0
retries: 5
delay: 15
environment:
KUBECONFIG: /etc/kubernetes/admin.conf
# === 15. Allow pods on the master (like step 18), if needed ===
- name: Allow scheduling pods on control-plane node
ansible.builtin.command: >
kubectl taint nodes --all node-role.kubernetes.io/control-plane-
environment:
KUBECONFIG: /etc/kubernetes/admin.conf
when: false
# === 16. Check cluster status ===
- name: Get nodes
ansible.builtin.command: kubectl get nodes
register: nodes_out
environment:
KUBECONFIG: /etc/kubernetes/admin.conf
- name: Show nodes
ansible.builtin.debug:
var: nodes_out.stdout
- name: Get all pods in all namespaces
ansible.builtin.command: kubectl get pods -A
register: pods_out
environment:
KUBECONFIG: /etc/kubernetes/admin.conf
- name: Show pods
ansible.builtin.debug:
var: pods_out.stdout
# === 17. Print the join command (like step 20) ===
- name: Get kubeadm join command
ansible.builtin.command: kubeadm token create --print-join-command
register: join_cmd
- name: Show join command
ansible.builtin.debug:
msg: "Use this command on workers: {{ join_cmd.stdout }}"

View File

@@ -0,0 +1,13 @@
---
# === 2. Join the cluster (same as kubeadm join in the manual script) ===
- name: Join node to Kubernetes cluster
ansible.builtin.command: "{{ k8s_kubeadm_join_command }}"
args:
creates: /etc/kubernetes/kubelet.conf
# === 3. Make sure kubelet is enabled and running ===
- name: Ensure kubelet is enabled and running
ansible.builtin.systemd:
name: kubelet
enabled: true
state: started

View File

@@ -0,0 +1,109 @@
```bash
# === Become root (if not already) ===
sudo -i
```
```bash
# === 1. Package updates and base utilities ===
apt-get update -y
apt-get install -y apt-transport-https ca-certificates curl gnupg lsb-release
```
```bash
# === 2. Disable swap immediately ===
swapoff -a
```
```bash
# === 3. Remove swap from /etc/fstab (so it stays off after a reboot) ===
cp /etc/fstab /etc/fstab.bak
sed -i '/ swap / s/^/#/' /etc/fstab
```
```bash
# === 4. Enable the overlay and br_netfilter kernel modules ===
cat <<EOF >/etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
modprobe overlay
modprobe br_netfilter
```
```bash
# === 5. Configure sysctl for Kubernetes and containerd ===
cat <<EOF >/etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
sysctl --system
```
```bash
# === 6. Install containerd ===
apt-get install -y containerd
```
```bash
# === 7. Generate the containerd config and enable SystemdCgroup ===
mkdir -p /etc/containerd
containerd config default >/etc/containerd/config.toml
# Enable SystemdCgroup
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
# (Optional) Make sure the CNI paths are set to /opt/cni/bin and /etc/cni/net.d
sed -i 's@bin_dir = .*@bin_dir = "/opt/cni/bin"@' /etc/containerd/config.toml
sed -i 's@conf_dir = .*@conf_dir = "/etc/cni/net.d"@' /etc/containerd/config.toml
systemctl restart containerd
systemctl enable containerd
```
```bash
# === 8. Prepare directories for the CNI plugins ===
mkdir -p /opt/cni/bin
mkdir -p /etc/cni/net.d
```
```bash
# === 9. Path fix for flannel: /usr/lib/cni → /opt/cni/bin ===
# IMPORTANT: if the /usr/lib/cni directory already exists, SKIP this command
ln -s /opt/cni/bin /usr/lib/cni
```
<!-- # === 9. Install the CNI plugins (official bundle) ===
```bash
curl -L -o /tmp/cni-plugins.tgz \
"https://github.com/containernetworking/plugins/releases/download/v1.5.1/cni-plugins-linux-amd64-v1.5.1.tgz"
tar -C /opt/cni/bin -xzvf /tmp/cni-plugins.tgz
``` -->
<!-- # === 10. (Optional) Symlink /usr/lib/cni -> /opt/cni/bin, if it does NOT exist ===
if [ ! -e /usr/lib/cni ]; then
ln -s /opt/cni/bin /usr/lib/cni
fi -->
```bash
# === 10. Add the official Kubernetes repository (pkgs.k8s.io, v1.34 branch) ===
mkdir -p /etc/apt/keyrings
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.34/deb/Release.key \
| gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.34/deb/ /
EOF
apt-get update -y
```
```bash
# === 11. Install kubelet, kubeadm, kubectl and pin the versions ===
apt-get install -y kubelet kubeadm kubectl
apt-mark hold kubelet kubeadm kubectl
```
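A short verification pass after the install; a sketch that adds an extra numbered step in the same style (exact versions will differ):

```bash
# === 12. Verify the runtime and the pinned packages ===
systemctl is-active containerd
containerd --version
kubeadm version -o short
kubectl version --client
apt-mark showhold | grep -E 'kubelet|kubeadm|kubectl'
```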

View File

@@ -0,0 +1,53 @@
```bash
# === 13. Look up the master's IP addresses ===
hostname -I
# Note the IP you need (e.g. 192.168.0.26) and substitute it into the next command.
# The pod CIDR for Flannel is 10.244.0.0/16
```
```bash
# === 14. Initialize the control plane (kubeadm init) ===
kubeadm init \
--apiserver-advertise-address=192.168.0.154 \
--pod-network-cidr=10.244.0.0/16
```
```bash
# === 15. Set up kubeconfig for root (so kubectl works without extra flags) ===
mkdir -p /root/.kube
cp /etc/kubernetes/admin.conf /root/.kube/config
chown root:root /root/.kube/config
```
```bash
# === 16. (Optional) Copy the kubeconfig to the regular user adminuser ===
# REPLACE adminuser with your own username
mkdir -p /home/adminuser/.kube
cp /etc/kubernetes/admin.conf /home/adminuser/.kube/config
chown adminuser:adminuser /home/adminuser/.kube/config
```
```bash
# === 17. Install Flannel as the CNI plugin ===
kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
```
```bash
# === 18. (Optional) Allow pods on the master (single-node cluster) ===
# If you want to use the master as a worker as well:
kubectl taint nodes --all node-role.kubernetes.io/control-plane-
```
```bash
# === 19. Check cluster status ===
kubectl get nodes
kubectl get pods -A
```
```bash
# === 20. Get the join command for worker nodes ===
kubeadm token create --print-join-command
# Copy the full "kubeadm join ..." command it prints; you will need it on the workers.
```
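Between steps 17 and 19 it can help to watch the CNI and DNS pods come up; a sketch (kube-flannel is the namespace created by the upstream Flannel manifest):

```bash
kubectl -n kube-flannel get pods -o wide
kubectl -n kube-system get deploy coredns
kubectl get nodes -o wide
```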

View File

@@ -0,0 +1,14 @@
```bash
# === 21. Run the join command obtained on the master ===
# Example (THIS IS ONLY AN EXAMPLE, USE YOUR OWN COMMAND FROM STEP 20):
kubeadm join 192.168.0.154:6443 --token 9jz5xr.xvwirgtsp2v2brge \
--discovery-token-ca-cert-hash sha256:e09d4918b52e647af493e8345504ecb9907e79637a52932e730df350d3f76ede
```
```bash
# === 22. Check from the master that the worker has joined the cluster ===
# These commands are run on the master node:
kubectl get nodes
kubectl get pods -A
```

View File

@@ -0,0 +1,87 @@
```bash
helm repo add codecentric https://codecentric.github.io/helm-charts
helm repo update
```
```bash
kubectl create namespace keycloak
```
```bash
vim values-keycloak.yaml
# Which Keycloak image to install
image:
repository: quay.io/keycloak/keycloak
# Pin the version you want here
# (26.0.7 is an example; check the current tags on quay.io/keycloak/keycloak)
tag: "26.0.7"
pullPolicy: IfNotPresent
replicas: 1
# HTTP path Keycloak will be served at
http:
# "/" or "/auth", whichever you prefer; "/" is used here for simplicity
relativePath: "/"
# Connection to the external PostgreSQL database
database:
vendor: postgres
hostname: postgres-postgresql.postgres.svc.cluster.local
port: 5432
database: keycloak
username: keycloak_user
password: "password"
# Keycloak start command (recommended kc.sh style)
command:
- "/opt/keycloak/bin/kc.sh"
- "start"
- "--http-enabled=true"
- "--http-port=8080"
- "--hostname-strict=false"
- "--hostname-strict-https=false"
- "--proxy=edge"
# NGINX Ingress on keycloak.local
ingress:
enabled: true
ingressClassName: "nginx"
annotations:
nginx.ingress.kubernetes.io/ssl-redirect: "false"
rules:
- host: "keycloak.local"
paths:
- path: '{{ tpl .Values.http.relativePath $ | trimSuffix "/" }}/'
pathType: Prefix
tls: [] # TLS can be enabled later via cert-manager
# Keycloak environment variables
extraEnv: |
# Admin user and password
- name: KEYCLOAK_ADMIN
value: admin
- name: KEYCLOAK_ADMIN_PASSWORD
value: password
# Proxy / hostname settings
- name: KC_PROXY
value: edge
- name: KC_HOSTNAME
value: "keycloak.local"
# JGroups discovery via the chart's headless service
- name: JAVA_OPTS_APPEND
value: >-
-XX:+UseContainerSupport
-XX:MaxRAMPercentage=50.0
-Djava.awt.headless=true
-Djgroups.dns.query={{ include "keycloak.fullname" . }}-headless
helm install keycloak codecentric/keycloakx \
--namespace keycloak \
--values values-keycloak.yaml
```
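A hedged post-install check (release and namespace names match the `helm install` above):

```bash
kubectl -n keycloak get pods
kubectl -n keycloak get svc,ingress
helm -n keycloak status keycloak
```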

View File

@@ -0,0 +1,288 @@
```bash
helm repo add jaconi https://charts.jaconi.io
helm repo update
```
```bash
fullnameOverride: "netbird"
config:
database:
DB_TYPE: postgres
HOST: postgres-postgresql.postgres.svc.cluster.local
PORT: 5432
NAME: netbird
USER: netbird_user
PASSWD: password
relay:
enabled: true
config:
NB_EXPOSED_ADDRESS: "netbird-relay.netbird.svc.cluster.local:33080"
signal:
enabled: true
management:
enabled: true
config:
NETBIRD_SIGNAL_URI: "netbird-signal.netbird.svc.cluster.local:10000"
NETBIRD_SIGNAL_PROTOCOL: "https"
NETBIRD_RELAY_DOMAIN: "netbird-relay.netbird.svc.cluster.local"
NETBIRD_RELAY_PORT: "33080"
NETBIRD_STUN_URI: "stun:netbird-signal.netbird.svc.cluster.local:3478"
NETBIRD_TURN_URI: "turn:netbird-signal.netbird.svc.cluster.local:3478"
dashboard:
enabled: true
service:
type: ClusterIP
ingress:
enabled: false
```
```bash
openssl rand -hex 32
kubectl create secret generic netbird-relay-secret \
-n netbird \
--from-literal=netbird-relay-secret-key="8626c1ed1c8cfcb13df6c65819042771a2bf7a280c16f0ba54abea8cde7b560d"
```
```bash
helm install netbird jaconi/netbird \
-n netbird \
--create-namespace \
-f netbird-values.yaml
or
helm upgrade netbird jaconi/netbird \
-n netbird \
-f netbird-values.yaml
```
```bash
kubectl -n netbird get pods
kubectl -n netbird get svc
kubectl -n netbird get ingress
```
<!-- dashboard -->
```bash
vim netbird-dashboard-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: netbird-dashboard
namespace: netbird
labels:
app: netbird-dashboard
spec:
replicas: 1
selector:
matchLabels:
app: netbird-dashboard
template:
metadata:
labels:
app: netbird-dashboard
spec:
containers:
- name: dashboard
image: netbirdio/dashboard:0.45.1
ports:
- containerPort: 80
env:
- name: NB_MANAGEMENT_API_ENDPOINT
value: "http://netbird.local:30830"
```
```bash
vim netbird-dashboard-service.yaml
apiVersion: v1
kind: Service
metadata:
name: netbird-dashboard
namespace: netbird
spec:
selector:
app: netbird-dashboard
ports:
- protocol: TCP
port: 80
targetPort: 80
type: ClusterIP
```
```bash
vim netbird-dashboard-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: netbird-dashboard
namespace: netbird
spec:
ingressClassName: nginx
rules:
- host: netbird.local
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: netbird-dashboard
port:
number: 80
```
```bash
kubectl apply -f netbird-dashboard-deployment.yaml
kubectl apply -f netbird-dashboard-service.yaml
kubectl apply -f netbird-dashboard-ingress.yaml
```
```bash
C:\Windows\System32\drivers\etc\hosts
```
# k8s
```bash
vim netbird-application.yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: netbird # the name the application will have in ArgoCD
namespace: argocd # the namespace where ArgoCD is installed
spec:
project: default
source:
repoURL: https://charts.jaconi.io # the same helm repo as above
chart: netbird # chart name
targetRevision: "*" # a specific version can be pinned; any version for now
helm:
releaseName: netbird # as if you had run "helm install netbird ..."
values: |-
fullnameOverride: "netbird"
config:
database:
DB_TYPE: postgres
HOST: postgres-postgresql.postgres.svc.cluster.local
PORT: 5432
NAME: netbird
USER: netbird_user
PASSWD: password
relay:
enabled: true
config:
NB_EXPOSED_ADDRESS: "netbird-relay.netbird.svc.cluster.local:33080"
signal:
enabled: true
management:
enabled: true
config:
NETBIRD_SIGNAL_URI: "netbird-signal.netbird.svc.cluster.local:10000"
NETBIRD_SIGNAL_PROTOCOL: "https"
NETBIRD_RELAY_DOMAIN: "netbird-relay.netbird.svc.cluster.local"
NETBIRD_RELAY_PORT: "33080"
NETBIRD_STUN_URI: "stun:netbird-signal.netbird.svc.cluster.local:3478"
NETBIRD_TURN_URI: "turn:netbird-signal.netbird.svc.cluster.local:3478"
dashboard:
enabled: true
service:
type: ClusterIP
ingress:
enabled: true
className: nginx
hosts:
- host: netbird.local
paths:
- path: /
pathType: Prefix
destination:
server: https://kubernetes.default.svc
namespace: netbird # the chart will be installed into this namespace
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true
kubectl apply -f netbird-application.yaml -n argocd
```
```bash
kubectl create namespace netbird || true
kubectl create secret generic netbird-relay-secret \
-n netbird \
--from-literal=netbird-relay-secret-key="8626c1ed1c8cfcb13df6c65819042771a2bf7a280c16f0ba54abea8cde7b560d"
```
```bash
helm repo add jaconi https://charts.jaconi.io
helm repo update
vim netbird-dashboard-values.yaml
image:
# UI image version; the v2.22.2 tag exists on Docker Hub
# see netbirdio/dashboard:v2.22.2
tag: v2.22.2
auth:
# OIDC provider (e.g. Keycloak)
authority: https://keycloak.example.com/realms/homelab
audience: netbird
clientID: netbird
supportedScopes: >
openid profile email offline_access netbird-api
netbird:
# HTTP API of the NetBird management service (the same one the clients connect to)
managementApiEndpoint: https://netbird.example.com
# gRPC endpoint of the same service
managementGrpcApiEndpoint: https://netbird.example.com
ingress:
enabled: true
className: nginx
annotations:
# Example for cert-manager; remove it if you do not use cert-manager
cert-manager.io/cluster-issuer: letsencrypt
hosts:
- host: netbird.example.com
paths:
- path: /
pathType: Prefix
tls:
- secretName: netbird-tls-certificate
hosts:
- netbird.example.com
# any namespace works, but netbird is the usual choice
kubectl create namespace netbird --dry-run=client -o yaml | kubectl apply -f -
helm install netbird-dashboard jaconi/netbird-dashboard \
--namespace netbird \
--values netbird-dashboard-values.yaml
```
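A hedged post-install check covering both the direct helm releases and the ArgoCD Application variant (the last command only applies if the ArgoCD route above was used):

```bash
kubectl -n netbird get pods,svc,ingress
helm -n netbird list
kubectl -n argocd get application netbird
```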

View File

@@ -0,0 +1,20 @@
---
- name: Set hostname
ansible.builtin.hostname:
name: "{{ hostname }}"
- name: Ensure /etc/hosts has proper hostname entry
ansible.builtin.lineinfile:
path: /etc/hosts
regexp: "^127\\.0\\.1\\.1"
line: "127.0.1.1 {{ hostname }}"
create: yes
backup: yes
- name: Reboot system
ansible.builtin.reboot:
msg: "Rebooting after hostname change"
connect_timeout: 5
reboot_timeout: 300
pre_reboot_delay: 0
post_reboot_delay: 10

View File

@@ -0,0 +1,5 @@
---
- name: execute [ {{ command }} ] command
ansible.builtin.command: "{{ command }}"
args:
chdir: "{{ chdir | default(omit) }}"
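A hedged usage sketch: the role expects `command` (and optionally `chdir`) from the caller; the role name `command` and the values below are assumptions.
```yaml
# Hypothetical invocation; role name and values are assumptions.
- name: run ceph status through the generic command role
  ansible.builtin.include_role:
    name: command
  vars:
    command: "ceph -s"
    chdir: /root
```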

View File

@@ -0,0 +1,7 @@
---
- name: copy local directory to remote node (recursive)
ansible.builtin.copy:
src: "{{ resource_dir }}"
dest: "{{ target_dir }}"
mode: "0644"
directory_mode: "0755"
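A short usage sketch; the role name and paths are hypothetical. With `ansible.builtin.copy`, a `src` ending in `/` copies the directory contents, while omitting the slash copies the directory itself.
```yaml
# Hypothetical invocation; role name and paths are assumptions.
- name: push packer configs to the build host
  ansible.builtin.include_role:
    name: copy_dir
  vars:
    resource_dir: files/packer/ # trailing slash: copy the contents, not the directory
    target_dir: /opt/packer/
```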

View File

@@ -0,0 +1,5 @@
- name: remove file
become: true
ansible.builtin.file:
path: "{{ file_path }}"
state: absent

View File

@@ -0,0 +1,6 @@
---
# Whether to remove the home directory and mail spool (/var/mail/<user>)
remove_user_home: true
# Force removal even if the user still has running processes (useful for build/packer hosts)
remove_user_force: true

View File

@@ -0,0 +1,13 @@
---
- name: remove sudoers drop-in for {{ remove_user }} user (if exists)
  ansible.builtin.file:
    path: "/etc/sudoers.d/{{ remove_user }}"
    state: absent
- name: remove {{ remove_user }} user
ansible.builtin.user:
name: "{{ remove_user }}"
state: absent
remove: "{{ remove_user_home }}"
force: "{{ remove_user_force }}"
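A usage sketch under the assumption that the role is called to drop a temporary provisioning user after an image build; the role name and user name are assumptions.
```yaml
# Hypothetical invocation; role name and user name are assumptions.
- name: remove the temporary packer user
  ansible.builtin.include_role:
    name: remove_user
  vars:
    remove_user: packer
    remove_user_home: true
    remove_user_force: true
```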

View File

@@ -0,0 +1,5 @@
---
- name: restart chrony
ansible.builtin.service:
name: chrony
state: restarted

View File

@@ -0,0 +1,74 @@
---
- name: install chrony
ansible.builtin.apt:
name:
- chrony
state: present
update_cache: true
# avoid running two time clients at once (kept minimal, without elaborate checks)
- name: stop and disable systemd-timesyncd (if exists)
ansible.builtin.service:
name: systemd-timesyncd
state: stopped
enabled: false
ignore_errors: true
- name: ensure /etc/chrony/sources.d exists
ansible.builtin.file:
path: /etc/chrony/sources.d
state: directory
owner: root
group: root
mode: "0755"
- name: ensure /etc/chrony/conf.d exists
ansible.builtin.file:
path: /etc/chrony/conf.d
state: directory
owner: root
group: root
mode: "0755"
- name: deploy /etc/chrony/chrony.conf
ansible.builtin.template:
src: chrony.conf.j2
dest: /etc/chrony/chrony.conf
owner: root
group: root
mode: "0644"
notify: restart chrony
- name: configure upstream sources
ansible.builtin.template:
src: 00-upstream.sources.j2
dest: /etc/chrony/sources.d/00-upstream.sources
owner: root
group: root
mode: "0644"
notify: restart chrony
# server mode: allow clients (optional)
- name: configure allowed client networks (optional)
ansible.builtin.template:
src: 00-allow.conf.j2
dest: /etc/chrony/conf.d/00-allow.conf
owner: root
group: root
mode: "0644"
when: chrony_allow_networks | length > 0
notify: restart chrony
# if allow was configured previously and the role now runs as a client, clean the file up
- name: remove allow config when not needed
ansible.builtin.file:
path: /etc/chrony/conf.d/00-allow.conf
state: absent
when: chrony_allow_networks | length == 0
notify: restart chrony
- name: ensure chrony is enabled and started
ansible.builtin.service:
name: chrony
enabled: true
state: started
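The templates referenced above expect `chrony_upstream_sources` and `chrony_allow_networks` to be defined by the caller. A minimal sketch of the corresponding defaults or group_vars; the upstream list mirrors the servers used in the manual chrony setup elsewhere in this commit, and the example client network is an assumption.
```yaml
# Hypothetical defaults/main.yml (or group_vars) for this role.
chrony_upstream_sources:
  - ntp.time.in.ua
  - ntp2.time.in.ua
  - time.google.com
  - time.cloudflare.com

# Empty list = pure client; list networks here to serve time to local clients.
chrony_allow_networks: []
# chrony_allow_networks:
#   - 192.168.0.0/24
```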

View File

@@ -0,0 +1,5 @@
# Managed by Ansible: allow NTP clients (server)
deny all
{% for net in chrony_allow_networks %}
allow {{ net }}
{% endfor %}

View File

@@ -0,0 +1,4 @@
# Managed by Ansible: upstream NTP sources
{% for s in chrony_upstream_sources %}
server {{ s }} iburst
{% endfor %}

View File

@@ -0,0 +1,47 @@
# Welcome to the chrony configuration file. See chrony.conf(5) for more
# information about usable directives.
# Use Debian vendor zone.
# pool 2.debian.pool.ntp.org iburst
# Use time sources from DHCP.
# sourcedir /run/chrony-dhcp
# Use NTP sources found in /etc/chrony/sources.d.
sourcedir /etc/chrony/sources.d
# This directive specifies the location of the file containing ID/key pairs for
# NTP authentication.
keyfile /etc/chrony/chrony.keys
# This directive specifies the file into which chronyd will store the rate
# information.
driftfile /var/lib/chrony/chrony.drift
# Save NTS keys and cookies.
ntsdumpdir /var/lib/chrony
# Uncomment the following line to turn logging on.
#log tracking measurements statistics
# Log files location.
logdir /var/log/chrony
# Stop bad estimates upsetting machine clock.
maxupdateskew 100.0
# This directive enables kernel synchronisation (every 11 minutes) of the
# real-time clock. Note that it can't be used along with the 'rtcfile' directive.
rtcsync
# Step the system clock instead of slewing it if the adjustment is larger than
# one second, but only in the first three clock updates.
makestep 1 3
# Get TAI-UTC offset and leap seconds from the system tz database.
# This directive must be commented out when using time sources serving
# leap-smeared time.
leapseclist /usr/share/zoneinfo/leap-seconds.list
# Include configuration files found in /etc/chrony/conf.d.
confdir /etc/chrony/conf.d

View File

@@ -0,0 +1,20 @@
```bash
vim /etc/chrony/chrony.conf
# comment out the following lines
pool 2.debian.pool.ntp.org iburst
sourcedir /run/chrony-dhcp
# define external upstream sources in a separate file
cat >/etc/chrony/sources.d/00-upstream.sources <<'EOF'
server ntp.time.in.ua iburst
server ntp2.time.in.ua iburst
server time.google.com iburst
server time.cloudflare.com iburst
EOF
# apply and verify
systemctl restart chrony
chronyc sources -v
chronyc tracking
```

View File

@@ -0,0 +1,48 @@
---
- name: install base deps for HashiCorp repo
ansible.builtin.apt:
update_cache: true
name:
      - ca-certificates # needed for HTTPS downloads
      - curl            # used to download packer/plugins and the repo key
      - gnupg
      - lsb-release
      - unzip           # packer artifacts often ship as zip archives
state: present
- name: ensure keyrings dir exists
ansible.builtin.file:
path: /usr/share/keyrings
state: directory
mode: "0755"
- name: add HashiCorp GPG key (dearmored)
ansible.builtin.shell: |
set -euo pipefail
curl -fsSL https://apt.releases.hashicorp.com/gpg \
| gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg
args:
executable: /bin/bash
creates: /usr/share/keyrings/hashicorp-archive-keyring.gpg
- name: add HashiCorp APT repository
ansible.builtin.copy:
dest: /etc/apt/sources.list.d/hashicorp.list
mode: "0644"
content: |
deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com {{ ansible_distribution_release }} main
- name: install packer
ansible.builtin.apt:
update_cache: true
name: packer
state: present
- name: check packer version
ansible.builtin.command: packer version
register: packer_version
changed_when: false
- name: print packer version
ansible.builtin.debug:
var: packer_version.stdout

View File

@@ -0,0 +1,33 @@
---
- name: ensure packer exists
ansible.builtin.command: packer version
changed_when: false
- name: packer init
ansible.builtin.command: packer init .
args:
chdir: "{{ packer_config_dir }}"
changed_when: false
- name: packer fmt
ansible.builtin.command: packer fmt -recursive .
args:
chdir: "{{ packer_config_dir }}"
changed_when: false
- name: packer validate
ansible.builtin.command: packer validate .
args:
chdir: "{{ packer_config_dir }}"
changed_when: false
- name: packer build
ansible.builtin.shell: |
set -euo pipefail
stdbuf -oL -eL packer build -on-error=cleanup -timestamp-ui .
args:
chdir: "{{ packer_config_dir }}"
executable: /bin/bash
environment:
PACKER_LOG: "1"
PACKER_LOG_PATH: ""
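A hedged sketch of how these tasks could be wired into a play; the host group, role name, and `packer_config_dir` value are assumptions.
```yaml
# Hypothetical playbook; names and paths are assumptions.
- name: build images with packer
  hosts: packer_builder
  become: true
  roles:
    - role: packer_run
      vars:
        packer_config_dir: /opt/packer/debian-template
```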

View File

@@ -0,0 +1,4 @@
---
- name: enable snippets on storage "local"
  ansible.builtin.command: >
    pvesm set local --content backup,iso,vztmpl,snippets
  changed_when: true
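An optional follow-up check, sketched as extra tasks rather than part of the role; it only reads `pvesm status` and prints the output.
```yaml
# Hypothetical verification tasks (not part of the role above).
- name: show storage status
  ansible.builtin.command: pvesm status
  register: pvesm_status
  changed_when: false

- name: print storage status
  ansible.builtin.debug:
    var: pvesm_status.stdout_lines
```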

View File

@@ -0,0 +1,41 @@
## 1.0 Quick check that the GPU is visible to the host
lspci -nn | grep -i nvidia
## 1.1 GRUB
nano /etc/default/grub
GRUB_CMDLINE_LINUX_DEFAULT="quiet iommu=pt"
update-grub
reboot
## 1.2 VFIO modules (on kernels 6.2+ vfio_virqfd is built into vfio and can be omitted)
nano /etc/modules-load.d/vfio.conf
vfio
vfio_iommu_type1
vfio_pci
vfio_virqfd
## 1.3 Bind the GPU to vfio-pci by ID
nano /etc/modprobe.d/vfio.conf
options vfio-pci ids=10de:2d58,10de:22eb disable_vga=1
## 1.4 Blacklist nouveau (and do not install the NVIDIA driver on the host if you want clean passthrough)
nano /etc/modprobe.d/blacklist-nouveau.conf
blacklist nouveau
options nouveau modeset=0
## 1.5 Rebuild the initramfs and reboot
update-initramfs -u -k all
reboot
## 1.6 Verify the GPU is actually bound to vfio-pci
dmesg | grep -E "AMD-Vi|IOMMU" | tail -n 50
lspci -nnk -s 01:00.0
lspci -nnk -s 01:00.1
## Create a PCI mapping for the RTX 5070 in Proxmox
Datacenter → Resource Mapping → PCI Devices → Add
Create the mapping:
rtx5070_gpu → 0000:01:00
dmesg | grep -E "IOMMU|AMD-Vi"
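The next step, attaching the mapped device to a VM, could look like the hedged sketch below; the VM ID 100 is hypothetical, and the `mapping=` option assumes Proxmox VE 8+ resource mappings.
```yaml
# Hypothetical task; VM ID 100 is an assumption, the mapping name matches the one created above.
- name: attach the mapped RTX 5070 to the VM
  ansible.builtin.command: >
    qm set 100 --hostpci0 mapping=rtx5070_gpu,pcie=1
  changed_when: true
```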

View File

@@ -0,0 +1,10 @@
---
- name: update LXC template index
ansible.builtin.command: pveam update
register: pveam_update
changed_when: false
- name: download LXC template
ansible.builtin.command: "pveam download local {{ lxc_template_name }}"
args:
creates: "/var/lib/vz/template/cache/{{ lxc_template_name }}"
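The role leaves `lxc_template_name` to the caller. A hypothetical value is sketched below; the exact file name changes over time, so check `pveam available --section system` for the current one.
```yaml
# Hypothetical variable value; verify the current name with `pveam available --section system`.
lxc_template_name: debian-12-standard_12.7-1_amd64.tar.zst
```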

View File

@@ -0,0 +1,5 @@
---
- name: shutdown LXC container
ansible.builtin.command: pct shutdown {{ lxc_id }}
become: true
changed_when: true

View File

@@ -0,0 +1,30 @@
---
- name: remove proxmox enterprise repo
ansible.builtin.file:
path: /etc/apt/sources.list.d/pve-enterprise.sources
state: absent
- name: remove ceph enterprise repo
ansible.builtin.file:
path: /etc/apt/sources.list.d/ceph.sources
state: absent
- name: remove duplicate no-subscription entries from /etc/apt/sources.list
ansible.builtin.replace:
path: /etc/apt/sources.list
regexp: "^deb .*pve-no-subscription.*$"
replace: ""
ignore_errors: true
- name: ensure proxmox no-subscription repo file exists
ansible.builtin.copy:
dest: /etc/apt/sources.list.d/pve-no-subscription.list
content: |
deb http://download.proxmox.com/debian/pve trixie pve-no-subscription
owner: root
group: root
mode: "0644"
- name: update apt cache
ansible.builtin.apt:
    update_cache: true

View File

@@ -0,0 +1,18 @@
---
- name: Ensure ISO directory exists
ansible.builtin.file:
path: /var/lib/vz/template/iso
state: directory
owner: root
group: root
mode: "0755"
- name: Download Debian netinst ISO
ansible.builtin.get_url:
url: "{{ vm_iso_url }}"
dest: "/var/lib/vz/template/iso/{{ vm_iso_name }}"
mode: "0644"
owner: root
group: root
    force: false # do not re-download if the file is already present
timeout: 60
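The two variables consumed here are not defined in this file. Hypothetical values are sketched below; the exact ISO file name changes with every Debian point release, so confirm it on cdimage.debian.org before use.
```yaml
# Hypothetical values; adjust the file name to the current Debian release.
vm_iso_name: debian-13.1.0-amd64-netinst.iso
vm_iso_url: "https://cdimage.debian.org/debian-cd/current/amd64/iso-cd/{{ vm_iso_name }}"
```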