init
53
.devcontainer/Dockerfile
Executable file
@@ -0,0 +1,53 @@
FROM debian:bookworm-slim

ARG DEBIAN_FRONTEND=noninteractive
ARG TERRAFORM_VERSION=1.8.5

RUN apt-get update && apt-get install -y --no-install-recommends \
    ca-certificates curl unzip git \
    make openssh-client \
    python3 python3-pip python3-venv \
    locales gnupg \
    && rm -rf /var/lib/apt/lists/*

# Generate the UTF-8 locale
RUN sed -i 's/^# *\(en_US.UTF-8 UTF-8\)/\1/' /etc/locale.gen \
    && locale-gen

ENV LANG=en_US.UTF-8 \
    LANGUAGE=en_US:en \
    LC_ALL=en_US.UTF-8

# --- Packer (via the HashiCorp APT repo) ---
RUN set -eux; \
    curl -fsSL https://apt.releases.hashicorp.com/gpg | gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg; \
    codename="$(. /etc/os-release && echo "$VERSION_CODENAME")"; \
    echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com ${codename} main" > /etc/apt/sources.list.d/hashicorp.list; \
    apt-get update; \
    apt-get install -y --no-install-recommends packer; \
    rm -rf /var/lib/apt/lists/*; \
    packer version

# --- Ansible (in a venv) ---
RUN python3 -m venv /opt/ansible \
    && /opt/ansible/bin/pip install --no-cache-dir --upgrade pip \
    && /opt/ansible/bin/pip install --no-cache-dir ansible \
    && ln -sf /opt/ansible/bin/ansible /usr/local/bin/ansible \
    && ln -sf /opt/ansible/bin/ansible-playbook /usr/local/bin/ansible-playbook \
    && ln -sf /opt/ansible/bin/ansible-galaxy /usr/local/bin/ansible-galaxy \
    && ansible --version

# --- Terraform ---
RUN set -eux; \
    arch="$(dpkg --print-architecture)"; \
    case "$arch" in \
        amd64) tf_arch="amd64" ;; \
        arm64) tf_arch="arm64" ;; \
        *) echo "Unsupported arch: $arch"; exit 1 ;; \
    esac; \
    curl -fsSL "https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_${tf_arch}.zip" -o /tmp/terraform.zip; \
    unzip /tmp/terraform.zip -d /usr/local/bin; \
    rm -f /tmp/terraform.zip; \
    terraform version

WORKDIR /work
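To exercise the image outside of VS Code (a sketch; the tag name is arbitrary):

``` bash
docker build -t debian-devops .devcontainer/
docker run --rm -it -v "$PWD:/work" debian-devops \
    bash -lc 'terraform version && packer version && ansible --version'
```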
39
.devcontainer/devcontainer.json
Executable file
@@ -0,0 +1,39 @@
{
  "name": "debian-devops",
  "build": {
    "dockerfile": "Dockerfile"
  },
  "remoteUser": "root",
  "forwardPorts": [
    8006
  ],
  "portsAttributes": {
    "8006": {
      "label": "Proxmox 8006"
    }
  },
  "customizations": {
    "vscode": {
      "extensions": [
        "saoudrizwan.claude-dev",
        "hashicorp.terraform",
        "redhat.vscode-yaml",
        "EditorConfig.EditorConfig",
        "eamodio.gitlens",
        "bierner.markdown-preview-github-styles"
      ],
      "settings": {
        "editor.formatOnSave": true,
        "[terraform]": {
          "editor.defaultFormatter": "hashicorp.terraform",
          "editor.formatOnSave": true
        },
        "[terraform-vars]": {
          "editor.defaultFormatter": "hashicorp.terraform",
          "editor.formatOnSave": true
        }
      }
    }
  },
  "postCreateCommand": "git --version && ansible --version && terraform version && packer version"
}
18
.gitignore
vendored
Normal file
@@ -0,0 +1,18 @@
.ssh
.env*
inventory*
terraform.tfvars

# Terraform
**/.terraform/*
**/*.tfstate
**/*.tfstate.*
**/*.tfplan
**/crash.log
**/crash.*.log
*.auto.tfvars
*.auto.tfvars.json
**/.terraform.lock.hcl
**/.terraform

# **/terraform.tfvars
271
README.md
Normal file
@@ -0,0 +1,271 @@
# 🧠 DevOps Infra Stack: Proxmox + Ceph + Kubernetes + DNS

Fully automated self-hosted infrastructure.



The project deploys:

- Proxmox infrastructure
- Golden VM templates via Packer
- VM provisioning via Terraform
- Hardened nodes (SSH, nftables, fail2ban)
- DNS (PowerDNS)
- NTP (chrony hierarchy)
- Ceph cluster
- Kubernetes cluster
- K8s apps (MetalLB, ingress, postgres operator, valkey)

Everything is deployed via Makefile + Ansible + Terraform + Packer.

------------------------------------------------------------------------

# 🏗 Architecture

Infrastructure components:

- Proxmox host (bare metal)
- LXC packer builder
- Golden VM templates
- VM nodes:
  - DNS
  - NTP
  - Ceph (3 nodes)
  - Kubernetes master
  - Kubernetes worker
- K8s stack:
  - MetalLB
  - nginx ingress
  - Crunchy Postgres Operator
  - Valkey (Redis alternative)

------------------------------------------------------------------------

# 📦 Technology Stack

- Proxmox VE
- Terraform
- Ansible
- Packer
- Docker + Docker Compose (for DNS)
- Ceph
- Kubernetes
- Helm
- PowerDNS
- Chrony
- nftables + fail2ban hardening

------------------------------------------------------------------------

# 🚀 Full Infrastructure Bootstrap

Main entrypoint:

``` bash
make -f bootstrap.mk
```

It will execute:

1. VM creation
2. Hardening
3. DNS setup
4. NTP setup
5. Ceph cluster

------------------------------------------------------------------------

# 🧱 Deployment Stages

## 0. Create LXC + Packer

``` bash
make -f 00_create_and_setup_lxc_container_with_packer.mk
```

- Download LXC template
- Create LXC via Terraform
- Install packer inside LXC

------------------------------------------------------------------------

## 1. Golden VM template

``` bash
make -f 01_create_vm_golden_template.mk
```

- Download ISO
- Upload packer config
- Build golden image
- Shut down packer LXC

------------------------------------------------------------------------

## 2. Create VMs

``` bash
make -f 02_create_vms.mk
```

- Enable cloud-init snippets
- Terraform creates VMs

------------------------------------------------------------------------

## 3. Harden nodes

``` bash
make -f 03_harden_vms.mk
```

- Remove packer user
- SSH hardening
- nftables
- fail2ban

------------------------------------------------------------------------

## 4. DNS

``` bash
make -f 04_setup_dns.mk
```

- PowerDNS install
- Zones + records via Terraform
- systemd-resolved config

------------------------------------------------------------------------

## 5. NTP

``` bash
make -f 05_setup_ntp.mk
```

Hierarchy:

- edge NTP server (proxmox)
- core NTP server
- clients use the core NTP server
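To confirm a node is actually following this chain, chrony can be queried on any of the machines (assuming the `ntp/chrony` role leaves the stock `chronyc` client installed):

``` bash
# List configured sources; the line starting with '^*' is the selected server
chronyc sources -v

# Stratum, offset, and sync status against the chosen source
chronyc tracking
```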
------------------------------------------------------------------------

## 6. Ceph

``` bash
make -f 06_setup_ceph.mk
```

- install
- bootstrap
- share keys
- cluster init

------------------------------------------------------------------------

## 7. Kubernetes

``` bash
make -f 07_setup_k8s.mk
```

After installation:

``` bash
ssh user@k8smasternode -p 10525
```

Replace the cluster endpoint with a localhost tunnel.
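A minimal sketch of that tunnel, assuming the SSH port and host from the command above and a kubeconfig already copied from the master (whether TLS verification passes against 127.0.0.1 depends on the API server certificate's SANs):

``` bash
# Forward the Kubernetes API server through the hardened SSH port
ssh -p 10525 -N -L 6443:127.0.0.1:6443 user@k8smasternode &

# Point kubectl at the tunnel; add --insecure-skip-tls-verify=true to the
# set-cluster call only if the cert lacks a localhost SAN (testing only)
kubectl config set-cluster kubernetes --server=https://127.0.0.1:6443
kubectl get nodes
```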

Then:

``` bash
terraform apply -target=module.metallb_helm
terraform apply -target=module.crunchy_operator
terraform apply
```

Get credentials:

``` bash
# postgres
kubectl -n postgres-operator get secret hippo-pguser-gitlab -o jsonpath='{.data.user}' | base64 -d; echo

# valkey
kubectl -n valkey get secret valkey-users -o jsonpath='{.data.default}' | base64 -d; echo
```
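The matching postgres password sits in the same secret; Crunchy PGO user secrets also expose `host`, `port`, `dbname`, and a ready-made `uri` (key names per the operator's documented conventions):

``` bash
kubectl -n postgres-operator get secret hippo-pguser-gitlab \
    -o jsonpath='{.data.password}' | base64 -d; echo
```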

------------------------------------------------------------------------

# 📁 Project Structure

    ansible/
    terraform/
    packer/
    makefiles/
    bootstrap.mk

------------------------------------------------------------------------

# 🔐 Requirements

Before running:

- SSH access to Proxmox
- Proxmox API token
- terraform.tfvars filled
- inventory.ini filled
- kubeconfig path specified

------------------------------------------------------------------------

# 🔭 Planned Services & Future Stack

The following services are planned for the next deployment stages:

- **NetBird**: internal VPN mesh network (currently working on this stage)
- **Keycloak**: unified authentication and identity provider across services
- **Monitoring stack (Grafana, Loki, Prometheus, Trickster)**: monitoring and observability tools
  *(previously deployed, but not yet integrated into this project)*
- **FreeIPA**: centralized user and identity management inside operating systems
- **Vault**: centralized storage for passwords, tokens, and operational credentials
- **OpenNebula**: additional virtualization layer for providing user VM spaces
  *(similar to AWS EC2 for internal infrastructure)*
- **Nextcloud + LibreOffice**: Google Cloud alternative for collaborative document editing
  *(Nextcloud deployed previously, but not yet within this project)*
- **Element + Matrix**: Telegram-like communication platform
  *(stack deployed previously, but not yet integrated into this project)*
- **LLM (local language model)**: neural network for text processing
  *(GPT‑2 already tested; LLaMA 7B planned as MVP depending on available resources)*
  Future usage:
  - LibreOffice document assistant
  - Matrix/Element chatbot integration
- **Kafka**: message queue layer between LibreOffice, Element, and LLM services.
  Ensures reliable request delivery and acts as a service integration layer
- **OCR tools**: document recognition and conversion pipeline.
  Enables transforming documents into formats suitable for LLM processing and search

------------------------------------------------------------------------

# 🧠 Project Idea

A self-hosted cloud platform: our own mini cloud, fully autonomous infrastructure.

# 👤 Author

Aleksandr Hrankin
6
ansible/ansible.cfg
Normal file
@@ -0,0 +1,6 @@
[defaults]
inventory = ./inventory.ini
roles_path = ./roles
host_key_checking = False
deprecation_warnings = False
interpreter_python = auto
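The config expects an `inventory.ini` next to it (gitignored above). A minimal sketch with the hostnames the playbooks target; only the IPs that appear elsewhere in this commit are real, and the ntp/worker/LXC hosts would follow the same pattern:

``` bash
cat > ansible/inventory.ini <<'EOF'
dev-kyiv01-psy-proxmox-main-01 ansible_host=192.168.0.1
dev-kyiv01-vm-dns-main-01      ansible_host=192.168.0.100
dev-kyiv01-vm-ceph-main-01     ansible_host=192.168.0.102
dev-kyiv01-vm-ceph-main-02     ansible_host=192.168.0.103
dev-kyiv01-vm-ceph-main-03     ansible_host=192.168.0.104
dev-kyiv01-vm-k8s-master-01    ansible_host=192.168.0.105
EOF
```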
12
ansible/playbooks/ceph/00_install.yml
Normal file
@@ -0,0 +1,12 @@
---
- name: install ceph
  hosts:
    - dev-kyiv01-vm-ceph-main-01
    - dev-kyiv01-vm-ceph-main-02
    - dev-kyiv01-vm-ceph-main-03
  become: true

  roles:
    - role: ceph/00_install

# ansible-playbook playbooks/ceph/00_install.yml -i inventory.ini
10
ansible/playbooks/ceph/01_bootstrap.yml
Normal file
@@ -0,0 +1,10 @@
---
- name: bootstrap ceph
  hosts:
    - dev-kyiv01-vm-ceph-main-01
  become: true

  roles:
    - role: ceph/01_bootstrap

# ansible-playbook playbooks/ceph/01_bootstrap.yml -i inventory.ini
12
ansible/playbooks/ceph/02_share_pubkey.yml
Normal file
@@ -0,0 +1,12 @@
---
- name: share ceph pubkey
  hosts:
    - dev-kyiv01-vm-ceph-main-01
    - dev-kyiv01-vm-ceph-main-02
    - dev-kyiv01-vm-ceph-main-03
  become: true

  roles:
    - role: ceph/02_share_pubkey

# ansible-playbook playbooks/ceph/02_share_pubkey.yml -i inventory.ini
10
ansible/playbooks/ceph/03_setup_cluster.yml
Normal file
@@ -0,0 +1,10 @@
---
- name: setup ceph cluster
  hosts:
    - dev-kyiv01-vm-ceph-main-01
  become: true

  roles:
    - role: ceph/03_setup_cluster

# ansible-playbook playbooks/ceph/03_setup_cluster.yml -i inventory.ini
19
ansible/playbooks/dns/install_powerdns.yml
Normal file
@@ -0,0 +1,19 @@
---
- name: setup powerdns
  hosts:
    - dev-kyiv01-vm-dns-main-01
  become: true

  roles:
    - role: install_docker

    - role: dns/push_powerdns_configs_to_node
      vars:
        dns_stack_root: /opt/dns-stack

    - role: node/execute_command
      vars:
        chdir: "/opt/dns-stack"
        command: "docker compose up -d"

# ansible-playbook playbooks/dns/install_powerdns.yml -i inventory.ini
17
ansible/playbooks/dns/setup_systemd_resolved_config.yml
Normal file
@@ -0,0 +1,17 @@
---
- name: setup systemd resolved config
  hosts:
    - dev-kyiv01-vm-dns-main-01
    - dev-kyiv01-vm-ntp-main-01
    - dev-kyiv01-vm-ceph-main-01
    - dev-kyiv01-vm-ceph-main-02
    - dev-kyiv01-vm-ceph-main-03
    - dev-kyiv01-vm-k8s-master-01
    - dev-kyiv01-vm-k8s-worker-01
  become: true
  roles:
    - role: dns/setup_systemd_resolved_config
      vars:
        dns_ip: 192.168.0.100

# ansible-playbook playbooks/dns/setup_systemd_resolved_config.yml -i inventory.ini
9
ansible/playbooks/docker/install.yml
Normal file
@@ -0,0 +1,9 @@
---
- name: install docker
  hosts:
    - ec2
  become: true
  roles:
    - role: docker

# ansible-playbook playbooks/docker/install.yml -i inventory.ec2.ini
9
ansible/playbooks/gitea/main.yml
Normal file
@@ -0,0 +1,9 @@
---
- name: copy gitea configs to node
  hosts:
    - ec2
  become: true
  roles:
    - role: gitea

# ansible-playbook playbooks/gitea/main.yml -i inventory.ec2.ini
20
ansible/playbooks/harden/harden_node.yml
Normal file
@@ -0,0 +1,20 @@
---
- name: harden node
  hosts: ec2
  become: true
  roles:
    - role: harden/fail2ban

    - role: harden/unattended_upgrades

    - role: harden/sshd_config
      vars:
        ssh_port: "{{ ssh_port }}"

    - role: harden/nftables
      vars:
        ssh_port: "{{ ssh_port }}"
        # ntp_port: "{{ ntp_port }}"
        nftables_conf_name: "vm-nftables.conf.j2"

# ansible-playbook playbooks/harden/harden_node.yml -i inventory.ec2.ini -e "ssh_port=25105"
21
ansible/playbooks/harden/harden_proxmox.yml
Normal file
@@ -0,0 +1,21 @@
---
- name: harden proxmox node
  hosts:
    - dev-kyiv01-psy-proxmox-main-01
  become: true
  roles:
    - role: harden/fail2ban

    - role: harden/unattended_upgrades

    - role: harden/sshd_config
      vars:
        ssh_port: "25105"

    - role: harden/nftables
      vars:
        nftables_conf_name: "proxmox-nftables.conf.j2"
        ssh_port: "25105"
        ntp_port: "123"

# ansible-playbook playbooks/harden/harden_proxmox.yml -i inventory.ini
12
ansible/playbooks/k8s/install/k8s_master.yml
Normal file
@@ -0,0 +1,12 @@
---
- name: install k8s master
  hosts: dev-kyiv01-vm-k8s-master-01
  become: true

  roles:
    - role: k8s/install/00_python
    - role: k8s/install/01_helm
    - role: k8s/install/02_common
    - role: k8s/install/03_master

# ansible-playbook playbooks/k8s/install/k8s_master.yml -i inventory.ini
10
ansible/playbooks/k8s/install/k8s_worker.yml
Normal file
@@ -0,0 +1,10 @@
- name: install k8s worker
  hosts: dev-kyiv01-vm-k8s-worker-01
  become: true
  roles:
    - role: k8s/install/02_common
    - role: k8s/install/04_worker
      vars:
        k8s_kubeadm_join_command: "kubeadm join 192.168.0.105:6443 --token 5n2fv0.w67ya3tqfz8ucsae --discovery-token-ca-cert-hash sha256:9e944ac89557d42bd335ef175d232b3d78fd4b2af5935db23d52e443de539aad"

# ansible-playbook playbooks/k8s/install/k8s_worker.yml -i inventory.ini
11
ansible/playbooks/node/change_hostname.yml
Normal file
@@ -0,0 +1,11 @@
---
- name: change hostname
  hosts: test
  become: true
  gather_facts: false
  roles:
    - role: node/change_hostname
      vars:
        hostname: "dev-lviv01-vm-k8s-worker-01"

# ansible-playbook playbooks/node/change_hostname.yml -i inventory-local.ini
11
ansible/playbooks/node/execute_command.yml
Normal file
@@ -0,0 +1,11 @@
---
- name: execute command
  hosts: all
  become: true
  gather_facts: false
  roles:
    - role: node/execute_command
      vars:
        command: "{{ command }}"

# ansible-playbook playbooks/node/execute_command.yml -i inventory.ini
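Since `command` (and optionally `chdir`, as the DNS playbook shows) comes in as a variable, the playbook doubles as an ad-hoc runner (a hypothetical invocation):

``` bash
ansible-playbook playbooks/node/execute_command.yml -i inventory.ini \
    --limit dev-kyiv01-vm-dns-main-01 -e 'command="docker compose ps"'
```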
11
ansible/playbooks/node/push_dir.yml
Normal file
@@ -0,0 +1,11 @@
---
- name: push dir
  hosts: all
  become: true
  roles:
    - role: node/push_dir
      vars:
        resource_dir: "{{ resource_dir }}"
        target_dir: "{{ target_dir }}"

# ansible-playbook playbooks/node/push_dir.yml -i inventory.ini
10
ansible/playbooks/node/remove_file.yml
Normal file
@@ -0,0 +1,10 @@
---
- name: remove file
  hosts: all
  become: true
  roles:
    - role: node/remove_file
      vars:
        file_path: "{{ file_path }}"

# ansible-playbook playbooks/node/remove_file.yml -i inventory.ini
10
ansible/playbooks/node/remove_user.yml
Normal file
@@ -0,0 +1,10 @@
---
- name: remove user
  hosts: all
  become: true
  roles:
    - role: node/remove_user
      vars:
        user_name: "{{ remove_user }}"

# ansible-playbook playbooks/node/remove_user.yml -i inventory.ini
16
ansible/playbooks/ntp/chrony/00_setup_edge_ntp_node.yml
Normal file
@@ -0,0 +1,16 @@
---
- name: setup edge ntp node
  hosts:
    - dev-kyiv01-psy-proxmox-main-01
  become: true
  roles:
    - role: ntp/chrony
      vars:
        chrony_upstream_sources:
          - ntp.time.in.ua
          - time.google.com
          - time.cloudflare.com
        chrony_allow_networks:
          - 192.168.0.0/24

# ansible-playbook playbooks/ntp/chrony/00_setup_edge_ntp_node.yml -i inventory.ini
14
ansible/playbooks/ntp/chrony/01_setup_core_ntp_node.yml
Normal file
@@ -0,0 +1,14 @@
---
- name: setup core ntp node
  hosts:
    - dev-kyiv01-vm-ntp-main-01
  become: true
  roles:
    - role: ntp/chrony
      vars:
        chrony_upstream_sources:
          - ntp-edge.infra.hran
        chrony_allow_networks:
          - 192.168.0.0/24

# ansible-playbook playbooks/ntp/chrony/01_setup_core_ntp_node.yml -i inventory.ini
19
ansible/playbooks/ntp/chrony/02_setup_client_ntp_node.yml
Normal file
@@ -0,0 +1,19 @@
---
- name: setup client ntp node
  hosts:
    - dev-kyiv01-vm-dns-main-01
    - dev-kyiv01-vm-ceph-main-01
    - dev-kyiv01-vm-ceph-main-02
    - dev-kyiv01-vm-ceph-main-03
    - dev-kyiv01-vm-k8s-master-01
    - dev-kyiv01-vm-k8s-worker-01
  become: true
  roles:
    - role: ntp/chrony
      vars:
        chrony_upstream_sources:
          - ntp-core.infra.hran
        chrony_allow_networks:
          - 192.168.0.0/24

# ansible-playbook playbooks/ntp/chrony/02_setup_client_ntp_node.yml -i inventory.ini
9
ansible/playbooks/packer/install.yml
Normal file
@@ -0,0 +1,9 @@
---
- name: install packer
  hosts:
    - dev-kyiv01-lxc-packer-main-01
  become: true
  roles:
    - role: packer/install

# ansible-playbook playbooks/packer/install.yml -i inventory.ini
11
ansible/playbooks/packer/run.yml
Normal file
@@ -0,0 +1,11 @@
---
- name: run packer
  hosts:
    - dev-kyiv01-lxc-packer-main-01
  become: true
  roles:
    - role: packer/run
      vars:
        packer_config_dir: "/opt/packer/proxmox/debian13"

# ansible-playbook playbooks/packer/run.yml -i inventory.ini
9
ansible/playbooks/proxmox/enable_snippets.yml
Normal file
@@ -0,0 +1,9 @@
---
- name: enable snippets
  hosts:
    - dev-kyiv01-psy-proxmox-main-01
  become: true
  roles:
    - role: proxmox/enable_snippets

# ansible-playbook playbooks/proxmox/enable_snippets.yml -i inventory.ini
11
ansible/playbooks/proxmox/lxc/download_template.yml
Normal file
@@ -0,0 +1,11 @@
---
- name: download lxc template
  hosts:
    - dev-kyiv01-psy-proxmox-main-01
  become: true
  roles:
    - role: proxmox/lxc/download_template
      vars:
        lxc_template_name: "debian-12-standard_12.12-1_amd64.tar.zst"

# ansible-playbook playbooks/proxmox/lxc/download_template.yml -i inventory.ini
11
ansible/playbooks/proxmox/lxc/shutdown.yml
Normal file
@@ -0,0 +1,11 @@
---
- name: shutdown lxc container
  hosts:
    - dev-kyiv01-psy-proxmox-main-01
  become: true
  roles:
    - role: proxmox/lxc/shutdown
      vars:
        lxc_id: 200

# ansible-playbook playbooks/proxmox/lxc/shutdown.yml -i inventory.ini
9
ansible/playbooks/proxmox/setup_proxmox_no_subscription_repository.yml
Normal file
@@ -0,0 +1,9 @@
---
- name: configure proxmox no-subscription repo
  hosts:
    - dev-kyiv01-psy-proxmox-main-01
  become: true
  roles:
    - proxmox/setup_no_subscription_repository

# ansible-playbook playbooks/proxmox/setup_proxmox_no_subscription_repository.yml -i inventory.ini
12
ansible/playbooks/proxmox/vm/download_iso.yml
Normal file
@@ -0,0 +1,12 @@
---
- name: download vm iso
  hosts:
    - dev-kyiv01-psy-proxmox-main-01
  become: true
  roles:
    - role: proxmox/vm/download_iso
      vars:
        vm_iso_name: "debian-13.2.0-amd64-netinst.iso"
        vm_iso_url: "https://cdimage.debian.org/debian-cd/current/amd64/iso-cd/{{ vm_iso_name }}"

# ansible-playbook playbooks/proxmox/vm/download_iso.yml -i inventory.ini
43
ansible/roles/ceph/00_install/tasks/main.yml
Normal file
@@ -0,0 +1,43 @@
---
- name: apt update
  ansible.builtin.apt:
    update_cache: true

- name: apt upgrade
  ansible.builtin.apt:
    upgrade: dist

- name: install base packages
  ansible.builtin.apt:
    name:
      - ca-certificates
      - curl
      - gnupg
      - lvm2
      - podman
    state: present

- name: swapoff
  ansible.builtin.command: swapoff -a
  changed_when: true

- name: comment swap in /etc/fstab
  ansible.builtin.replace:
    path: /etc/fstab
    regexp: '^([^#].*\s+swap\s+.*)$'
    replace: '# \1'

- name: install cephadm and ceph-common
  ansible.builtin.apt:
    name:
      - cephadm
      - ceph-common
    state: present

- name: cephadm version
  ansible.builtin.command: cephadm version
  changed_when: false

- name: ceph -v
  ansible.builtin.command: ceph -v
  changed_when: false
9
ansible/roles/ceph/01_bootstrap/tasks/main.yml
Normal file
@@ -0,0 +1,9 @@
---
- name: cephadm bootstrap
  ansible.builtin.command: >
    cephadm bootstrap
    --mon-ip 192.168.0.102
    --initial-dashboard-user admin
    --initial-dashboard-password password
    --allow-fqdn-hostname
  changed_when: true
25
ansible/roles/ceph/02_share_pubkey/tasks/main.yml
Normal file
@@ -0,0 +1,25 @@
---
- name: get cephadm pub key (run once on ceph01)
  ansible.builtin.command: ceph cephadm get-pub-key
  register: ceph_pubkey_cmd
  changed_when: false
  delegate_to: dev-kyiv01-vm-ceph-main-01
  run_once: true

- name: set ceph pubkey fact for this play
  ansible.builtin.set_fact:
    ceph_pubkey: "{{ ceph_pubkey_cmd.stdout }}"
  run_once: true

- name: add ceph pub key to root authorized_keys
  ansible.posix.authorized_key:
    user: root
    key: "{{ ceph_pubkey }}"
    state: present
  when: inventory_hostname in ["dev-kyiv01-vm-ceph-main-02", "dev-kyiv01-vm-ceph-main-03"]

- name: restart ssh
  ansible.builtin.service:
    name: ssh
    state: restarted
  when: inventory_hostname in ["dev-kyiv01-vm-ceph-main-02", "dev-kyiv01-vm-ceph-main-03"]
40
ansible/roles/ceph/03_setup_cluster/tasks/main.yml
Normal file
@@ -0,0 +1,40 @@
---
- name: add host ceph02
  ansible.builtin.command: >
    ceph orch host add dev-kyiv01-vm-ceph-main-02 192.168.0.103
  changed_when: true

- name: add host ceph03
  ansible.builtin.command: >
    ceph orch host add dev-kyiv01-vm-ceph-main-03 192.168.0.104
  changed_when: true

- name: add osd ceph01 sdb
  ansible.builtin.command: >
    ceph orch daemon add osd dev-kyiv01-vm-ceph-main-01:/dev/sdb
  changed_when: true

- name: add osd ceph01 sdc
  ansible.builtin.command: >
    ceph orch daemon add osd dev-kyiv01-vm-ceph-main-01:/dev/sdc
  changed_when: true

- name: add osd ceph02 sdb
  ansible.builtin.command: >
    ceph orch daemon add osd dev-kyiv01-vm-ceph-main-02:/dev/sdb
  changed_when: true

- name: add osd ceph02 sdc
  ansible.builtin.command: >
    ceph orch daemon add osd dev-kyiv01-vm-ceph-main-02:/dev/sdc
  changed_when: true

- name: add osd ceph03 sdb
  ansible.builtin.command: >
    ceph orch daemon add osd dev-kyiv01-vm-ceph-main-03:/dev/sdb
  changed_when: true

- name: add osd ceph03 sdc
  ansible.builtin.command: >
    ceph orch daemon add osd dev-kyiv01-vm-ceph-main-03:/dev/sdc
  changed_when: true
48
ansible/roles/ceph/04_setup_rgw/readme.md
Normal file
@@ -0,0 +1,48 @@
# "[1/8] Check the cluster"
ceph -s
ceph orch status

# "[2/8] Create realm/zonegroup/zone (errors if they already exist; ignore, or check with list first)"
radosgw-admin realm create --rgw-realm=default --default || true
radosgw-admin zonegroup create --rgw-zonegroup=default --master --default || true
radosgw-admin zone create \
  --rgw-zonegroup=default \
  --rgw-zone=default \
  --master \
  --default || true

# "[3/8] Commit the period (updates the multisite config)"
radosgw-admin period update --commit

# "[4/8] Check realm/zone"
radosgw-admin realm list
radosgw-admin zone list

# "[5/8] Deploy RGW as a service via cephadm/orchestrator"
ceph orch apply rgw default --placement="1"

# "[6/8] Check that RGW is up"
ceph orch ls
ceph orch ps --service-name rgw.default
ss -lntp | grep -E 'rgw|civetweb|beast|7480|80|443' || true

# "[7/8] Create the admin (system) user (ops ONLY)"
# Important: the system user is for admin/ops automation, not for applications
radosgw-admin user create \
  --uid="admin" \
  --display-name="RGW Admin (system)" \
  --system || true

# "[8/8] Create the user for Crunchy pgBackRest + a bucket"
# Create a separate user dedicated to pgBackRest
radosgw-admin user create \
  --uid="crunchy-backup" \
  --display-name="Crunchy pgBackRest" || true

# Create the bucket and assign the owner (the uid must exist)
radosgw-admin bucket create \
  --bucket="crunchy-pgbackrest" \
  --uid="crunchy-backup" || true

# "=== Result: credentials for Crunchy ==="
radosgw-admin user info --uid="crunchy-backup"
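The access/secret pair pgBackRest needs can be pulled out of that JSON directly, assuming `jq` is available (a convenience one-liner, not part of the role):

``` bash
radosgw-admin user info --uid="crunchy-backup" \
    | jq -r '.keys[0] | "access_key=\(.access_key)\nsecret_key=\(.secret_key)"'
```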
28
ansible/roles/ceph/05_create_k8s_pool/readme.md
Normal file
@@ -0,0 +1,28 @@
```bash
ceph -s
ceph fsid
ceph mon dump | egrep 'mon\.' -n
ceph osd pool ls

# create the pool (choose pg_num for your cluster size; 64/128 is fine to start)
ceph osd pool create k8s-rbd 128

# enable the "rbd" application (required for CSI)
ceph osd pool application enable k8s-rbd rbd

# (optional) initialize rbd metadata
rbd pool init k8s-rbd

# (optional) set replication size=3 (or whatever your convention is)
ceph osd pool set k8s-rbd size 3

ceph auth get-or-create client.k8s-rbd-csi \
  mon 'profile rbd' \
  osd "profile rbd pool=k8s-rbd" \
  mgr "profile rbd"

# view the key
ceph auth get client.k8s-rbd-csi
```
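Before wiring the key into ceph-csi, the restricted user can be smoke-tested against the new pool (a sketch; the keyring path is the default location `rbd` searches):

``` bash
# Export the CSI user's keyring, then create and remove a throwaway image
ceph auth get client.k8s-rbd-csi -o /etc/ceph/ceph.client.k8s-rbd-csi.keyring
rbd --id k8s-rbd-csi create k8s-rbd/smoke-test --size 1024
rbd --id k8s-rbd-csi rm k8s-rbd/smoke-test
```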
85
ansible/roles/ceph/readme.md
Normal file
@@ -0,0 +1,85 @@
# STAGE 0. OS preparation (on all Ceph nodes)

## system update
apt update && apt upgrade -y

## base packages (without chrony/dns/hosts)
apt install -y \
  ca-certificates \
  curl \
  gnupg \
  lvm2 \
  podman

## disable swap (REQUIRED for k8s; not strictly required for Ceph, but better to do it now)
swapoff -a
sed -i '/ swap / s/^/#/' /etc/fstab

## check the disks (make sure the OSD disks are empty)
lsblk

# STAGE 1. Install cephadm (on the bootstrap node, then on all nodes)

apt install -y cephadm ceph-common
cephadm version
ceph -v

# STAGE 2. Bootstrap the cluster (first node / mon only)

cephadm bootstrap \
  --mon-ip 192.168.0.102 \
  --initial-dashboard-user admin \
  --initial-dashboard-password password \
  --allow-fqdn-hostname

ceph -s
ceph orch ps

# STAGE 3. Add the remaining nodes to the orchestrator

ceph cephadm get-pub-key
systemctl restart ssh
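`get-pub-key` only prints the orchestrator's key; it still has to land in root's `authorized_keys` on the other nodes (the `02_share_pubkey` role automates this; a manual equivalent, hostnames as above):

``` bash
ceph cephadm get-pub-key > /tmp/ceph.pub
ssh-copy-id -f -i /tmp/ceph.pub root@dev-kyiv01-vm-ceph-main-02
ssh-copy-id -f -i /tmp/ceph.pub root@dev-kyiv01-vm-ceph-main-03
```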

ceph orch host add dev-kyiv01-vm-ceph-main-02 192.168.0.103
ceph orch host add dev-kyiv01-vm-ceph-main-03 192.168.0.104

ceph orch host ls

# STAGE 4. Add OSDs (on each node)

## bootstrap node (local)
ceph orch daemon add osd dev-kyiv01-vm-ceph-main-01:/dev/sdb
ceph orch daemon add osd dev-kyiv01-vm-ceph-main-01:/dev/sdc

## vm-ceph-kyiv-02
ceph orch daemon add osd dev-kyiv01-vm-ceph-main-02:/dev/sdb
ceph orch daemon add osd dev-kyiv01-vm-ceph-main-02:/dev/sdc

## vm-ceph-kyiv-03
ceph orch daemon add osd dev-kyiv01-vm-ceph-main-03:/dev/sdb
ceph orch daemon add osd dev-kyiv01-vm-ceph-main-03:/dev/sdc

## Check:

ceph osd tree
ceph -s

# STAGE 5. Pool for k8s RBD

ceph osd pool create k8s-rbd 64
ceph osd pool application enable k8s-rbd rbd

ceph osd pool ls
ceph osd pool get k8s-rbd all

## Mini checklist

ceph -s
ceph orch host ls
ceph orch ps
ceph osd tree

# Delete a broken cluster
cephadm rm-cluster --force --fsid e3b4050a-e8be-11f0-84c2-027a4c119066
38
ansible/roles/dns/push_powerdns_configs_to_node/readme.md
Normal file
@@ -0,0 +1,38 @@
# example dns path in Debian 13
App → glibc resolver → /etc/resolv.conf (127.0.0.53) → systemd-resolved → 192.168.0.1 (Proxmox)

# before running the role
```bash
sudo systemctl disable --now systemd-resolved

sudo rm -f /etc/resolv.conf
echo -e "nameserver 1.1.1.1\nnameserver 8.8.8.8" | sudo tee /etc/resolv.conf

docker compose down
docker compose up -d
```

```bash
# pdns-auth web/api through nginx
curl -i -H 'Host: auth.infra.hran' http://127.0.0.1/

# recursor web/api through nginx
curl -i -H 'Host: recursor.infra.hran' http://127.0.0.1/

# dnsdist web through nginx
curl -i -H 'Host: dnsdist.infra.hran' http://127.0.0.1/
curl -i -u 'admin:CHANGE_ME_DNSDIST_WEB_PASSWORD' -H 'Host: dnsdist.infra.hran' http://127.0.0.1/

# windows
C:\Windows\System32\drivers\etc\hosts

127.0.0.1 auth.infra.hran
127.0.0.1 recursor.infra.hran
127.0.0.1 dnsdist.infra.hran

# check from browser
http://dnsdist.infra.hran:8080/
http://auth.infra.hran:8080/
http://recursor.infra.hran:8080/
```
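With the stack up, the pool split can also be checked from any allowed client: names under the authoritative zone should come from pdns-auth, everything else from the recursor (192.168.0.100 is the `dns_ip` used by the playbooks; the record name is illustrative):

``` bash
# Routed to the "auth" pool via the infra.hran. suffix rule
dig @192.168.0.100 ns1.infra.hran A +short

# Anything else falls through to the "recursor" pool
dig @192.168.0.100 example.com A +short
```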
37
ansible/roles/dns/push_powerdns_configs_to_node/tasks/main.yml
Normal file
@@ -0,0 +1,37 @@
- name: ensure directory structure exists
  ansible.builtin.file:
    path: "{{ item }}"
    state: directory
    owner: "root"
    group: "root"
    mode: "0755"
  loop:
    - "{{ dns_stack_root }}"
    - "{{ dns_stack_root }}/postgres/initdb"
    - "{{ dns_stack_root }}/pdns-auth"
    - "{{ dns_stack_root }}/pdns-recursor"
    - "{{ dns_stack_root }}/dnsdist"
    - "{{ dns_stack_root }}/nginx"

- name: render stack files
  ansible.builtin.template:
    src: "{{ item.src }}"
    dest: "{{ dns_stack_root }}/{{ item.dest }}"
    owner: "root"
    group: "root"
    mode: "{{ item.mode | default('0644') }}"  # honor the per-item mode (.env is 0600)
  loop:
    - { src: "docker-compose.yml.j2", dest: "docker-compose.yml" }
    - { src: ".env.j2", dest: ".env", mode: "0600" }
    - {
        src: "postgres/initdb/01-pdns-schema.sql.j2",
        dest: "postgres/initdb/01-pdns-schema.sql",
      }
    - { src: "pdns-auth/pdns.conf.j2", dest: "pdns-auth/pdns.conf" }
    - {
        src: "pdns-recursor/recursor.conf.j2",
        dest: "pdns-recursor/recursor.conf",
      }
    - { src: "dnsdist/dnsdist.conf.j2", dest: "dnsdist/dnsdist.conf" }
    - { src: "nginx/nginx.conf.j2", dest: "nginx/nginx.conf" }
  register: rendered
41
ansible/roles/dns/push_powerdns_configs_to_node/templates/dnsdist/dnsdist.conf.j2
Normal file
@@ -0,0 +1,41 @@
addLocal("0.0.0.0:53")
addLocal("[::]:53")

-- ACL: clients we answer at all
addACL("127.0.0.0/8")    -- IPv4 localhost (the machine talking to itself)
addACL("10.0.0.0/8")     -- RFC1918 private networks (often VPN/corporate)
addACL("172.16.0.0/12")  -- private 172.16-172.31 (includes 172.30.x, the docker network)
addACL("192.168.0.0/16") -- typical home LAN
addACL("::1/128")        -- IPv6 localhost
addACL("fc00::/7")       -- IPv6 ULA (the private-range analogue)
addACL("fe80::/10")      -- IPv6 link-local (on-link addresses, usually per interface)

newServer({
  address="172.30.0.11:5300",
  pool="auth",
  name="pdns-auth"
})

newServer({
  address="172.30.0.12:5301",
  pool="recursor",
  name="pdns-recursor"
})

-- Authoritative zones -> pool "auth", everything else -> recursor
local authZones = newSuffixMatchNode()
authZones:add("infra.hran.")

pc = newPacketCache(100000, {maxTTL=86400, minTTL=0, temporaryFailureTTL=60})
getPool("recursor"):setCache(pc)
getPool("auth"):setCache(pc)

addAction(SuffixMatchNodeRule(authZones), PoolAction("auth"))
addAction(AllRule(), PoolAction("recursor"))

webserver("0.0.0.0:8084")
setWebserverConfig({
  password="CHANGE_ME_DNSDIST_WEB_PASSWORD",
  apiKey="CHANGE_ME_DNSDIST_KEY",
  acl="127.0.0.0/8, 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16, ::1/128, fc00::/7, fe80::/10"
})
142
ansible/roles/dns/push_powerdns_configs_to_node/templates/docker-compose.yml.j2
Normal file
@@ -0,0 +1,142 @@
services:
  postgres:
    image: postgres:16
    container_name: dnsstack-postgres
    restart: unless-stopped
    environment:
      TZ: Europe/Kyiv
      POSTGRES_DB: pdns
      POSTGRES_USER: pdns
      POSTGRES_PASSWORD: CHANGE_ME_POSTGRES_PASSWORD
    volumes:
      - /opt/dns-stack/postgres/data:/var/lib/postgresql/data
      - ./postgres/initdb:/docker-entrypoint-initdb.d:ro
    networks:
      dnsnet:
        ipv4_address: "172.30.0.10"
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U $${POSTGRES_USER} -d $${POSTGRES_DB} -h 127.0.0.1 -p 5432"]
      interval: 2s
      timeout: 3s
      retries: 30
      start_period: 10s
    logging:
      driver: "json-file"
      options:
        tag: "dnsstack.postgres"
        max-size: "20m"
        max-file: "10"

  pdns-auth:
    image: powerdns/pdns-auth-50:latest
    container_name: dnsstack-pdns-auth
    restart: unless-stopped
    depends_on:
      postgres:
        condition: service_healthy
    environment:
      TZ: Europe/Kyiv
    volumes:
      - ./pdns-auth/pdns.conf:/etc/powerdns/pdns.conf:ro
    networks:
      dnsnet:
        ipv4_address: "172.30.0.11"
    expose:
      - "5300"
      - "8083"
    ulimits:
      nofile:
        soft: 10064
        hard: 10064
    logging:
      driver: "json-file"
      options:
        tag: "dnsstack.pdns-auth"
        max-size: "20m"
        max-file: "10"

  pdns-recursor:
    image: powerdns/pdns-recursor-53:latest
    container_name: dnsstack-pdns-recursor
    restart: unless-stopped
    environment:
      TZ: Europe/Kyiv
    volumes:
      - ./pdns-recursor/recursor.conf:/etc/powerdns/recursor.conf:ro
    networks:
      dnsnet:
        ipv4_address: "172.30.0.12"
    expose:
      - "5301"
      - "8082"
    ulimits:
      nofile:
        soft: 10064
        hard: 10064
    logging:
      driver: "json-file"
      options:
        tag: "dnsstack.pdns-recursor"
        max-size: "20m"
        max-file: "10"

  dnsdist:
    image: powerdns/dnsdist-20:latest
    container_name: dnsstack-dnsdist
    restart: unless-stopped
    depends_on:
      - pdns-auth
      - pdns-recursor
    environment:
      TZ: Europe/Kyiv
    volumes:
      - ./dnsdist/dnsdist.conf:/etc/dnsdist/dnsdist.conf:ro
    networks:
      dnsnet:
        ipv4_address: "172.30.0.2"
    ports:
      - "53:53/udp"
      - "53:53/tcp"
    expose:
      - "8084"
    ulimits:
      nofile:
        soft: 65535
        hard: 65535
    logging:
      driver: "json-file"
      options:
        tag: "dnsstack.dnsdist"
        max-size: "50m"
        max-file: "10"

  nginx:
    image: nginx:1.27-alpine
    container_name: dnsstack-nginx
    restart: unless-stopped
    depends_on:
      - pdns-auth
      - pdns-recursor
      - dnsdist
    environment:
      TZ: Europe/Kyiv
    volumes:
      - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
    networks:
      dnsnet:
        ipv4_address: "172.30.0.3"
    ports:
      - "80:80/tcp"
    logging:
      driver: "json-file"
      options:
        tag: "dnsstack.nginx"
        max-size: "20m"
        max-file: "10"

networks:
  dnsnet:
    driver: bridge
    ipam:
      config:
        - subnet: "172.30.0.0/24"
53
ansible/roles/dns/push_powerdns_configs_to_node/templates/nginx/nginx.conf.j2
Normal file
@@ -0,0 +1,53 @@
worker_processes auto;

events { worker_connections 1024; }

http {
    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log warn;

    # auth.infra.hran -> pdns-auth:8083
    server {
        listen 80;
        server_name auth.infra.hran;

        location / {
            proxy_http_version 1.1;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_pass http://pdns-auth:8083;
        }
    }

    # recursor.infra.hran -> pdns-recursor:8082
    server {
        listen 80;
        server_name recursor.infra.hran;

        location / {
            proxy_http_version 1.1;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_pass http://pdns-recursor:8082;
        }
    }

    # dnsdist.infra.hran -> dnsdist:8084
    server {
        listen 80;
        server_name dnsdist.infra.hran;

        location / {
            proxy_http_version 1.1;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_pass http://dnsdist:8084;
        }
    }
}
21
ansible/roles/dns/push_powerdns_configs_to_node/templates/pdns-auth/pdns.conf.j2
Normal file
@@ -0,0 +1,21 @@
local-address=0.0.0.0,::
local-port=5300

launch=gpgsql
gpgsql-host=postgres
gpgsql-port=5432
gpgsql-dbname=pdns
gpgsql-user=pdns
gpgsql-password=CHANGE_ME_POSTGRES_PASSWORD

api=yes
api-key=CHANGE_ME_PDNS_API_KEY

webserver=yes
webserver-address=0.0.0.0
webserver-port=8083
webserver-allow-from=127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16

disable-axfr=yes
version-string=anonymous
loglevel=4
46
ansible/roles/dns/push_powerdns_configs_to_node/templates/pdns-recursor/recursor.conf.j2
Normal file
@@ -0,0 +1,46 @@
# PowerDNS Recursor 5.1+ YAML config

incoming:
  listen:
    - "0.0.0.0:5301"
    - "[::]:5301"
  allow_from:
    - "127.0.0.0/8"
    - "10.0.0.0/8"
    - "172.16.0.0/12"
    - "192.168.0.0/16"
    - "::1/128"
    - "fc00::/7"
    - "fe80::/10"

outgoing:
  source_address:
    - "0.0.0.0"
    - "::"

webservice:
  webserver: true
  address: "0.0.0.0"
  port: 8082
  api_key: "CHANGE_ME_RECURSOR_API_KEY"
  allow_from:
    - "127.0.0.0/8"
    - "10.0.0.0/8"
    - "172.16.0.0/12"
    - "192.168.0.0/16"
    - "::1/128"
    - "fc00::/7"
    - "fe80::/10"

logging:
  loglevel: 6
  quiet: false

recursor:
  version_string: "anonymous"

forward_zones_recurse:
  - zone: "."
    forwarders:
      - "1.1.1.1"
      - "8.8.8.8"
103
ansible/roles/dns/push_powerdns_configs_to_node/templates/postgres/initdb/01-pdns-schema.sql.j2
Normal file
@@ -0,0 +1,103 @@
-- PowerDNS Generic PostgreSQL schema (gpgsql)
-- Source: PowerDNS pdns/modules/gpgsqlbackend/schema.pgsql.sql

CREATE TABLE domains (
  id              SERIAL PRIMARY KEY,
  name            VARCHAR(255) NOT NULL,
  master          VARCHAR(128) DEFAULT NULL,
  last_check      INT DEFAULT NULL,
  type            TEXT NOT NULL,
  notified_serial INT DEFAULT NULL,
  account         VARCHAR(40) DEFAULT NULL,
  options         TEXT DEFAULT NULL,
  catalog         VARCHAR(255) DEFAULT NULL
);

CREATE UNIQUE INDEX name_index ON domains(name);
CREATE INDEX catalog_idx ON domains(catalog);

CREATE TABLE records (
  id        BIGSERIAL PRIMARY KEY,
  domain_id INT DEFAULT NULL,
  name      VARCHAR(255) DEFAULT NULL,
  type      VARCHAR(10) DEFAULT NULL,
  content   VARCHAR(65535) DEFAULT NULL,
  ttl       INT DEFAULT NULL,
  prio      INT DEFAULT NULL,
  disabled  BOOL DEFAULT 'f',
  ordername VARCHAR(255),
  auth      BOOL DEFAULT 't'
);

CREATE INDEX rec_name_index ON records(name);
CREATE INDEX nametype_index ON records(name, type);
CREATE INDEX domain_id ON records(domain_id);
CREATE INDEX ordername ON records(ordername);

CREATE TABLE supermasters (
  ip         INET NOT NULL,
  nameserver VARCHAR(255) NOT NULL,
  account    VARCHAR(40) NOT NULL,
  PRIMARY KEY (ip, nameserver)
);

CREATE TABLE comments (
  id          SERIAL PRIMARY KEY,
  domain_id   INT NOT NULL,
  name        VARCHAR(255) NOT NULL,
  type        VARCHAR(10) NOT NULL,
  modified_at INT NOT NULL,
  account     VARCHAR(40) DEFAULT NULL,
  comment     VARCHAR(65535) NOT NULL
);

CREATE INDEX comments_domain_id_idx ON comments(domain_id);
CREATE INDEX comments_name_type_idx ON comments(name, type);
CREATE INDEX comments_order_idx ON comments(domain_id, modified_at);

CREATE TABLE domainmetadata (
  id        SERIAL PRIMARY KEY,
  domain_id INT NOT NULL,
  kind      VARCHAR(32),
  content   TEXT
);

CREATE INDEX domainmetadata_idx ON domainmetadata(domain_id, kind);

CREATE TABLE cryptokeys (
  id        SERIAL PRIMARY KEY,
  domain_id INT NOT NULL,
  flags     INT NOT NULL,
  active    BOOL,
  published BOOL DEFAULT TRUE,
  content   TEXT
);

CREATE INDEX domainidindex ON cryptokeys(domain_id);

CREATE TABLE tsigkeys (
  id        SERIAL PRIMARY KEY,
  name      VARCHAR(255),
  algorithm VARCHAR(50),
  secret    VARCHAR(255)
);

CREATE UNIQUE INDEX namealgoindex ON tsigkeys(name, algorithm);

CREATE TABLE luarecords (
  id        SERIAL PRIMARY KEY,
  domain_id INT NOT NULL,
  name      VARCHAR(255) NOT NULL,
  type      VARCHAR(10) NOT NULL,
  content   VARCHAR(65535) NOT NULL,
  ttl       INT NOT NULL,
  prio      INT DEFAULT NULL,
  disabled  BOOL DEFAULT 'f',
  ordername VARCHAR(255),
  auth      BOOL DEFAULT 't'
);

CREATE INDEX luarecord_name_index ON luarecords(name);
CREATE INDEX luarecord_nametype_index ON luarecords(name, type);
CREATE INDEX luarecord_domain_id ON luarecords(domain_id);
CREATE INDEX luarecord_ordername ON luarecords(ordername);
9
ansible/roles/dns/setup_systemd_resolved_config/handlers/main.yml
Normal file
@@ -0,0 +1,9 @@
---
- name: restart dhcpcd
  ansible.builtin.shell: |
    set -euo pipefail
    dhcpcd -k eth0 || true
    sleep 1
    dhcpcd -f /etc/dhcpcd.conf eth0
  args:
    executable: /bin/bash
4
ansible/roles/dns/setup_systemd_resolved_config/readme.md
Normal file
@@ -0,0 +1,4 @@
```bash
cat /etc/resolv.conf
getent hosts ntp-edge.infra.hran
```
9
ansible/roles/dns/setup_systemd_resolved_config/tasks/main.yml
Normal file
@@ -0,0 +1,9 @@
---
- name: render dhcpcd.conf (DNS override)
  ansible.builtin.template:
    src: dhcpcd.conf.j2
    dest: /etc/dhcpcd.conf
    owner: root
    group: root
    mode: "0644"
  notify: restart dhcpcd
45
ansible/roles/dns/setup_systemd_resolved_config/templates/dhcpcd.conf.j2
Normal file
@@ -0,0 +1,45 @@
# A sample configuration for dhcpcd.
# See dhcpcd.conf(5) for details.

# Allow users of this group to interact with dhcpcd via the control socket.
#controlgroup wheel

# Inform the DHCP server of our hostname for DDNS.
hostname

# Use the hardware address of the interface for the Client ID.
#clientid
# or
# Use the same DUID + IAID as set in DHCPv6 for DHCPv4 ClientID as per RFC4361.
# Some non-RFC compliant DHCP servers do not reply with this set.
# In this case, comment out duid and enable clientid above.
duid

# Persist interface configuration when dhcpcd exits.
persistent

# vendorclassid is set to blank to avoid sending the default of
# dhcpcd-<version>:<os>:<machine>:<platform>
vendorclassid

# A list of options to request from the DHCP server.
option domain_name_servers, domain_name, domain_search
option classless_static_routes
# Respect the network MTU. This is applied to DHCP routes.
option interface_mtu

# Request a hostname from the network
option host_name

# Most distributions have NTP support.
#option ntp_servers

# A ServerID is required by RFC2131.
require dhcp_server_identifier

# Generate SLAAC address using the Hardware Address of the interface
#slaac hwaddr
# OR generate Stable Private IPv6 Addresses based from the DUID
slaac private

static domain_name_servers=192.168.0.100 1.1.1.1 8.8.8.8
4
ansible/roles/docker/handlers/main.yml
Normal file
@@ -0,0 +1,4 @@
---
- name: update apt cache
  apt:
    update_cache: yes
74
ansible/roles/docker/tasks/main.yml
Normal file
@@ -0,0 +1,74 @@
---
# 1) Remove a potentially broken repo file (left over from earlier attempts)
- name: remove broken docker repo if exists
  file:
    path: /etc/apt/sources.list.d/docker.list
    state: absent

# 2) Minimal set of required packages
- name: install prerequisites
  apt:
    name:
      - ca-certificates
      - curl
      - gnupg
    state: present
    update_cache: yes

# 3) Keyring + GPG key
- name: ensure keyrings dir exists
  file:
    path: /etc/apt/keyrings
    state: directory
    mode: "0755"

- name: download docker GPG key
  get_url:
    url: https://download.docker.com/linux/debian/gpg
    dest: /etc/apt/keyrings/docker.gpg
    mode: "0644"

# 4) Repo (architecture mapped via ansible_architecture -> amd64)
- name: add docker apt repository
  copy:
    dest: /etc/apt/sources.list.d/docker.list
    content: |
      deb [arch={{ 'amd64' if ansible_architecture in ['x86_64','amd64'] else ansible_architecture }} signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian {{ ansible_lsb.codename }} stable

# 5) Try containerd.io versions one by one (and hold the first that installs)
- name: install first working containerd.io (skip broken versions) and hold
  shell: |
    set -euo pipefail
    apt-get update
    mapfile -t versions < <(apt-cache madison containerd.io | awk '{print $3}' | sort -V | tac)

    for v in "${versions[@]}"; do
      echo "Trying containerd.io=$v"
      if apt-get install -y "containerd.io=$v"; then
        apt-mark hold containerd.io
        exit 0
      fi
    done

    echo "No working containerd.io version found in repo"
    exit 1
  args:
    executable: /bin/bash
  changed_when: true

# 6) Docker packages (containerd.io is already installed/held)
- name: install docker packages
  apt:
    name:
      - docker-ce
      - docker-ce-cli
      - docker-buildx-plugin
      - docker-compose-plugin
    state: present
    update_cache: yes

- name: enable & start docker service
  service:
    name: docker
    state: started
    enabled: yes
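A quick sanity check once the role has run (a sketch; assumes the play completed on the target host):

```bash
docker --version                         # docker-ce installed
docker compose version                   # compose plugin available
apt-mark showhold | grep containerd.io   # confirm the hold took effect
```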
109
ansible/roles/gitea/README.md
Normal file
@@ -0,0 +1,109 @@
# Gitea Setup Notes

## 1️⃣ Adding an HTTPS certificate (Let's Encrypt + Nginx)

### Installing certbot

Install certbot on the host (NOT inside the container):

``` bash
sudo apt update
sudo apt install certbot python3-certbot-nginx -y
```

### Basic nginx config (HTTP → proxy to Gitea)

File: `./nginx/nginx.conf`

``` nginx
server {
    listen 80;
    server_name gitea.quietblock.net;

    location / {
        proxy_pass http://gitea:3000;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
}
```

### Obtaining the certificate

``` bash
sudo certbot certonly --standalone -d gitea.quietblock.net
```

Requests an SSL certificate for the domain in standalone mode.

On success the certificates will be at:

    /etc/letsencrypt/live/gitea.quietblock.net/fullchain.pem
    /etc/letsencrypt/live/gitea.quietblock.net/privkey.pem
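Let's Encrypt certificates expire after about 90 days. A hedged renewal check (standalone renewal needs port 80 free, so stop nginx first if it is bound there):

``` bash
sudo certbot renew --dry-run   # rehearse renewal without touching the live certs
```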
### Docker nginx service

``` yaml
nginx:
  image: nginx:stable
  container_name: nginx
  restart: always

  ports:
    - "80:80"
    - "443:443"

  volumes:
    - ./nginx:/etc/nginx/conf.d
    - /etc/letsencrypt:/etc/letsencrypt:ro

  depends_on:
    - gitea
```

### Final nginx config (HTTP → HTTPS + SSL)

``` nginx
server {
    listen 80;
    server_name gitea.quietblock.net;
    return 301 https://$host$request_uri;
}

server {
    listen 443 ssl;
    server_name gitea.quietblock.net;

    ssl_certificate /etc/letsencrypt/live/gitea.quietblock.net/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/gitea.quietblock.net/privkey.pem;

    location / {
        proxy_pass http://gitea:3000;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-Proto https;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
}
```

What happens here:

- HTTP is redirected to HTTPS
- nginx uses the SSL certificates
- HTTPS traffic is proxied into the gitea container
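A quick way to confirm both server blocks behave as described (sketch; run from any host that resolves the domain):

``` bash
curl -I http://gitea.quietblock.net    # expect a 301 redirect to https
curl -I https://gitea.quietblock.net   # expect a 200 from Gitea behind nginx
```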
------------------------------------------------------------------------

## 2️⃣ Creating an administrator in Gitea

### Enter the container

``` bash
docker exec -it --user git gitea /bin/bash
```

Opens a shell inside the gitea container as the git user.

### Create the administrator

``` bash
gitea admin user create --username adminuser --password 14881488 --email you@mail.com --admin
```
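The password above is a placeholder and should be rotated right away; a sketch using Gitea's CLI (run inside the container as shown earlier):

``` bash
gitea admin user change-password --username adminuser --password 'a-long-random-password'
```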
23
ansible/roles/gitea/tasks/main.yml
Normal file
@@ -0,0 +1,23 @@
- name: ensure directory structure exists
  ansible.builtin.file:
    path: "{{ item }}"
    state: directory
    owner: "root"
    group: "root"
    mode: "0755"
  loop:
    - "/opt/gitea"
    - "/opt/gitea/nginx"

- name: render stack files
  ansible.builtin.template:
    src: "{{ item.src }}"
    dest: "/opt/gitea/{{ item.dest }}"
    owner: "root"
    group: "root"
    mode: "{{ item.mode | default('0644') }}"  # honor per-item mode (e.g. 0600 for .env), which a fixed "0644" would ignore
  loop:
    - { src: "docker-compose.yml.j2", dest: "docker-compose.yml" }
    - { src: ".env.j2", dest: ".env", mode: "0600" }
    - { src: "nginx/nginx.conf.j2", dest: "nginx/nginx.conf" }
  register: rendered
78
ansible/roles/gitea/templates/docker-compose.yml.j2
Normal file
@@ -0,0 +1,78 @@
version: "3.9"

services:
  postgres:
    image: postgres:15
    container_name: postgres
    restart: always

    environment:
      POSTGRES_DB: ${POSTGRES_DB}
      POSTGRES_USER: ${POSTGRES_USER}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}

    volumes:
      - ./data/postgres:/var/lib/postgresql/data

    networks:
      - gitea_net

  gitea:
    image: gitea/gitea:latest
    container_name: gitea
    restart: always

    environment:
      - USER_UID=1000
      - USER_GID=1000

      # DB
      - GITEA__database__DB_TYPE=postgres
      - GITEA__database__HOST=postgres:5432
      - GITEA__database__NAME=${POSTGRES_DB}
      - GITEA__database__USER=${POSTGRES_USER}
      - GITEA__database__PASSWD=${POSTGRES_PASSWORD}

      # basic
      - GITEA__server__DOMAIN=${GITEA_URL}
      - GITEA__server__ROOT_URL=https://${GITEA_URL}/
      - GITEA__server__SSH_DOMAIN=${GITEA_URL}
      - GITEA__server__HTTP_PORT=3000
      - GITEA__server__SSH_PORT=2222

      # security
      - GITEA__security__INSTALL_LOCK=true
      - GITEA__service__DISABLE_REGISTRATION=true

    volumes:
      - ./data/gitea:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro

    depends_on:
      - postgres

    networks:
      - gitea_net

  nginx:
    image: nginx:stable
    container_name: nginx
    restart: always

    ports:
      - "80:80"
      - "443:443"

    volumes:
      - ./nginx:/etc/nginx/conf.d
      - /etc/letsencrypt:/etc/letsencrypt:ro

    depends_on:
      - gitea

    networks:
      - gitea_net

networks:
  gitea_net:
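Nothing in the role starts the stack yet; a minimal sketch for bringing it up by hand (assumes the rendered files live in /opt/gitea):

``` bash
cd /opt/gitea
docker compose up -d   # start postgres, gitea and nginx
docker compose ps      # confirm all three containers are up
```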
23
ansible/roles/gitea/templates/nginx/nginx.conf.j2
Normal file
@@ -0,0 +1,23 @@
server {
    listen 80;
    server_name gitea.quietblock.net;
    return 301 https://$host$request_uri;
}

server {
    listen 443 ssl;
    server_name gitea.quietblock.net;

    ssl_certificate /etc/letsencrypt/live/gitea.quietblock.net/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/gitea.quietblock.net/privkey.pem;

    location / {
        proxy_pass http://gitea:3000;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-Proto https;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }

    client_max_body_size 50M;
}
14
ansible/roles/harden/fail2ban/handlers/main.yml
Normal file
@@ -0,0 +1,14 @@
---
- name: validate fail2ban config
  listen: "validate and restart fail2ban"
  become: true
  ansible.builtin.command: fail2ban-client -t
  register: f2b_validate
  changed_when: false

- name: restart fail2ban
  listen: "validate and restart fail2ban"
  become: true
  ansible.builtin.systemd:
    name: fail2ban
    state: restarted
58
ansible/roles/harden/fail2ban/tasks/main.yml
Normal file
@@ -0,0 +1,58 @@
---
- name: install fail2ban + deps
  ansible.builtin.apt:
    name:
      - fail2ban
      - python3
      - python3-systemd
      - nftables
    state: present
    update_cache: true
  become: true

- name: enable & start nftables
  ansible.builtin.systemd:
    name: nftables
    enabled: true
    state: started
  become: true

- name: ensure fail2ban directories exist
  ansible.builtin.file:
    path: "{{ item }}"
    state: directory
    owner: root
    group: root
    mode: "0755"
  loop:
    - /etc/fail2ban
    - /etc/fail2ban/jail.d
    - /etc/fail2ban/filter.d
  become: true

- name: deploy /etc/fail2ban/fail2ban.local
  ansible.builtin.template:
    src: fail2ban.local.j2
    dest: /etc/fail2ban/fail2ban.local
    owner: root
    group: root
    mode: "0644"
  notify: validate and restart fail2ban
  become: true

- name: deploy /etc/fail2ban/jail.local
  ansible.builtin.template:
    src: jail.local.j2
    dest: /etc/fail2ban/jail.local
    owner: root
    group: root
    mode: "0644"
  notify: validate and restart fail2ban
  become: true

- name: ensure fail2ban enabled and started
  ansible.builtin.systemd:
    name: fail2ban
    enabled: true
    state: started
  become: true
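After the role runs, fail2ban's client can confirm the sshd jail picked up the settings from jail.local below (sketch):

```bash
sudo fail2ban-client status        # list active jails
sudo fail2ban-client status sshd   # counters and currently banned IPs
```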
@@ -0,0 +1,6 @@
[Definition]
loglevel = INFO
logtarget = /var/log/fail2ban.log
socket = /run/fail2ban/fail2ban.sock
pidfile = /run/fail2ban/fail2ban.pid
dbpurgeage = 86400
18
ansible/roles/harden/fail2ban/templates/jail.local.j2
Normal file
@@ -0,0 +1,18 @@
[DEFAULT]
ignoreip = 127.0.0.1/8 ::1

findtime = 600
maxretry = 5
bantime = 1h

backend = systemd
banaction = nftables[type=multiport]

[sshd]
enabled = true
port = 25105
filter = sshd
maxretry = 5
findtime = 600
bantime = 1h
mode = aggressive
12
ansible/roles/harden/nftables/handlers/main.yml
Normal file
@@ -0,0 +1,12 @@
---
- name: validate nftables config
  ansible.builtin.command:
    cmd: nft -c -f /etc/nftables.conf
  listen: apply nftables
  changed_when: false

- name: reload nftables
  ansible.builtin.systemd:
    name: nftables
    state: reloaded
  listen: apply nftables
22
ansible/roles/harden/nftables/tasks/main.yml
Normal file
@@ -0,0 +1,22 @@
---
- name: install nftables
  ansible.builtin.apt:
    name: nftables
    state: present
    update_cache: true
  notify: apply nftables

- name: deploy nftables config
  ansible.builtin.template:
    src: "{{ nftables_conf_name }}"
    dest: /etc/nftables.conf
    owner: root
    group: root
    mode: "0644"
  notify: apply nftables

- name: enable and start nftables service
  ansible.builtin.systemd:
    name: nftables
    enabled: true
    state: started
36
ansible/roles/harden/nftables/templates/proxmox-nftables.j2
Normal file
@@ -0,0 +1,36 @@
#!/usr/sbin/nft -f

flush ruleset

table inet filter {
    chain input {
        type filter hook input priority 0;
        policy drop;

        iif "lo" accept
        ct state established,related accept

        # SSH
        tcp dport {{ ssh_port }} accept

        # ICMP
        ip protocol icmp accept
        ip6 nexthdr icmpv6 accept

        # Proxmox Web/API (LAN only)
        ip saddr 192.168.0.0/24 tcp dport 8006 accept

        # NTP
        ip saddr 192.168.0.0/24 udp dport {{ ntp_port }} accept
    }

    chain forward {
        type filter hook forward priority 0;
        policy drop;
    }

    chain output {
        type filter hook output priority 0;
        policy accept;
    }
}
32
ansible/roles/harden/nftables/templates/vm-nftables.conf.j2
Normal file
@@ -0,0 +1,32 @@
#!/usr/sbin/nft -f

flush ruleset

table inet filter {
    chain input {
        type filter hook input priority 0;
        policy drop;

        iif "lo" accept
        ct state established,related accept

        # SSH
        tcp dport {{ ssh_port }} accept

        # udp dport {{ ntp_port }} accept

        # ICMP
        ip protocol icmp accept
        ip6 nexthdr icmpv6 accept
    }

    chain forward {
        type filter hook forward priority 0;
        policy drop;
    }

    chain output {
        type filter hook output priority 0;
        policy accept;
    }
}
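The handler already validates before reloading; the same checks can be run by hand while editing a template (sketch):

```bash
sudo nft -c -f /etc/nftables.conf   # parse-check the rendered config
sudo nft list ruleset               # show what is actually loaded
```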
25
ansible/roles/harden/sshd_config/tasks/main.yml
Normal file
@@ -0,0 +1,25 @@
---
- name: ensure sshd_config.d directory exists
  become: true
  file:
    path: "/etc/ssh/sshd_config.d"
    state: directory
    owner: root
    group: root
    mode: "0755"

- name: deploy sshd config file
  become: true
  template:
    src: "00-sshd_config-hardening.conf.j2"
    dest: "/etc/ssh/sshd_config.d/00-sshd_config-hardening.conf"
    owner: root
    group: root
    mode: "0644"
    validate: "sshd -t -f %s"

- name: restart SSH service
  become: true
  service:
    name: ssh
    state: restarted
@@ -0,0 +1,107 @@
# --- MAIN ---

# Change default port 22 → {{ ssh_port }} (reduces noise from scanners)
Port {{ ssh_port }}

# Optionally limit interfaces (default is all)
# ListenAddress 0.0.0.0  # IPv4
# ListenAddress ::       # IPv6

# Allow only SSH protocol version 2 (v1 is insecure)
Protocol 2


# --- AUTHENTICATION ---

# Root may log in with keys only, never with a password
PermitRootLogin prohibit-password

# Disable password login (keys only)
PasswordAuthentication no

# Disable interactive keyboard auth (OTP, TOTP, etc.)
KbdInteractiveAuthentication no

# Disable challenge-response auth (legacy)
ChallengeResponseAuthentication no

# Enable public key authentication (main method)
PubkeyAuthentication yes


# --- ACCESS ---

# Allow only specific user
# AllowUsers adminuser
# Or alternatively allow a group:
# AllowGroups sshusers


# --- FUNCTION RESTRICTIONS ---

# Disallow empty passwords
PermitEmptyPasswords no

# Disallow user environment modification (~/.ssh/environment)
PermitUserEnvironment no

# Disable X11 forwarding (no GUI sessions)
X11Forwarding no

# TCP forwarding stays enabled (set to "no" to block tunnels)
AllowTcpForwarding yes

# Disable gateway ports (no external binding)
GatewayPorts no

# Disable VPN tunnels via SSH
PermitTunnel no

# Agent forwarding stays enabled (set to "no" to disable it)
AllowAgentForwarding yes


# --- ANTI-BRUTEFORCE & STABILITY ---

# Login timeout (20 seconds)
LoginGraceTime 20

# Max 3 auth attempts per connection
MaxAuthTries 3

# Limit simultaneous connections
# Allow 10 new, start dropping at 30, max 60 queued
MaxStartups 10:30:60


# --- SESSION ACTIVITY ---

# Ping client every 300s (5 minutes)
ClientAliveInterval 300

# Disconnect if no response twice
ClientAliveCountMax 2

# Disable TCP keepalive
TCPKeepAlive no

# Skip DNS checks for faster login
UseDNS no


# --- SFTP ---

# Use internal SFTP subsystem
Subsystem sftp internal-sftp


# --- CRYPTOGRAPHY (optional) ---

# Modern key exchange algorithms (if supported)
# KexAlgorithms sntrup761x25519-sha512@openssh.com,curve25519-sha256

# Modern ciphers
# Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr

# Modern MAC algorithms
# MACs umac-128-etm@openssh.com,hmac-sha2-256-etm@openssh.com
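Before closing the current session, it is worth confirming the effective settings from a second terminal (sketch; `sshd -T` prints keywords in lowercase):

```bash
sudo sshd -t   # full-config syntax check
sudo sshd -T | grep -Ei '^(port|permitrootlogin|passwordauthentication) '
```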
15
ansible/roles/harden/unattended_upgrades/handlers/main.yml
Normal file
@@ -0,0 +1,15 @@
---
- name: restart unattended-upgrades
  ansible.builtin.service:
    name: unattended-upgrades
    state: restarted
    enabled: true

- name: restart apt timers
  ansible.builtin.systemd:
    name: "{{ item }}"
    state: restarted
    enabled: true
  loop:
    - apt-daily.timer
    - apt-daily-upgrade.timer
17
ansible/roles/harden/unattended_upgrades/readme.md
Normal file
@@ -0,0 +1,17 @@
```bash
## Check that the timers are enabled and ticking
systemctl status apt-daily.timer apt-daily-upgrade.timer
systemctl list-timers --all | egrep 'apt-daily|apt-daily-upgrade'

## Check that unattended-upgrades has actually run
systemctl status unattended-upgrades.service
journalctl -u unattended-upgrades --no-pager -n 200

## Check the logs and the actions actually taken
ls -l /var/log/unattended-upgrades/
tail -n 200 /var/log/unattended-upgrades/unattended-upgrades.log
tail -n 200 /var/log/unattended-upgrades/unattended-upgrades-dpkg.log

## Quick self-test (dry run)
unattended-upgrade --dry-run --debug
```
49
ansible/roles/harden/unattended_upgrades/tasks/main.yml
Normal file
@@ -0,0 +1,49 @@
---
- name: ensure required packages are present
  ansible.builtin.apt:
    name:
      - unattended-upgrades
      - apt-listchanges
      - gpg
    state: present
    update_cache: true

- name: ensure debian-security repo is present
  ansible.builtin.apt_repository:
    repo: >-
      deb http://deb.debian.org/debian-security
      {{ ansible_facts.lsb.codename | default(ansible_facts.distribution_release) }}-security
      main contrib non-free non-free-firmware
    state: present
    filename: debian-security
    update_cache: true
  notify: restart apt timers

- name: deploy /etc/apt/apt.conf.d/50unattended-upgrades
  ansible.builtin.template:
    src: 50unattended-upgrades.j2
    dest: /etc/apt/apt.conf.d/50unattended-upgrades
    owner: root
    group: root
    mode: "0644"
  notify: restart unattended-upgrades

- name: deploy /etc/apt/apt.conf.d/20auto-upgrades
  ansible.builtin.template:
    src: 20auto-upgrades.j2
    dest: /etc/apt/apt.conf.d/20auto-upgrades
    owner: root
    group: root
    mode: "0644"
  notify:
    - restart unattended-upgrades
    - restart apt timers

- name: enable & start apt timers
  ansible.builtin.systemd:
    name: "{{ item }}"
    state: started
    enabled: true
  loop:
    - apt-daily.timer
    - apt-daily-upgrade.timer
@@ -0,0 +1,4 @@
APT::Periodic::Update-Package-Lists "1";
APT::Periodic::Download-Upgradeable-Packages "1";
APT::Periodic::Unattended-Upgrade "1";
APT::Periodic::AutocleanInterval "7";
@@ -0,0 +1,10 @@
Unattended-Upgrade::Origins-Pattern {
        "origin=Debian,codename=${distro_codename}-security";
};

Unattended-Upgrade::Automatic-Reboot "false";
Unattended-Upgrade::Automatic-Reboot-Time "03:30";
Unattended-Upgrade::Automatic-Reboot-WithUsers "false";

Unattended-Upgrade::Remove-Unused-Dependencies "true";
Unattended-Upgrade::MinimalSteps "true";
8
ansible/roles/k8s/install/00_python/tasks/main.yml
Normal file
@@ -0,0 +1,8 @@
---
- name: Ensure required Python libraries are installed
  ansible.builtin.apt:
    name:
      - python3-pip
      - python3-kubernetes
    state: present
    update_cache: yes
3
ansible/roles/k8s/install/01_helm/install-helm.md
Normal file
@@ -0,0 +1,3 @@
```bash
curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
```
20
ansible/roles/k8s/install/01_helm/tasks/main.yml
Normal file
@@ -0,0 +1,20 @@
---
- name: Download Helm install script
  ansible.builtin.get_url:
    url: https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
    dest: /tmp/get-helm-3.sh
    mode: '0755'

- name: Install Helm
  ansible.builtin.command: /tmp/get-helm-3.sh
  args:
    creates: /usr/local/bin/helm

- name: Verify Helm installation
  ansible.builtin.command: helm version
  register: helm_version_output
  changed_when: false

- name: Show Helm version
  ansible.builtin.debug:
    var: helm_version_output.stdout
172
ansible/roles/k8s/install/02_common/tasks/main.yml
Normal file
@@ -0,0 +1,172 @@
# roles/k8s/install/02_common/tasks/main.yml
---
# === 1. Package updates and base utilities ===
- name: Install base packages
  ansible.builtin.apt:
    update_cache: yes
    name:
      - apt-transport-https
      - ca-certificates
      - curl
      - gnupg
      - lsb-release
    state: present

# === 2. Disable swap ===
- name: Disable swap immediately
  ansible.builtin.command: swapoff -a
  changed_when: false

- name: Backup fstab
  ansible.builtin.copy:
    src: /etc/fstab
    dest: /etc/fstab.bak
    remote_src: yes
    force: no

- name: Comment out swap entries in fstab
  ansible.builtin.replace:
    path: /etc/fstab
    regexp: '^\s*([^#].*\s+swap\s+.*)$'
    replace: '# \1'

# === 3. Kernel modules ===
- name: Write kernel modules config for Kubernetes
  ansible.builtin.copy:
    dest: /etc/modules-load.d/k8s.conf
    content: |
      overlay
      br_netfilter

- name: Load overlay module
  ansible.builtin.command: modprobe overlay
  changed_when: false

- name: Load br_netfilter module
  ansible.builtin.command: modprobe br_netfilter
  changed_when: false

# === 4. sysctl for Kubernetes / containerd ===
- name: Configure Kubernetes sysctl params
  ansible.builtin.copy:
    dest: /etc/sysctl.d/99-kubernetes-cri.conf
    content: |
      net.bridge.bridge-nf-call-iptables = 1
      net.bridge.bridge-nf-call-ip6tables = 1
      net.ipv4.ip_forward = 1

- name: Apply sysctl settings
  ansible.builtin.command: sysctl --system
  changed_when: false

# === 5. Install containerd ===
- name: Install containerd
  ansible.builtin.apt:
    update_cache: yes
    name: containerd
    state: present

- name: Ensure containerd config directory exists
  ansible.builtin.file:
    path: /etc/containerd
    state: directory
    mode: '0755'

# IMPORTANT: always regenerate config.toml, as in the manual script
- name: Generate default containerd config (overwrite)
  ansible.builtin.shell: |
    set -o errexit
    containerd config default > /etc/containerd/config.toml
  args:
    executable: /bin/bash

- name: Enable SystemdCgroup in containerd config
  ansible.builtin.replace:
    path: /etc/containerd/config.toml
    regexp: 'SystemdCgroup = false'
    replace: 'SystemdCgroup = true'

- name: Set correct CNI bin_dir in containerd config
  ansible.builtin.replace:
    path: /etc/containerd/config.toml
    regexp: 'bin_dir = .*'
    replace: 'bin_dir = "/opt/cni/bin"'

- name: Set correct CNI conf_dir in containerd config
  ansible.builtin.replace:
    path: /etc/containerd/config.toml
    regexp: 'conf_dir = .*'
    replace: 'conf_dir = "/etc/cni/net.d"'

- name: Enable and restart containerd
  ansible.builtin.systemd:
    name: containerd
    enabled: true
    state: restarted

# === 6. Prepare CNI directories ===
- name: Ensure CNI directories exist
  ansible.builtin.file:
    path: "{{ item }}"
    state: directory
    mode: '0755'
  loop:
    - /opt/cni/bin
    - /etc/cni/net.d

# /usr/lib/cni -> /opt/cni/bin, only if /usr/lib/cni does not exist
- name: Check if /usr/lib/cni exists
  ansible.builtin.stat:
    path: /usr/lib/cni
  register: cni_usr_lib

- name: Create symlink /usr/lib/cni -> /opt/cni/bin (if not exists)
  ansible.builtin.file:
    src: /opt/cni/bin
    dest: /usr/lib/cni
    state: link
  when: not cni_usr_lib.stat.exists

# === 7. Kubernetes v1.34 repository ===
- name: Ensure apt keyrings directory exists
  ansible.builtin.file:
    path: /etc/apt/keyrings
    state: directory
    mode: '0755'

- name: Download Kubernetes repo key
  ansible.builtin.shell: |
    set -o errexit
    curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.34/deb/Release.key \
      | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
  args:
    executable: /bin/bash
    creates: /etc/apt/keyrings/kubernetes-apt-keyring.gpg

- name: Add Kubernetes apt repository
  ansible.builtin.copy:
    dest: /etc/apt/sources.list.d/kubernetes.list
    content: |
      deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.34/deb/ /

- name: Update apt cache after adding Kubernetes repo
  ansible.builtin.apt:
    update_cache: yes

# === 8. Install kubelet, kubeadm, kubectl and pin their versions ===
- name: Install kubelet, kubeadm, kubectl
  ansible.builtin.apt:
    name:
      - kubelet
      - kubeadm
      - kubectl
    state: present
    update_cache: yes

- name: Hold Kubernetes packages
  ansible.builtin.command: apt-mark hold kubelet kubeadm kubectl
  register: hold_result
  # NOTE: the rc == 0 clause means this reports "changed" on every successful run
  changed_when: >-
    'hold' in hold_result.stdout
    or 'marked' in hold_result.stdout
    or hold_result.rc == 0
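A few spot checks after this role, mirroring what the tasks configure (sketch):

```bash
lsmod | grep -E 'overlay|br_netfilter'   # kernel modules loaded
sysctl net.ipv4.ip_forward               # should print 1
systemctl is-active containerd           # should print active
apt-mark showhold                        # kubelet/kubeadm/kubectl held
```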
136
ansible/roles/k8s/install/03_master/tasks/main.yml
Normal file
@@ -0,0 +1,136 @@
# roles/k8s/install/03_master/tasks/main.yml
---
# === 9. kubeadm init (same as manual step 14) ===
- name: Initialize Kubernetes control plane (kubeadm init)
  ansible.builtin.command: >
    kubeadm init
    --apiserver-advertise-address={{ ansible_default_ipv4.address }}
    --pod-network-cidr=10.244.0.0/16
  args:
    creates: /etc/kubernetes/admin.conf

# === 10. kubeconfig for root and the regular user ===
- name: Ensure kubeconfig directory for root exists
  ansible.builtin.file:
    path: /root/.kube
    state: directory
    mode: "0700"

- name: Copy admin kubeconfig for root
  ansible.builtin.copy:
    src: /etc/kubernetes/admin.conf
    dest: /root/.kube/config
    owner: root
    group: root
    mode: "0600"
    remote_src: yes

- name: Ensure kubeconfig directory for user exists
  ansible.builtin.file:
    path: "/home/adminuser/.kube"
    state: directory
    owner: "adminuser"
    group: "adminuser"
    mode: "0700"

- name: Copy admin kubeconfig to user home
  ansible.builtin.copy:
    src: /etc/kubernetes/admin.conf
    dest: "/home/adminuser/.kube/config"
    owner: "adminuser"
    group: "adminuser"
    mode: "0600"
    remote_src: yes

# === 11. Wait for the API server ===
- name: Wait for Kubernetes API to become reachable
  ansible.builtin.command: kubectl get --raw=/healthz
  register: api_health
  until: api_health.rc == 0
  retries: 30
  delay: 10
  environment:
    KUBECONFIG: /etc/kubernetes/admin.conf

# === 12. Install Flannel CNI (do NOT wait for the node to be Ready before this) ===
- name: Install Flannel CNI
  ansible.builtin.command: >
    kubectl apply --validate=false
    -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
  register: flannel_result
  until: flannel_result.rc == 0
  retries: 10
  delay: 6
  environment:
    KUBECONFIG: /etc/kubernetes/admin.conf

- name: Wait for flannel DaemonSet to be Ready
  ansible.builtin.command: >
    kubectl -n kube-flannel rollout status daemonset/kube-flannel-ds --timeout=300s
  register: flannel_rollout
  until: flannel_rollout.rc == 0
  retries: 5
  delay: 15
  environment:
    KUBECONFIG: /etc/kubernetes/admin.conf

# === 13. Now wait for the node to become Ready ===
- name: Wait for control-plane node to become Ready
  ansible.builtin.shell: |
    kubectl get node "$(hostname -s)" \
      -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}'
  register: node_ready
  until: node_ready.stdout == "True"
  retries: 30
  delay: 10
  environment:
    KUBECONFIG: /etc/kubernetes/admin.conf

# === 14. Wait for CoreDNS ===
- name: Wait for CoreDNS deployment to be Ready
  ansible.builtin.command: >
    kubectl -n kube-system rollout status deployment/coredns --timeout=300s
  register: coredns_rollout
  until: coredns_rollout.rc == 0
  retries: 5
  delay: 15
  environment:
    KUBECONFIG: /etc/kubernetes/admin.conf

# === 15. Allow pods on the master (manual step 18), if needed ===
- name: Allow scheduling pods on control-plane node
  ansible.builtin.command: >
    kubectl taint nodes --all node-role.kubernetes.io/control-plane-
  environment:
    KUBECONFIG: /etc/kubernetes/admin.conf
  when: false

# === 16. Check cluster status ===
- name: Get nodes
  ansible.builtin.command: kubectl get nodes
  register: nodes_out
  environment:
    KUBECONFIG: /etc/kubernetes/admin.conf

- name: Show nodes
  ansible.builtin.debug:
    var: nodes_out.stdout

- name: Get all pods in all namespaces
  ansible.builtin.command: kubectl get pods -A
  register: pods_out
  environment:
    KUBECONFIG: /etc/kubernetes/admin.conf

- name: Show pods
  ansible.builtin.debug:
    var: pods_out.stdout

# === 17. Print the join command (manual step 20) ===
- name: Get kubeadm join command
  ansible.builtin.command: kubeadm token create --print-join-command
  register: join_cmd

- name: Show join command
  ansible.builtin.debug:
    msg: "Use this command on workers: {{ join_cmd.stdout }}"
13
ansible/roles/k8s/install/04_worker/tasks/main.yml
Normal file
@@ -0,0 +1,13 @@
---
# === 2. Join the cluster (same as kubeadm join in the manual script) ===
- name: Join node to Kubernetes cluster
  ansible.builtin.command: "{{ k8s_kubeadm_join_command }}"
  args:
    creates: /etc/kubernetes/kubelet.conf

# === 3. Make sure kubelet is enabled and running ===
- name: Ensure kubelet is enabled and running
  ansible.builtin.systemd:
    name: kubelet
    enabled: true
    state: started
109
ansible/roles/k8s/readme/install-k8scommon.md
Normal file
@@ -0,0 +1,109 @@
```bash
# === Become root (if not already) ===
sudo -i
```

```bash
# === 1. Update packages and base utilities ===
apt-get update -y
apt-get install -y apt-transport-https ca-certificates curl gnupg lsb-release
```

```bash
# === 2. Disable swap immediately ===
swapoff -a
```

```bash
# === 3. Remove swap from /etc/fstab (so it stays off after reboot) ===
cp /etc/fstab /etc/fstab.bak
sed -i '/ swap / s/^/#/' /etc/fstab
```

```bash
# === 4. Enable the overlay and br_netfilter kernel modules ===
cat <<EOF >/etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF

modprobe overlay
modprobe br_netfilter
```

```bash
# === 5. Configure sysctl for Kubernetes and containerd ===
cat <<EOF >/etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF

sysctl --system
```

```bash
# === 6. Install containerd ===
apt-get install -y containerd
```

```bash
# === 7. Generate the containerd config and enable SystemdCgroup ===
mkdir -p /etc/containerd
containerd config default >/etc/containerd/config.toml

# Enable SystemdCgroup
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml

# (Optional) Make sure the CNI paths are set to /opt/cni/bin and /etc/cni/net.d
sed -i 's@bin_dir = .*@bin_dir = "/opt/cni/bin"@' /etc/containerd/config.toml
sed -i 's@conf_dir = .*@conf_dir = "/etc/cni/net.d"@' /etc/containerd/config.toml

systemctl restart containerd
systemctl enable containerd
```

```bash
# === 8. Prepare directories for CNI plugins ===
mkdir -p /opt/cni/bin
mkdir -p /etc/cni/net.d
```

```bash
# === 9. Path fix for flannel: /usr/lib/cni → /opt/cni/bin ===
# IMPORTANT: if /usr/lib/cni already exists, SKIP this command
ln -s /opt/cni/bin /usr/lib/cni
```

<!-- # === 9. Install the CNI plugins (official bundle) ===
```bash
curl -L -o /tmp/cni-plugins.tgz \
  "https://github.com/containernetworking/plugins/releases/download/v1.5.1/cni-plugins-linux-amd64-v1.5.1.tgz"

tar -C /opt/cni/bin -xzvf /tmp/cni-plugins.tgz
``` -->

<!-- # === 10. (Optional) Symlink /usr/lib/cni -> /opt/cni/bin, if it does NOT exist ===
if [ ! -e /usr/lib/cni ]; then
  ln -s /opt/cni/bin /usr/lib/cni
fi -->

```bash
# === 10. Add the official Kubernetes repository (pkgs.k8s.io, v1.34 branch) ===
mkdir -p /etc/apt/keyrings

curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.34/deb/Release.key \
  | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg

cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.34/deb/ /
EOF

apt-get update -y
```

```bash
# === 11. Install kubelet, kubeadm, kubectl and pin their versions ===
apt-get install -y kubelet kubeadm kubectl
apt-mark hold kubelet kubeadm kubectl
```
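A short verification pass after the common steps (sketch):

```bash
containerd --version
kubeadm version -o short
kubectl version --client
```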
53
ansible/roles/k8s/readme/install-k8smaster.md
Normal file
@@ -0,0 +1,53 @@
```bash
# === 13. Look up the master's IP addresses ===
hostname -I

# Note the right IP (e.g. 192.168.0.26) and substitute it into the next command.
# The pod CIDR for Flannel is 10.244.0.0/16
```

```bash
# === 14. Initialize the control plane (kubeadm init) ===
kubeadm init \
  --apiserver-advertise-address=192.168.0.154 \
  --pod-network-cidr=10.244.0.0/16
```

```bash
# === 15. Set up kubeconfig for root (so kubectl works without extra flags) ===
mkdir -p /root/.kube
cp /etc/kubernetes/admin.conf /root/.kube/config
chown root:root /root/.kube/config
```

```bash
# === 16. (Optional) Copy the kubeconfig to the regular user adminuser ===
# REPLACE adminuser with your own username
mkdir -p /home/adminuser/.kube
cp /etc/kubernetes/admin.conf /home/adminuser/.kube/config
chown adminuser:adminuser /home/adminuser/.kube/config
```

```bash
# === 17. Install Flannel as the CNI plugin ===
kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
```

```bash
# === 18. (Optional) Allow pods on the master (single-node cluster) ===
# If the master should also serve as a worker:
kubectl taint nodes --all node-role.kubernetes.io/control-plane-
```

```bash
# === 19. Check cluster status ===
kubectl get nodes
kubectl get pods -A
```

```bash
# === 20. Get the join command for worker nodes ===
kubeadm token create --print-join-command

# Copy the full "kubeadm join ..." command it prints; it is needed on the workers.
```
14
ansible/roles/k8s/readme/install-k8sworker.md
Normal file
@@ -0,0 +1,14 @@
```bash
# === 21. Run the join command obtained on the master ===
# Example (THIS IS ONLY AN EXAMPLE, USE YOUR OWN COMMAND FROM STEP 20):

kubeadm join 192.168.0.154:6443 --token 9jz5xr.xvwirgtsp2v2brge \
  --discovery-token-ca-cert-hash sha256:e09d4918b52e647af493e8345504ecb9907e79637a52932e730df350d3f76ede
```

```bash
# === 22. Verify from the master that the worker joined the cluster ===
# These commands run on the master node:
kubectl get nodes
kubectl get pods -A
```
87
ansible/roles/k8s/readme/install-keyclock.md
Normal file
@@ -0,0 +1,87 @@
```bash
helm repo add codecentric https://codecentric.github.io/helm-charts
helm repo update
```

```bash
kubectl create namespace keycloak
```

```bash
vim values-keycloak.yaml

# Which Keycloak image to install
image:
  repository: quay.io/keycloak/keycloak
  # Put in the version you want to pin
  # (26.0.7 is an example; check the current tags at quay.io/keycloak/keycloak)
  tag: "26.0.7"
  pullPolicy: IfNotPresent

replicas: 1

# HTTP path Keycloak will be served under
http:
  # "/" or "/auth", as you prefer; "/" is used here for simplicity
  relativePath: "/"

# Connection to an external PostgreSQL database
database:
  vendor: postgres
  hostname: postgres-postgresql.postgres.svc.cluster.local
  port: 5432
  database: keycloak
  username: keycloak_user
  password: "password"

# Keycloak start command (recommended kc.sh style)
command:
  - "/opt/keycloak/bin/kc.sh"
  - "start"
  - "--http-enabled=true"
  - "--http-port=8080"
  - "--hostname-strict=false"
  - "--hostname-strict-https=false"
  - "--proxy=edge"

# NGINX ingress at keycloak.local
ingress:
  enabled: true
  ingressClassName: "nginx"
  annotations:
    nginx.ingress.kubernetes.io/ssl-redirect: "false"
  rules:
    - host: "keycloak.local"
      paths:
        - path: '{{ tpl .Values.http.relativePath $ | trimSuffix "/" }}/'
          pathType: Prefix
  tls: []  # TLS can be enabled later via cert-manager

# Keycloak environment variables
extraEnv: |
  # Admin user and password
  - name: KEYCLOAK_ADMIN
    value: admin
  - name: KEYCLOAK_ADMIN_PASSWORD
    value: password

  # Proxy / hostname settings
  - name: KC_PROXY
    value: edge
  - name: KC_HOSTNAME
    value: "keycloak.local"

  # JGroups discovery via the chart's headless service
  - name: JAVA_OPTS_APPEND
    value: >-
      -XX:+UseContainerSupport
      -XX:MaxRAMPercentage=50.0
      -Djava.awt.headless=true
      -Djgroups.dns.query={{ include "keycloak.fullname" . }}-headless


helm install keycloak codecentric/keycloakx \
  --namespace keycloak \
  --values values-keycloak.yaml
```
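A basic post-install check (sketch; exact resource names depend on the chart's defaults):

```bash
kubectl -n keycloak get pods,svc,ingress
```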
288
ansible/roles/k8s/readme/install-netbird.md
Normal file
288
ansible/roles/k8s/readme/install-netbird.md
Normal file
@@ -0,0 +1,288 @@
|
|||||||
|
```bash
|
||||||
|
helm repo add jaconi https://charts.jaconi.io
|
||||||
|
helm repo update
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
fullnameOverride: "netbird"
|
||||||
|
|
||||||
|
config:
|
||||||
|
database:
|
||||||
|
DB_TYPE: postgres
|
||||||
|
HOST: postgres-postgresql.postgres.svc.cluster.local
|
||||||
|
PORT: 5432
|
||||||
|
NAME: netbird
|
||||||
|
USER: netbird_user
|
||||||
|
PASSWD: password
|
||||||
|
|
||||||
|
relay:
|
||||||
|
enabled: true
|
||||||
|
config:
|
||||||
|
NB_EXPOSED_ADDRESS: "netbird-relay.netbird.svc.cluster.local:33080"
|
||||||
|
|
||||||
|
signal:
|
||||||
|
enabled: true
|
||||||
|
|
||||||
|
management:
|
||||||
|
enabled: true
|
||||||
|
config:
|
||||||
|
NETBIRD_SIGNAL_URI: "netbird-signal.netbird.svc.cluster.local:10000"
|
||||||
|
NETBIRD_SIGNAL_PROTOCOL: "https"
|
||||||
|
NETBIRD_RELAY_DOMAIN: "netbird-relay.netbird.svc.cluster.local"
|
||||||
|
NETBIRD_RELAY_PORT: "33080"
|
||||||
|
NETBIRD_STUN_URI: "stun:netbird-signal.netbird.svc.cluster.local:3478"
|
||||||
|
NETBIRD_TURN_URI: "turn:netbird-signal.netbird.svc.cluster.local:3478"
|
||||||
|
|
||||||
|
dashboard:
|
||||||
|
enabled: true
|
||||||
|
service:
|
||||||
|
type: ClusterIP
|
||||||
|
ingress:
|
||||||
|
enabled: false
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
openssl rand -hex 32
|
||||||
|
|
||||||
|
kubectl create secret generic netbird-relay-secret \
|
||||||
|
-n netbird \
|
||||||
|
--from-literal=netbird-relay-secret-key="8626c1ed1c8cfcb13df6c65819042771a2bf7a280c16f0ba54abea8cde7b560d"
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
helm install netbird jaconi/netbird \
|
||||||
|
-n netbird \
|
||||||
|
--create-namespace \
|
||||||
|
-f netbird-values.yaml
|
||||||
|
|
||||||
|
or
|
||||||
|
|
||||||
|
helm upgrade netbird jaconi/netbird \
|
||||||
|
-n netbird \
|
||||||
|
-f netbird-values.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
kubectl -n netbird get pods
|
||||||
|
kubectl -n netbird get svc
|
||||||
|
kubectl -n netbird get ingress
|
||||||
|
```
|
||||||
|
|
||||||
|
<!-- dashboard -->
|
||||||
|
```bash
|
||||||
|
vim netbird-dashboard-deployment.yaml
|
||||||
|
|
||||||
|
apiVersion: apps/v1
|
||||||
|
kind: Deployment
|
||||||
|
metadata:
|
||||||
|
name: netbird-dashboard
|
||||||
|
namespace: netbird
|
||||||
|
labels:
|
||||||
|
app: netbird-dashboard
|
||||||
|
spec:
|
||||||
|
replicas: 1
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
app: netbird-dashboard
|
||||||
|
template:
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
app: netbird-dashboard
|
||||||
|
spec:
|
||||||
|
containers:
|
||||||
|
- name: dashboard
|
||||||
|
image: netbirdio/dashboard:0.45.1
|
||||||
|
ports:
|
||||||
|
- containerPort: 80
|
||||||
|
env:
|
||||||
|
- name: NB_MANAGEMENT_API_ENDPOINT
|
||||||
|
value: "http://netbird.local:30830"
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
vim netbird-dashboard-service.yaml
|
||||||
|
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Service
|
||||||
|
metadata:
|
||||||
|
name: netbird-dashboard
|
||||||
|
namespace: netbird
|
||||||
|
spec:
|
||||||
|
selector:
|
||||||
|
app: netbird-dashboard
|
||||||
|
ports:
|
||||||
|
- protocol: TCP
|
||||||
|
port: 80
|
||||||
|
targetPort: 80
|
||||||
|
type: ClusterIP
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
vim netbird-dashboard-ingress.yaml
|
||||||
|
|
||||||
|
apiVersion: networking.k8s.io/v1
|
||||||
|
kind: Ingress
|
||||||
|
metadata:
|
||||||
|
name: netbird-dashboard
|
||||||
|
namespace: netbird
|
||||||
|
spec:
|
||||||
|
ingressClassName: nginx
|
||||||
|
rules:
|
||||||
|
- host: netbird.local
|
||||||
|
http:
|
||||||
|
paths:
|
||||||
|
- path: /
|
||||||
|
pathType: Prefix
|
||||||
|
backend:
|
||||||
|
service:
|
||||||
|
name: netbird-dashboard
|
||||||
|
port:
|
||||||
|
number: 80
|
||||||
|
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
kubectl apply -f netbird-dashboard-deployment.yaml
|
||||||
|
kubectl apply -f netbird-dashboard-service.yaml
|
||||||
|
kubectl apply -f netbird-dashboard-ingress.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
C:\Windows\System32\drivers\etc\hosts
|
||||||
|
```
|
||||||
|
|
||||||
|
# k8s
|
||||||
|
|
||||||
|
```bash
|
||||||
|
vim netbird-application.yaml
|
||||||
|
|
||||||
|
apiVersion: argoproj.io/v1alpha1
|
||||||
|
kind: Application
|
||||||
|
metadata:
|
||||||
|
name: netbird # как будет называться приложение в ArgoCD
|
||||||
|
namespace: argocd # namespace, где установлен ArgoCD
|
||||||
|
spec:
|
||||||
|
project: default
|
||||||
|
|
||||||
|
source:
|
||||||
|
repoURL: https://charts.jaconi.io # тот самый helm repo
|
||||||
|
chart: netbird # имя чарта
|
||||||
|
targetRevision: "*" # можно зафиксировать версию, пока пусть будет любая
|
||||||
|
helm:
|
||||||
|
releaseName: netbird # как будто ты делал "helm install netbird ..."
|
||||||
|
values: |-
|
||||||
|
fullnameOverride: "netbird"
|
||||||
|
|
||||||
|
config:
|
||||||
|
database:
|
||||||
|
DB_TYPE: postgres
|
||||||
|
HOST: postgres-postgresql.postgres.svc.cluster.local
|
||||||
|
PORT: 5432
|
||||||
|
NAME: netbird
|
||||||
|
USER: netbird_user
|
||||||
|
PASSWD: password
|
||||||
|
|
||||||
|
relay:
|
||||||
|
enabled: true
|
||||||
|
config:
|
||||||
|
NB_EXPOSED_ADDRESS: "netbird-relay.netbird.svc.cluster.local:33080"
|
||||||
|
|
||||||
|
signal:
|
||||||
|
enabled: true
|
||||||
|
|
||||||
|
management:
|
||||||
|
enabled: true
|
||||||
|
config:
|
||||||
|
NETBIRD_SIGNAL_URI: "netbird-signal.netbird.svc.cluster.local:10000"
|
||||||
|
NETBIRD_SIGNAL_PROTOCOL: "https"
|
||||||
|
NETBIRD_RELAY_DOMAIN: "netbird-relay.netbird.svc.cluster.local"
|
||||||
|
NETBIRD_RELAY_PORT: "33080"
|
||||||
|
NETBIRD_STUN_URI: "stun:netbird-signal.netbird.svc.cluster.local:3478"
|
||||||
|
NETBIRD_TURN_URI: "turn:netbird-signal.netbird.svc.cluster.local:3478"
|
||||||
|
|
||||||
|
dashboard:
|
||||||
|
enabled: true
|
||||||
|
service:
|
||||||
|
type: ClusterIP
|
||||||
|
ingress:
|
||||||
|
enabled: true
|
||||||
|
className: nginx
|
||||||
|
hosts:
|
||||||
|
- host: netbird.local
|
||||||
|
paths:
|
||||||
|
- path: /
|
||||||
|
pathType: Prefix
|
||||||
|
|
||||||
|
destination:
|
||||||
|
server: https://kubernetes.default.svc
|
||||||
|
namespace: netbird # сюда чарты будут ставиться
|
||||||
|
|
||||||
|
syncPolicy:
|
||||||
|
automated:
|
||||||
|
prune: true
|
||||||
|
selfHeal: true
|
||||||
|
syncOptions:
|
||||||
|
- CreateNamespace=true
|
||||||
|
|
||||||
|
kubectl apply -f netbird-application.yaml -n argocd
|
||||||
|
```

```bash
kubectl create namespace netbird || true

kubectl create secret generic netbird-relay-secret \
  -n netbird \
  --from-literal=netbird-relay-secret-key="8626c1ed1c8cfcb13df6c65819042771a2bf7a280c16f0ba54abea8cde7b560d"
```
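
The relay secret key is an arbitrary random value; one way to generate a fresh one instead of reusing the example above:

```bash
openssl rand -hex 32
```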

```bash
helm repo add jaconi https://charts.jaconi.io
helm repo update

vim netbird-dashboard-values.yaml

image:
  # UI image version; the v2.22.2 tag is available on Docker Hub
  # see netbirdio/dashboard:v2.22.2
  tag: v2.22.2

auth:
  # OIDC provider (for example, Keycloak)
  authority: https://keycloak.example.com/realms/homelab
  audience: netbird
  clientID: netbird
  supportedScopes: >
    openid profile email offline_access netbird-api

netbird:
  # HTTP API of the NetBird management service (the same one clients connect to)
  managementApiEndpoint: https://netbird.example.com
  # gRPC endpoint of the same service
  managementGrpcApiEndpoint: https://netbird.example.com

ingress:
  enabled: true
  className: nginx
  annotations:
    # cert-manager example; remove if you don't use it
    cert-manager.io/cluster-issuer: letsencrypt
  hosts:
    - host: netbird.example.com
      paths:
        - path: /
          pathType: Prefix
  tls:
    - secretName: netbird-tls-certificate
      hosts:
        - netbird.example.com

# any namespace works, but netbird is the usual choice
kubectl create namespace netbird --dry-run=client -o yaml | kubectl apply -f -

helm install netbird-dashboard jaconi/netbird-dashboard \
  --namespace netbird \
  --values netbird-dashboard-values.yaml
```
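
A quick post-install check (release name and namespace as above):

```bash
helm status netbird-dashboard -n netbird
kubectl get pods -n netbird
```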

20
ansible/roles/node/change_hostname/main.yml
Normal file
@@ -0,0 +1,20 @@
---
- name: Set hostname
  ansible.builtin.hostname:
    name: "{{ hostname }}"

- name: Ensure /etc/hosts has proper hostname entry
  ansible.builtin.lineinfile:
    path: /etc/hosts
    regexp: "^127\\.0\\.1\\.1"
    line: "127.0.1.1 {{ hostname }}"
    create: yes
    backup: yes

- name: Reboot system
  ansible.builtin.reboot:
    msg: "Rebooting after hostname change"
    connect_timeout: 5
    reboot_timeout: 300
    pre_reboot_delay: 0
    post_reboot_delay: 10
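
A sketch of wiring this role into a play (host group and hostname value are illustrative; roles_path is assumed to point at ansible/roles). Note the task file sits at main.yml rather than tasks/main.yml like the other roles, so Ansible won't load it as role tasks without moving it or using include_tasks. The role reboots the target at the end.

```bash
- hosts: fresh_nodes
  become: true
  roles:
    - role: node/change_hostname
      vars:
        hostname: pve-worker-01
```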

5
ansible/roles/node/execute_command/tasks/main.yml
Normal file
@@ -0,0 +1,5 @@
---
- name: execute [ {{ command }} ] command
  ansible.builtin.command: "{{ command }}"
  args:
    chdir: "{{ chdir | default(omit) }}"
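
Invocation sketch; `command` is required, `chdir` is optional and omitted when unset:

```bash
- role: node/execute_command
  vars:
    command: ls -la
    chdir: /opt
```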

7
ansible/roles/node/push_dir/tasks/main.yml
Normal file
@@ -0,0 +1,7 @@
---
- name: copy local directory to remote node (recursive)
  ansible.builtin.copy:
    src: "{{ resource_dir }}"
    dest: "{{ target_dir }}"
    mode: "0644"
    directory_mode: "0755"
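
Usage sketch with placeholder paths. Per ansible.builtin.copy semantics, `src` without a trailing slash copies the directory itself into `dest`; with a trailing slash only its contents are copied:

```bash
- role: node/push_dir
  vars:
    resource_dir: files/configs
    target_dir: /etc/myapp/
```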

5
ansible/roles/node/remove_file/tasks/main.yml
Normal file
@@ -0,0 +1,5 @@
- name: remove file
  become: true
  ansible.builtin.file:
    path: "{{ file_path }}"
    state: absent

6
ansible/roles/node/remove_user/defaults/main.yml
Normal file
@@ -0,0 +1,6 @@
---
# Whether to remove the home directory and mail spool (/var/mail/<user>)
remove_user_home: true

# Force removal even if the user still has running processes (useful for build hosts/packer)
remove_user_force: true

13
ansible/roles/node/remove_user/tasks/main.yml
Normal file
@@ -0,0 +1,13 @@
---
- name: remove sudoers drop-in for {{ remove_user }} user (if exists)
  ansible.builtin.file:
    path: "/etc/sudoers.d/{{ remove_user }}"
    state: absent
    mode: "0440"

- name: remove {{ remove_user }} user
  ansible.builtin.user:
    name: "{{ remove_user }}"
    state: absent
    remove: "{{ remove_user_home }}"
    force: "{{ remove_user_force }}"
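
Usage sketch, e.g. dropping a temporary build account after imaging (the user name is illustrative):

```bash
- role: node/remove_user
  vars:
    remove_user: packer
```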

5
ansible/roles/ntp/chrony/handlers/main.yml
Normal file
@@ -0,0 +1,5 @@
---
- name: restart chrony
  ansible.builtin.service:
    name: chrony
    state: restarted

74
ansible/roles/ntp/chrony/tasks/main.yml
Normal file
@@ -0,0 +1,74 @@
---
- name: install chrony
  ansible.builtin.apt:
    name:
      - chrony
    state: present
    update_cache: true

# avoid running two time clients at once (minimal, no elaborate checks)
- name: stop and disable systemd-timesyncd (if exists)
  ansible.builtin.service:
    name: systemd-timesyncd
    state: stopped
    enabled: false
  ignore_errors: true

- name: ensure /etc/chrony/sources.d exists
  ansible.builtin.file:
    path: /etc/chrony/sources.d
    state: directory
    owner: root
    group: root
    mode: "0755"

- name: ensure /etc/chrony/conf.d exists
  ansible.builtin.file:
    path: /etc/chrony/conf.d
    state: directory
    owner: root
    group: root
    mode: "0755"

- name: deploy /etc/chrony/chrony.conf
  ansible.builtin.template:
    src: chrony.conf.j2
    dest: /etc/chrony/chrony.conf
    owner: root
    group: root
    mode: "0644"
  notify: restart chrony

- name: configure upstream sources
  ansible.builtin.template:
    src: 00-upstream.sources.j2
    dest: /etc/chrony/sources.d/00-upstream.sources
    owner: root
    group: root
    mode: "0644"
  notify: restart chrony

# server mode: allow clients (optional)
- name: configure allowed client networks (optional)
  ansible.builtin.template:
    src: 00-allow.conf.j2
    dest: /etc/chrony/conf.d/00-allow.conf
    owner: root
    group: root
    mode: "0644"
  when: chrony_allow_networks | length > 0
  notify: restart chrony

# if allow was configured earlier but the role now runs as a plain client, clean the file up
- name: remove allow config when not needed
  ansible.builtin.file:
    path: /etc/chrony/conf.d/00-allow.conf
    state: absent
  when: chrony_allow_networks | length == 0
  notify: restart chrony

- name: ensure chrony is enabled and started
  ansible.builtin.service:
    name: chrony
    enabled: true
    state: started
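
The tasks and templates assume `chrony_upstream_sources` and `chrony_allow_networks` are defined; a defaults/main.yml sketch (this file is not part of the commit, values are examples taken from the readme below):

```bash
---
chrony_upstream_sources:
  - time.cloudflare.com
  - time.google.com

# leave empty for client-only mode; set networks to serve NTP to clients
chrony_allow_networks: []
```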

5
ansible/roles/ntp/chrony/templates/00-allow.conf.j2
Normal file
@@ -0,0 +1,5 @@
# Managed by Ansible: allow NTP clients (server)
deny all
{% for net in chrony_allow_networks %}
allow {{ net }}
{% endfor %}

4
ansible/roles/ntp/chrony/templates/00-upstream.sources.j2
Normal file
@@ -0,0 +1,4 @@
# Managed by Ansible: upstream NTP sources
{% for s in chrony_upstream_sources %}
server {{ s }} iburst
{% endfor %}

47
ansible/roles/ntp/chrony/templates/chrony.conf.j2
Normal file
@@ -0,0 +1,47 @@
# Welcome to the chrony configuration file. See chrony.conf(5) for more
# information about usable directives.

# Use Debian vendor zone.
# pool 2.debian.pool.ntp.org iburst

# Use time sources from DHCP.
# sourcedir /run/chrony-dhcp

# Use NTP sources found in /etc/chrony/sources.d.
sourcedir /etc/chrony/sources.d

# This directive specifies the location of the file containing ID/key pairs for
# NTP authentication.
keyfile /etc/chrony/chrony.keys

# This directive specifies the file into which chronyd will store the rate
# information.
driftfile /var/lib/chrony/chrony.drift

# Save NTS keys and cookies.
ntsdumpdir /var/lib/chrony

# Uncomment the following line to turn logging on.
#log tracking measurements statistics

# Log files location.
logdir /var/log/chrony

# Stop bad estimates upsetting machine clock.
maxupdateskew 100.0

# This directive enables kernel synchronisation (every 11 minutes) of the
# real-time clock. Note that it can't be used along with the 'rtcfile' directive.
rtcsync

# Step the system clock instead of slewing it if the adjustment is larger than
# one second, but only in the first three clock updates.
makestep 1 3

# Get TAI-UTC offset and leap seconds from the system tz database.
# This directive must be commented out when using time sources serving
# leap-smeared time.
leapseclist /usr/share/zoneinfo/leap-seconds.list

# Include configuration files found in /etc/chrony/conf.d.
confdir /etc/chrony/conf.d

20
ansible/roles/ntp/readme.md
Normal file
@@ -0,0 +1,20 @@
```bash
vim /etc/chrony/chrony.conf

# comment these out
pool 2.debian.pool.ntp.org iburst
sourcedir /run/chrony-dhcp

# define external upstreams in a separate file
cat >/etc/chrony/sources.d/00-upstream.sources <<'EOF'
server ntp.time.in.ua iburst
server ntp2.time.in.ua iburst
server time.google.com iburst
server time.cloudflare.com iburst
EOF

# apply and verify
systemctl restart chrony
chronyc sources -v
chronyc tracking
```

48
ansible/roles/packer/install/tasks/main.yml
Normal file
@@ -0,0 +1,48 @@
---
- name: install base deps for HashiCorp repo
  ansible.builtin.apt:
    update_cache: true
    name:
      - ca-certificates # for HTTPS downloads
      - curl # to fetch packer/plugins
      - gnupg
      - lsb-release
      - unzip # packer artifacts often ship as zip
    state: present

- name: ensure keyrings dir exists
  ansible.builtin.file:
    path: /usr/share/keyrings
    state: directory
    mode: "0755"

- name: add HashiCorp GPG key (dearmored)
  ansible.builtin.shell: |
    set -euo pipefail
    curl -fsSL https://apt.releases.hashicorp.com/gpg \
      | gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg
  args:
    executable: /bin/bash
    creates: /usr/share/keyrings/hashicorp-archive-keyring.gpg

- name: add HashiCorp APT repository
  ansible.builtin.copy:
    dest: /etc/apt/sources.list.d/hashicorp.list
    mode: "0644"
    content: |
      deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com {{ ansible_distribution_release }} main

- name: install packer
  ansible.builtin.apt:
    update_cache: true
    name: packer
    state: present

- name: check packer version
  ansible.builtin.command: packer version
  register: packer_version
  changed_when: false

- name: print packer version
  ansible.builtin.debug:
    var: packer_version.stdout

33
ansible/roles/packer/run/tasks/main.yml
Normal file
@@ -0,0 +1,33 @@
---
- name: ensure packer exists
  ansible.builtin.command: packer version
  changed_when: false

- name: packer init
  ansible.builtin.command: packer init .
  args:
    chdir: "{{ packer_config_dir }}"
  changed_when: false

- name: packer fmt
  ansible.builtin.command: packer fmt -recursive .
  args:
    chdir: "{{ packer_config_dir }}"
  changed_when: false

- name: packer validate
  ansible.builtin.command: packer validate .
  args:
    chdir: "{{ packer_config_dir }}"
  changed_when: false

- name: packer build
  ansible.builtin.shell: |
    set -euo pipefail
    stdbuf -oL -eL packer build -on-error=cleanup -timestamp-ui .
  args:
    chdir: "{{ packer_config_dir }}"
    executable: /bin/bash
  environment:
    PACKER_LOG: "1"
    PACKER_LOG_PATH: ""
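
The role expects `packer_config_dir` to point at a directory of Packer HCL templates; an invocation sketch (paths and host group are illustrative):

```bash
- hosts: builder
  become: true
  roles:
    - role: packer/run
      vars:
        packer_config_dir: /work/packer/debian-golden
```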

4
ansible/roles/proxmox/enable_snippets/tasks/main.yml
Normal file
@@ -0,0 +1,4 @@
---
- name: enable snippets on storage "local"
  ansible.builtin.command: >
    pvesm set local --content backup,iso,vztmpl,snippets

41
ansible/roles/proxmox/install_nvidia_driver/readme.md
Normal file
@@ -0,0 +1,41 @@
## 1.0 Quick check that the host sees the GPU
lspci -nn | grep -i nvidia

## 1.1 GRUB
nano /etc/default/grub
GRUB_CMDLINE_LINUX_DEFAULT="quiet iommu=pt"
update-grub
reboot

## 1.2 VFIO modules
nano /etc/modules-load.d/vfio.conf
vfio
vfio_iommu_type1
vfio_pci
vfio_virqfd

## 1.3 Bind the GPU to vfio-pci by ID
nano /etc/modprobe.d/vfio.conf
options vfio-pci ids=10de:2d58,10de:22eb disable_vga=1

## 1.4 Blacklist nouveau (and don't install the nvidia driver on the host if you want "clean" passthrough)
nano /etc/modprobe.d/blacklist-nouveau.conf
blacklist nouveau
options nouveau modeset=0

## 1.5 Rebuild initramfs and reboot
update-initramfs -u -k all
reboot

## 1.6 Verify the GPU really went to VFIO
dmesg | grep -E "AMD-Vi|IOMMU" | tail -n 50
lspci -nnk -s 01:00.0
lspci -nnk -s 01:00.1

## In Proxmox, create a PCI mapping for the RTX 5070
Datacenter → Resource Mapping → PCI Devices → Add
Create the mapping:
rtx5070_gpu → 0000:01:00

dmesg | grep -E "IOMMU|AMD-Vi"

10
ansible/roles/proxmox/lxc/download_template/tasks/main.yml
Normal file
@@ -0,0 +1,10 @@
---
- name: update LXC template index
  ansible.builtin.command: pveam update
  register: pveam_update
  changed_when: false

- name: download LXC template
  ansible.builtin.command: "pveam download local {{ lxc_template_name }}"
  args:
    creates: "/var/lib/vz/template/cache/{{ lxc_template_name }}"
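
Calling sketch; the template name is an example, pick an exact name from `pveam available`:

```bash
- role: proxmox/lxc/download_template
  vars:
    lxc_template_name: debian-12-standard_12.7-1_amd64.tar.zst
```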