commit f243f440c3c86e2c4349ae7266ea76adce1d244e
Author: Hrankin, Aleksandr (contracted)
Date: Thu Feb 19 11:34:13 2026 +0000

    init

diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
new file mode 100755
index 0000000..959291e
--- /dev/null
+++ b/.devcontainer/Dockerfile
@@ -0,0 +1,53 @@
+FROM debian:bookworm-slim
+
+ARG DEBIAN_FRONTEND=noninteractive
+ARG TERRAFORM_VERSION=1.8.5
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    ca-certificates curl unzip git \
+    make openssh-client \
+    python3 python3-pip python3-venv \
+    locales gnupg \
+    && rm -rf /var/lib/apt/lists/*
+
+# Generate the UTF-8 locale
+RUN sed -i 's/^# *\(en_US.UTF-8 UTF-8\)/\1/' /etc/locale.gen \
+    && locale-gen
+
+ENV LANG=en_US.UTF-8 \
+    LANGUAGE=en_US:en \
+    LC_ALL=en_US.UTF-8
+
+# --- Packer (via the HashiCorp APT repo) ---
+RUN set -eux; \
+    curl -fsSL https://apt.releases.hashicorp.com/gpg | gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg; \
+    codename="$(. /etc/os-release && echo "$VERSION_CODENAME")"; \
+    echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com ${codename} main" > /etc/apt/sources.list.d/hashicorp.list; \
+    apt-get update; \
+    apt-get install -y --no-install-recommends packer; \
+    rm -rf /var/lib/apt/lists/*; \
+    packer version
+
+# --- Ansible (in a venv) ---
+RUN python3 -m venv /opt/ansible \
+    && /opt/ansible/bin/pip install --no-cache-dir --upgrade pip \
+    && /opt/ansible/bin/pip install --no-cache-dir ansible \
+    && ln -sf /opt/ansible/bin/ansible /usr/local/bin/ansible \
+    && ln -sf /opt/ansible/bin/ansible-playbook /usr/local/bin/ansible-playbook \
+    && ln -sf /opt/ansible/bin/ansible-galaxy /usr/local/bin/ansible-galaxy \
+    && ansible --version
+
+# --- Terraform ---
+RUN set -eux; \
+    arch="$(dpkg --print-architecture)"; \
+    case "$arch" in \
+        amd64) tf_arch="amd64" ;; \
+        arm64) tf_arch="arm64" ;; \
+        *) echo "Unsupported arch: $arch"; exit 1 ;; \
+    esac; \
+    curl -fsSL "https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_${tf_arch}.zip" -o /tmp/terraform.zip; \
+    unzip /tmp/terraform.zip -d /usr/local/bin; \
+    rm -f /tmp/terraform.zip; \
+    terraform version
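+
+# Note: TERRAFORM_VERSION is a build arg, so it can be overridden at build time
+# without editing this file, e.g. (hypothetical newer version number):
+#   docker build --build-arg TERRAFORM_VERSION=1.9.8 -t debian-devops .devcontainer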
+
+WORKDIR /work
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
new file mode 100755
index 0000000..abd7e97
--- /dev/null
+++ b/.devcontainer/devcontainer.json
@@ -0,0 +1,39 @@
+{
+  "name": "debian-devops",
+  "build": {
+    "dockerfile": "Dockerfile"
+  },
+  "remoteUser": "root",
+  "forwardPorts": [
+    8006
+  ],
+  "portsAttributes": {
+    "8006": {
+      "label": "Proxmox 8006"
+    }
+  },
+  "customizations": {
+    "vscode": {
+      "extensions": [
+        "saoudrizwan.claude-dev",
+        "hashicorp.terraform",
+        "redhat.vscode-yaml",
+        "EditorConfig.EditorConfig",
+        "eamodio.gitlens",
+        "bierner.markdown-preview-github-styles"
+      ],
+      "settings": {
+        "editor.formatOnSave": true,
+        "[terraform]": {
+          "editor.defaultFormatter": "hashicorp.terraform",
+          "editor.formatOnSave": true
+        },
+        "[terraform-vars]": {
+          "editor.defaultFormatter": "hashicorp.terraform",
+          "editor.formatOnSave": true
+        }
+      }
+    }
+  },
+  "postCreateCommand": "git --version && ansible --version && terraform version && packer version"
+}
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..cb6ccab
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,18 @@
+.ssh
+.env*
+inventory*
+terraform.tfvars
+
+# Terraform
+**/.terraform/*
+**/*.tfstate
+**/*.tfstate.*
+**/*.tfplan
+**/crash.log
+**/crash.*.log
+*.auto.tfvars
+*.auto.tfvars.json
+**/.terraform.lock.hcl
+**/.terraform
+
+# **/terraform.tfvars
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..0a815ad
--- /dev/null
+++ b/README.md
@@ -0,0 +1,271 @@
+# 🧠 DevOps Infra Stack --- Proxmox + Ceph + Kubernetes + DNS
+
+Fully automated self-hosted infrastructure.
+
+![Project Logo](documentation/images/arch-diagram.jpg)
+
+The project deploys:
+- Proxmox infrastructure
+- Golden VM templates via Packer
+- VM provisioning via Terraform
+- Hardened nodes (SSH, nftables, fail2ban)
+- DNS (PowerDNS)
+- NTP (chrony hierarchy)
+- Ceph cluster
+- Kubernetes cluster
+- K8s apps (MetalLB, ingress, postgres operator, valkey)
+
+Everything is deployed via Makefile + Ansible + Terraform + Packer.
+
+------------------------------------------------------------------------
+
+# 🏗 Architecture
+
+Infrastructure components:
+
+- Proxmox host (bare metal)
+- LXC packer builder
+- Golden VM templates
+- VM nodes:
+  - DNS
+  - NTP
+  - Ceph (3 nodes)
+  - Kubernetes master
+  - Kubernetes worker
+- K8s stack:
+  - MetalLB
+  - nginx ingress
+  - Crunchy Postgres Operator
+  - Valkey (Redis alternative)
+
+------------------------------------------------------------------------
+
+# 📦 Technology Stack
+
+- Proxmox VE
+- Terraform
+- Ansible
+- Packer
+- Docker + Docker Compose (for DNS)
+- Ceph
+- Kubernetes
+- Helm
+- PowerDNS
+- Chrony
+- nftables + fail2ban hardening
+
+------------------------------------------------------------------------
+
+# 🚀 Full Infrastructure Bootstrap
+
+Main entrypoint:
+
+``` bash
+make -f bootstrap.mk
+```
+
+It will execute:
+
+1. VM creation
+2. Hardening
+3. DNS setup
+4. NTP setup
+5. Ceph cluster
+
+------------------------------------------------------------------------
+
+# 🧱 Deployment Stages
+
+## 0. Create LXC + Packer
+
+``` bash
+make -f 00_create_and_setup_lxc_container_with_packer.mk
+```
+
+- Download LXC template
+- Create LXC via Terraform
+- Install packer inside LXC
+
+------------------------------------------------------------------------
+
+## 1. Golden VM template
+
+``` bash
+make -f 01_create_vm_golden_template.mk
+```
+
+- Download ISO
+- Upload packer config
+- Build golden image
+- Shut down packer LXC
+
+------------------------------------------------------------------------
+
+## 2. Create VMs
+
+``` bash
+make -f 02_create_vms.mk
+```
+
+- Enable cloud-init snippets
+- Terraform creates VMs
+
+------------------------------------------------------------------------
+
+## 3. Harden nodes
+
+``` bash
+make -f 03_harden_vms.mk
+```
+
+- Remove packer user
+- SSH hardening
+- nftables
+- fail2ban
+
+------------------------------------------------------------------------
+
+## 4. DNS
+
+``` bash
+make -f 04_setup_dns.mk
+```
+
+- PowerDNS install
+- Zones + records via Terraform
+- systemd-resolved config
+
+------------------------------------------------------------------------
+
+## 5. NTP
+
+``` bash
+make -f 05_setup_ntp.mk
+```
+
+Hierarchy:
+- edge NTP server (proxmox)
+- core NTP server
+- clients use core NTP server
+
+------------------------------------------------------------------------
+
+## 6. Ceph
+
+``` bash
+make -f 06_setup_ceph.mk
+```
+
+- install
+- bootstrap
+- share keys
+- cluster init
+
+------------------------------------------------------------------------
+
+## 7. Kubernetes
+
+``` bash
+make -f 07_setup_k8s.mk
+```
+
+After installation:
+
+``` bash
+ssh user@k8smasternode -p 10525
+```
+
+Replace the cluster endpoint in your kubeconfig with a localhost tunnel, as sketched below.
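+
+A minimal sketch (assumptions: the API server listens on 6443, the SSH
+user/port match your inventory, and `--tls-server-name` is only needed when
+127.0.0.1 is not among the API server certificate SANs):
+
+``` bash
+# keep a background tunnel from local 6443 to the master's API server
+ssh -fN -L 6443:127.0.0.1:6443 -p 10525 user@k8smasternode
+
+# point the kubeconfig at the tunnel
+kubectl config set-cluster kubernetes \
+  --server=https://127.0.0.1:6443 \
+  --tls-server-name=kubernetes
+```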
+
+Then:
+
+``` bash
+terraform apply -target=module.metallb_helm
+terraform apply -target=module.crunchy_operator
+terraform apply
+```
+
+Get credentials:
+
+``` bash
+# postgres
+kubectl -n postgres-operator get secret hippo-pguser-gitlab -o jsonpath='{.data.user}' | base64 -d; echo
+
+# valkey
+kubectl -n valkey get secret valkey-users -o jsonpath='{.data.default}' | base64 -d; echo
+```
+
+------------------------------------------------------------------------
+
+# 📁 Project Structure
+
+    ansible/
+    terraform/
+    packer/
+    makefiles/
+    bootstrap.mk
+
+------------------------------------------------------------------------
+
+# 🔐 Requirements
+
+Before running:
+
+- SSH access to Proxmox
+- Proxmox API token
+- terraform.tfvars filled
+- inventory.ini filled
+- kubeconfig path specified
+
+------------------------------------------------------------------------
+
+# 🔭 Planned Services & Future Stack
+
+The following services are planned for the next deployment stages:
+
+- **NetBird** --- internal VPN mesh network (currently working on this stage)
+- **Keycloak** --- unified authentication and identity provider across services
+- **Monitoring stack (Grafana, Loki, Prometheus, Trickster)** --- monitoring and observability tools\
+  *(previously deployed, but not yet integrated into this project)*
+- **FreeIPA** --- centralized user and identity management inside operating systems
+- **Vault** --- centralized storage for passwords, tokens, and operational credentials
+- **OpenNebula** --- additional virtualization layer for providing user VM spaces\
+  *(similar to AWS EC2 for internal infrastructure)*
+- **Nextcloud + LibreOffice** --- Google Cloud alternative for collaborative document editing\
+  *(Nextcloud deployed previously, but not yet within this project)*
+- **Element + Matrix** --- Telegram-like communication platform\
+  *(stack deployed previously, but not yet integrated into this project)*
+- **LLM (local language model)** --- neural network for text processing\
+  *(GPT‑2 already tested; LLaMA 7B planned as MVP depending on available resources)*\
+  Future usage:
+  - LibreOffice document assistant
+  - Matrix/Element chatbot integration
+- **Kafka** --- message queue layer between LibreOffice, Element, and LLM services\
+  Ensures reliable request delivery and acts as a service integration layer
+- **OCR tools** --- document recognition and conversion pipeline\
+  Enables transforming documents into formats suitable for LLM processing and search
+
+------------------------------------------------------------------------
+
+# 🧠 Project Idea
+
+A self-hosted cloud platform: your own mini cloud, running as fully autonomous infrastructure.
+ +# 👤 Author + +Aleksandr Hrankin diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg new file mode 100644 index 0000000..4b7ff20 --- /dev/null +++ b/ansible/ansible.cfg @@ -0,0 +1,6 @@ +[defaults] +inventory = ./inventory.ini +roles_path = ./roles +host_key_checking = False +deprecation_warnings = False +interpreter_python = auto diff --git a/ansible/playbooks/ceph/00_install.yml b/ansible/playbooks/ceph/00_install.yml new file mode 100644 index 0000000..e6ab32f --- /dev/null +++ b/ansible/playbooks/ceph/00_install.yml @@ -0,0 +1,12 @@ +--- +- name: install ceph + hosts: + - dev-kyiv01-vm-ceph-main-01 + - dev-kyiv01-vm-ceph-main-02 + - dev-kyiv01-vm-ceph-main-03 + become: true + + roles: + - role: ceph/00_install + + # ansible-playbook playbooks/ceph/00_install.yml -i inventory.ini diff --git a/ansible/playbooks/ceph/01_bootstrap.yml b/ansible/playbooks/ceph/01_bootstrap.yml new file mode 100644 index 0000000..d964c93 --- /dev/null +++ b/ansible/playbooks/ceph/01_bootstrap.yml @@ -0,0 +1,10 @@ +--- +- name: bootstrap ceph + hosts: + - dev-kyiv01-vm-ceph-main-01 + become: true + + roles: + - role: ceph/01_bootstrap + + # ansible-playbook playbooks/ceph/01_bootstrap.yml -i inventory.ini diff --git a/ansible/playbooks/ceph/02_share_pubkey.yml b/ansible/playbooks/ceph/02_share_pubkey.yml new file mode 100644 index 0000000..3cfdc9d --- /dev/null +++ b/ansible/playbooks/ceph/02_share_pubkey.yml @@ -0,0 +1,12 @@ +--- +- name: share ceph pubkey + hosts: + - dev-kyiv01-vm-ceph-main-01 + - dev-kyiv01-vm-ceph-main-02 + - dev-kyiv01-vm-ceph-main-03 + become: true + + roles: + - role: ceph/02_share_pubkey + + # ansible-playbook playbooks/ceph/02_share_pubkey.yml -i inventory.ini diff --git a/ansible/playbooks/ceph/03_setup_cluster.yml b/ansible/playbooks/ceph/03_setup_cluster.yml new file mode 100644 index 0000000..611187b --- /dev/null +++ b/ansible/playbooks/ceph/03_setup_cluster.yml @@ -0,0 +1,10 @@ +--- +- name: setup ceph cluster + hosts: + - dev-kyiv01-vm-ceph-main-01 + become: true + + roles: + - role: ceph/03_setup_cluster + + # ansible-playbook playbooks/ceph/03_setup_cluster.yml -i inventory.ini diff --git a/ansible/playbooks/dns/install_powerdns.yml b/ansible/playbooks/dns/install_powerdns.yml new file mode 100644 index 0000000..f0bf212 --- /dev/null +++ b/ansible/playbooks/dns/install_powerdns.yml @@ -0,0 +1,19 @@ +--- +- name: setup powerdns + hosts: + - dev-kyiv01-vm-dns-main-01 + become: true + + roles: + - role: install_docker + + - role: dns/push_powerdns_configs_to_node + vars: + dns_stack_root: /opt/dns-stack + + - role: node/execute_command + vars: + chdir: "/opt/dns-stack" + command: "docker compose up -d" + + # ansible-playbook playbooks/dns/install_powerdns.yml -i inventory.ini diff --git a/ansible/playbooks/dns/setup_systemd_resolved_config.yml b/ansible/playbooks/dns/setup_systemd_resolved_config.yml new file mode 100644 index 0000000..fc1d5ae --- /dev/null +++ b/ansible/playbooks/dns/setup_systemd_resolved_config.yml @@ -0,0 +1,17 @@ +--- +- name: setup systemd resolved config + hosts: + - dev-kyiv01-vm-dns-main-01 + - dev-kyiv01-vm-ntp-main-01 + - dev-kyiv01-vm-ceph-main-01 + - dev-kyiv01-vm-ceph-main-02 + - dev-kyiv01-vm-ceph-main-03 + - dev-kyiv01-vm-k8s-master-01 + - dev-kyiv01-vm-k8s-worker-01 + become: true + roles: + - role: dns/setup_systemd_resolved_config + vars: + dns_ip: 192.168.0.100 + + # ansible-playbook playbooks/dns/setup_systemd_resolved_config.yml -i inventory.ini diff --git a/ansible/playbooks/docker/install.yml b/ansible/playbooks/docker/install.yml 
b/ansible/playbooks/docker/install.yml
new file mode 100644
index 0000000..f7e4482
--- /dev/null
+++ b/ansible/playbooks/docker/install.yml
@@ -0,0 +1,9 @@
+---
+- name: install docker
+  hosts:
+    - ec2
+  become: true
+  roles:
+    - role: docker
+
+  # ansible-playbook playbooks/docker/install.yml -i inventory.ec2.ini
diff --git a/ansible/playbooks/gitea/main.yml b/ansible/playbooks/gitea/main.yml
new file mode 100644
index 0000000..eac0b49
--- /dev/null
+++ b/ansible/playbooks/gitea/main.yml
@@ -0,0 +1,9 @@
+---
+- name: copy gitea configs to node
+  hosts:
+    - ec2
+  become: true
+  roles:
+    - role: gitea
+
+  # ansible-playbook playbooks/gitea/main.yml -i inventory.ec2.ini
diff --git a/ansible/playbooks/harden/harden_node.yml b/ansible/playbooks/harden/harden_node.yml
new file mode 100644
index 0000000..948ad62
--- /dev/null
+++ b/ansible/playbooks/harden/harden_node.yml
@@ -0,0 +1,20 @@
+---
+- name: harden node
+  hosts: ec2
+  become: true
+  roles:
+    - role: harden/fail2ban
+
+    - role: harden/unattended_upgrades
+
+    - role: harden/sshd_config
+      vars:
+        ssh_port: "{{ ssh_port }}"
+
+    - role: harden/nftables
+      vars:
+        ssh_port: "{{ ssh_port }}"
+        # ntp_port: "{{ ntp_port }}"
+        nftables_conf_name: "vm-nftables.conf.j2"
+
+  # ansible-playbook playbooks/harden/harden_node.yml -i inventory.ec2.ini -e "ssh_port=25105"
diff --git a/ansible/playbooks/harden/harden_proxmox.yml b/ansible/playbooks/harden/harden_proxmox.yml
new file mode 100644
index 0000000..79fa909
--- /dev/null
+++ b/ansible/playbooks/harden/harden_proxmox.yml
@@ -0,0 +1,21 @@
+---
+- name: harden node
+  hosts:
+    - dev-kyiv01-psy-proxmox-main-01
+  become: true
+  roles:
+    - role: harden/fail2ban
+
+    - role: harden/unattended_upgrades
+
+    - role: harden/sshd_config
+      vars:
+        ssh_port: "25105"
+
+    - role: harden/nftables
+      vars:
+        nftables_conf_name: "proxmox-nftables.conf.j2"
+        ssh_port: "25105"
+        ntp_port: "123"
+
+  # ansible-playbook playbooks/harden/harden_proxmox.yml -i inventory.ini
diff --git a/ansible/playbooks/k8s/install/k8s_master.yml b/ansible/playbooks/k8s/install/k8s_master.yml
new file mode 100644
index 0000000..304089a
--- /dev/null
+++ b/ansible/playbooks/k8s/install/k8s_master.yml
@@ -0,0 +1,12 @@
+---
+- name: install k8s master
+  hosts: dev-kyiv01-vm-k8s-master-01
+  become: true
+
+  roles:
+    - role: k8s/install/00_python
+    - role: k8s/install/01_helm
+    - role: k8s/install/02_common
+    - role: k8s/install/03_master
+
+  # ansible-playbook playbooks/k8s/install/k8s_master.yml -i inventory.ini
diff --git a/ansible/playbooks/k8s/install/k8s_worker.yml b/ansible/playbooks/k8s/install/k8s_worker.yml
new file mode 100644
index 0000000..76e780a
--- /dev/null
+++ b/ansible/playbooks/k8s/install/k8s_worker.yml
@@ -0,0 +1,10 @@
+- name: install k8s worker
+  hosts: dev-kyiv01-vm-k8s-worker-01
+  become: true
+  roles:
+    - role: k8s/install/02_common
+    - role: k8s/install/04_worker
+      vars:
+        k8s_kubeadm_join_command: "kubeadm join 192.168.0.105:6443 --token 5n2fv0.w67ya3tqfz8ucsae --discovery-token-ca-cert-hash sha256:9e944ac89557d42bd335ef175d232b3d78fd4b2af5935db23d52e443de539aad"
+
+  # ansible-playbook playbooks/k8s/install/k8s_worker.yml -i inventory.ini
diff --git a/ansible/playbooks/node/change_hostname.yml b/ansible/playbooks/node/change_hostname.yml
new file mode 100644
index 0000000..57861b9
--- /dev/null
+++ b/ansible/playbooks/node/change_hostname.yml
@@ -0,0 +1,11 @@
+---
+- name: change hostname
+  hosts: test
+  become: true
+  gather_facts: false
+  roles:
+    - role: node/change_hostname
+      vars:
+        hostname: "dev-lviv01-vm-k8s-worker-01"
+
+  # ansible-playbook playbooks/node/change_hostname.yml -i inventory-local.ini
diff --git a/ansible/playbooks/node/execute_command.yml b/ansible/playbooks/node/execute_command.yml
new file mode 100644
index 0000000..6f55068
--- /dev/null
+++ b/ansible/playbooks/node/execute_command.yml
@@ -0,0 +1,11 @@
+---
+- name: execute command
+  hosts: all
+  become: true
+  gather_facts: false
+  roles:
+    - role: node/execute_command
+      vars:
+        command: "{{ command }}"
+
+  # ansible-playbook playbooks/node/execute_command.yml -i inventory.ini
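+  # e.g. (hypothetical invocation, limited to a single host):
+  #   ansible-playbook playbooks/node/execute_command.yml -i inventory.ini \
+  #     -e 'command="uptime"' -l dev-kyiv01-vm-dns-main-01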
diff --git a/ansible/playbooks/node/push_dir.yml b/ansible/playbooks/node/push_dir.yml
new file mode 100644
index 0000000..fe454f3
--- /dev/null
+++ b/ansible/playbooks/node/push_dir.yml
@@ -0,0 +1,11 @@
+---
+- name: push dir
+  hosts: all
+  become: true
+  roles:
+    - role: node/push_dir
+      vars:
+        resource_dir: "{{ resource_dir }}"
+        target_dir: "{{ target_dir }}"
+
+  # ansible-playbook playbooks/node/push_dir.yml -i inventory.ini
diff --git a/ansible/playbooks/node/remove_file.yml b/ansible/playbooks/node/remove_file.yml
new file mode 100644
index 0000000..916143e
--- /dev/null
+++ b/ansible/playbooks/node/remove_file.yml
@@ -0,0 +1,10 @@
+---
+- name: remove file
+  hosts: all
+  become: true
+  roles:
+    - role: node/remove_file
+      vars:
+        file_path: "{{ file_path }}"
+
+  # ansible-playbook playbooks/node/remove_file.yml -i inventory.ini
diff --git a/ansible/playbooks/node/remove_user.yml b/ansible/playbooks/node/remove_user.yml
new file mode 100644
index 0000000..55f73ea
--- /dev/null
+++ b/ansible/playbooks/node/remove_user.yml
@@ -0,0 +1,10 @@
+---
+- name: remove user
+  hosts: all
+  become: true
+  roles:
+    - role: node/remove_user
+      vars:
+        user_name: "{{ remove_user }}"
+
+  # ansible-playbook playbooks/node/remove_user.yml -i inventory.ini
diff --git a/ansible/playbooks/ntp/chrony/00_setup_edge_ntp_node.yml b/ansible/playbooks/ntp/chrony/00_setup_edge_ntp_node.yml
new file mode 100644
index 0000000..ce8884d
--- /dev/null
+++ b/ansible/playbooks/ntp/chrony/00_setup_edge_ntp_node.yml
@@ -0,0 +1,16 @@
+---
+- name: setup edge ntp node
+  hosts:
+    - dev-kyiv01-psy-proxmox-main-01
+  become: true
+  roles:
+    - role: ntp/chrony
+      vars:
+        chrony_upstream_sources:
+          - ntp.time.in.ua
+          - time.google.com
+          - time.cloudflare.com
+        chrony_allow_networks:
+          - 192.168.0.0/24
+
+  # ansible-playbook playbooks/ntp/chrony/00_setup_edge_ntp_node.yml -i inventory.ini
diff --git a/ansible/playbooks/ntp/chrony/01_setup_core_ntp_node.yml b/ansible/playbooks/ntp/chrony/01_setup_core_ntp_node.yml
new file mode 100644
index 0000000..c8119b7
--- /dev/null
+++ b/ansible/playbooks/ntp/chrony/01_setup_core_ntp_node.yml
@@ -0,0 +1,14 @@
+---
+- name: setup core ntp node
+  hosts:
+    - dev-kyiv01-vm-ntp-main-01
+  become: true
+  roles:
+    - role: ntp/chrony
+      vars:
+        chrony_upstream_sources:
+          - ntp-edge.infra.hran
+        chrony_allow_networks:
+          - 192.168.0.0/24
+
+  # ansible-playbook playbooks/ntp/chrony/01_setup_core_ntp_node.yml -i inventory.ini
diff --git a/ansible/playbooks/ntp/chrony/02_setup_client_ntp_node.yml b/ansible/playbooks/ntp/chrony/02_setup_client_ntp_node.yml
new file mode 100644
index 0000000..1f62eb7
--- /dev/null
+++ b/ansible/playbooks/ntp/chrony/02_setup_client_ntp_node.yml
@@ -0,0 +1,19 @@
+---
+- name: setup client ntp node
+  hosts:
+    - dev-kyiv01-vm-dns-main-01
+    - dev-kyiv01-vm-ceph-main-01
+    - dev-kyiv01-vm-ceph-main-02
+    - dev-kyiv01-vm-ceph-main-03
+    - dev-kyiv01-vm-k8s-master-01
+    - dev-kyiv01-vm-k8s-worker-01
+  become: true
+  roles:
+    - role: ntp/chrony
+      vars:
+        chrony_upstream_sources:
+          - ntp-core.infra.hran
+        chrony_allow_networks:
+          - 192.168.0.0/24
+
+  # ansible-playbook playbooks/ntp/chrony/02_setup_client_ntp_node.yml -i inventory.ini
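+  # verify on a client afterwards (assumes chrony was installed by the role):
+  #   chronyc sources -v    # should list ntp-core.infra.hran
+  #   chronyc tracking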
diff --git a/ansible/playbooks/packer/install.yml b/ansible/playbooks/packer/install.yml
new file mode 100644
index 0000000..9c83426
--- /dev/null
+++ b/ansible/playbooks/packer/install.yml
@@ -0,0 +1,9 @@
+---
+- name: install packer
+  hosts:
+    - dev-kyiv01-lxc-packer-main-01
+  become: true
+  roles:
+    - role: packer/install
+
+  # ansible-playbook playbooks/packer/install.yml -i inventory.ini
diff --git a/ansible/playbooks/packer/run.yml b/ansible/playbooks/packer/run.yml
new file mode 100644
index 0000000..2c1499f
--- /dev/null
+++ b/ansible/playbooks/packer/run.yml
@@ -0,0 +1,11 @@
+---
+- name: run packer
+  hosts:
+    - dev-kyiv01-lxc-packer-main-01
+  become: true
+  roles:
+    - role: packer/run
+      vars:
+        packer_config_dir: "/opt/packer/proxmox/debian13"
+
+  # ansible-playbook playbooks/packer/run.yml -i inventory.ini
diff --git a/ansible/playbooks/proxmox/enable_snippets.yml b/ansible/playbooks/proxmox/enable_snippets.yml
new file mode 100644
index 0000000..9d46a4d
--- /dev/null
+++ b/ansible/playbooks/proxmox/enable_snippets.yml
@@ -0,0 +1,9 @@
+---
+- name: enable snippets
+  hosts:
+    - dev-kyiv01-psy-proxmox-main-01
+  become: true
+  roles:
+    - role: proxmox/enable_snippets
+
+  # ansible-playbook playbooks/proxmox/enable_snippets.yml -i inventory.ini
diff --git a/ansible/playbooks/proxmox/lxc/download_template.yml b/ansible/playbooks/proxmox/lxc/download_template.yml
new file mode 100644
index 0000000..76a2681
--- /dev/null
+++ b/ansible/playbooks/proxmox/lxc/download_template.yml
@@ -0,0 +1,11 @@
+---
+- name: download lxc template
+  hosts:
+    - dev-kyiv01-psy-proxmox-main-01
+  become: true
+  roles:
+    - role: proxmox/lxc/download_template
+      vars:
+        lxc_template_name: "debian-12-standard_12.12-1_amd64.tar.zst"
+
+  # ansible-playbook playbooks/proxmox/lxc/download_template.yml -i inventory.ini
diff --git a/ansible/playbooks/proxmox/lxc/shutdown.yml b/ansible/playbooks/proxmox/lxc/shutdown.yml
new file mode 100644
index 0000000..6d40df3
--- /dev/null
+++ b/ansible/playbooks/proxmox/lxc/shutdown.yml
@@ -0,0 +1,11 @@
+---
+- name: shutdown lxc container
+  hosts:
+    - dev-kyiv01-psy-proxmox-main-01
+  become: true
+  roles:
+    - role: proxmox/lxc/shutdown
+      vars:
+        lxc_id: 200
+
+  # ansible-playbook playbooks/proxmox/lxc/shutdown.yml -i inventory.ini
diff --git a/ansible/playbooks/proxmox/setup_proxmox_no_subscription_repository.yml b/ansible/playbooks/proxmox/setup_proxmox_no_subscription_repository.yml
new file mode 100644
index 0000000..5d133f2
--- /dev/null
+++ b/ansible/playbooks/proxmox/setup_proxmox_no_subscription_repository.yml
@@ -0,0 +1,9 @@
+---
+- name: configure proxmox no-subscription repo
+  hosts:
+    - dev-kyiv01-psy-proxmox-main-01
+  become: true
+  roles:
+    - proxmox/setup_no_subscription_repository
+
+  # ansible-playbook playbooks/proxmox/setup_proxmox_no_subscription_repository.yml -i inventory.ini
diff --git a/ansible/playbooks/proxmox/vm/download_iso.yml b/ansible/playbooks/proxmox/vm/download_iso.yml
new file mode 100644
index 0000000..795bc4a
--- /dev/null
+++ b/ansible/playbooks/proxmox/vm/download_iso.yml
@@ -0,0 +1,12 @@
+---
+- name: download vm iso
+  hosts:
+    - dev-kyiv01-psy-proxmox-main-01
+  become: true
+  roles:
+    - role: proxmox/vm/download_iso
+      vars:
+        vm_iso_name: "debian-13.2.0-amd64-netinst.iso"
+        vm_iso_url: "https://cdimage.debian.org/debian-cd/current/amd64/iso-cd/{{ vm_iso_name }}"
+
+  # ansible-playbook playbooks/proxmox/vm/download_iso.yml -i inventory.ini
diff --git a/ansible/roles/ceph/00_install/tasks/main.yml b/ansible/roles/ceph/00_install/tasks/main.yml
new file mode 100644
index 0000000..a40f82d
--- /dev/null
+++ b/ansible/roles/ceph/00_install/tasks/main.yml
@@ -0,0 +1,43 @@
+---
+- name: apt update
+  ansible.builtin.apt:
+    update_cache: true
+
+- name: apt upgrade
+  ansible.builtin.apt:
+    upgrade: dist
+
+- name: install base packages
+  ansible.builtin.apt:
+    name:
+      - ca-certificates
+      - curl
+      - gnupg
+      - lvm2
+      - podman
+    state: present
+
+- name: swapoff
+  ansible.builtin.command: swapoff -a
+  changed_when: true
+
+- name: comment swap in /etc/fstab
+  ansible.builtin.replace:
+    path: /etc/fstab
+    regexp: '^([^#].*\s+swap\s+.*)$'
+    replace: '# \1'
+
+- name: install cephadm and ceph-common
+  ansible.builtin.apt:
+    name:
+      - cephadm
+      - ceph-common
+    state: present
+
+- name: cephadm version
+  ansible.builtin.command: cephadm version
+  changed_when: false
+
+- name: ceph -v
+  ansible.builtin.command: ceph -v
+  changed_when: false
diff --git a/ansible/roles/ceph/01_bootstrap/tasks/main.yml b/ansible/roles/ceph/01_bootstrap/tasks/main.yml
new file mode 100644
index 0000000..24cb447
--- /dev/null
+++ b/ansible/roles/ceph/01_bootstrap/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+- name: cephadm bootstrap
+  ansible.builtin.command: >
+    cephadm bootstrap
+    --mon-ip 192.168.0.102
+    --initial-dashboard-user admin
+    --initial-dashboard-password password
+    --allow-fqdn-hostname
+  changed_when: true
diff --git a/ansible/roles/ceph/02_share_pubkey/tasks/main.yml b/ansible/roles/ceph/02_share_pubkey/tasks/main.yml
new file mode 100644
index 0000000..382ab97
--- /dev/null
+++ b/ansible/roles/ceph/02_share_pubkey/tasks/main.yml
@@ -0,0 +1,25 @@
+---
+- name: get cephadm pub key (run once on ceph01)
+  ansible.builtin.command: ceph cephadm get-pub-key
+  register: ceph_pubkey_cmd
+  changed_when: false
+  delegate_to: dev-kyiv01-vm-ceph-main-01
+  run_once: true
+
+- name: set ceph pubkey fact for this play
+  ansible.builtin.set_fact:
+    ceph_pubkey: "{{ ceph_pubkey_cmd.stdout }}"
+  run_once: true
+
+- name: add ceph pub key to root authorized_keys
+  ansible.posix.authorized_key:
+    user: root
+    key: "{{ ceph_pubkey }}"
+    state: present
+  when: inventory_hostname in ["dev-kyiv01-vm-ceph-main-02", "dev-kyiv01-vm-ceph-main-03"]
+
+- name: restart ssh
+  ansible.builtin.service:
+    name: ssh
+    state: restarted
+  when: inventory_hostname in ["dev-kyiv01-vm-ceph-main-02", "dev-kyiv01-vm-ceph-main-03"]
diff --git a/ansible/roles/ceph/03_setup_cluster/tasks/main.yml b/ansible/roles/ceph/03_setup_cluster/tasks/main.yml
new file mode 100644
index 0000000..c7ab359
--- /dev/null
+++ b/ansible/roles/ceph/03_setup_cluster/tasks/main.yml
@@ -0,0 +1,40 @@
+---
+- name: add host ceph02
+  ansible.builtin.command: >
+    ceph orch host add dev-kyiv01-vm-ceph-main-02 192.168.0.103
+  changed_when: true
+
+- name: add host ceph03
+  ansible.builtin.command: >
+    ceph orch host add dev-kyiv01-vm-ceph-main-03 192.168.0.104
+  changed_when: true
+
+- name: add osd ceph01 sdb
+  ansible.builtin.command: >
+    ceph orch daemon add osd dev-kyiv01-vm-ceph-main-01:/dev/sdb
+  changed_when: true
+
+- name: add osd ceph01 sdc
+  ansible.builtin.command: >
+    ceph orch daemon add osd dev-kyiv01-vm-ceph-main-01:/dev/sdc
+  changed_when: true
+
+- name: add osd ceph02 sdb
+  ansible.builtin.command: >
+    ceph orch daemon add osd dev-kyiv01-vm-ceph-main-02:/dev/sdb
+  changed_when: true
+
+- name: add osd ceph02 sdc
+  ansible.builtin.command: >
+    ceph orch daemon add osd dev-kyiv01-vm-ceph-main-02:/dev/sdc
+  changed_when: true
+
+- name: add osd ceph03 sdb
+  ansible.builtin.command: >
+    ceph orch daemon add osd dev-kyiv01-vm-ceph-main-03:/dev/sdb
+  changed_when: true
+
+- name: add osd ceph03 sdc
+  ansible.builtin.command: >
+    ceph orch daemon add osd dev-kyiv01-vm-ceph-main-03:/dev/sdc
+  changed_when: true
diff --git a/ansible/roles/ceph/04_setup_rgw/readme.md b/ansible/roles/ceph/04_setup_rgw/readme.md
new file mode 100644
index 0000000..301f6fc
--- /dev/null
+++ b/ansible/roles/ceph/04_setup_rgw/readme.md
@@ -0,0 +1,48 @@
+# "[1/8] Check the cluster"
+ceph -s
+ceph orch status
+
+# "[2/8] Create realm/zonegroup/zone (errors if they already exist; ignore it, or check with list first)"
+radosgw-admin realm create --rgw-realm=default --default || true
+radosgw-admin zonegroup create --rgw-zonegroup=default --master --default || true
+radosgw-admin zone create \
+    --rgw-zonegroup=default \
+    --rgw-zone=default \
+    --master \
+    --default || true
+
+# "[3/8] Commit the period (updates the multisite config)"
+radosgw-admin period update --commit
+
+# "[4/8] Check realm/zone"
+radosgw-admin realm list
+radosgw-admin zone list
+
+# "[5/8] Deploy RGW as a service via cephadm/orchestrator"
+ceph orch apply rgw default --placement="1"
+
+# "[6/8] Check that RGW came up"
+ceph orch ls
+ceph orch ps --service-name rgw.default
+ss -lntp | grep -E 'rgw|civetweb|beast|7480|80|443' || true
+
+# "[7/8] Create the admin (system) user: for ops ONLY"
+# Important: the system user is not for applications, only for admin operations / ops automation
+radosgw-admin user create \
+    --uid="admin" \
+    --display-name="RGW Admin (system)" \
+    --system || true
+
+# "[8/8] Create the user for Crunchy pgBackRest + a bucket"
+# Create a dedicated user for pgBackRest
+radosgw-admin user create \
+    --uid="crunchy-backup" \
+    --display-name="Crunchy pgBackRest" || true
+
+# Create the bucket and assign its owner (the uid must already exist)
+radosgw-admin bucket create \
+    --bucket="crunchy-pgbackrest" \
+    --uid="crunchy-backup" || true
+
+# "=== Result: credentials for Crunchy ==="
+radosgw-admin user info --uid="crunchy-backup"
diff --git a/ansible/roles/ceph/05_create_k8s_pool/readme.md b/ansible/roles/ceph/05_create_k8s_pool/readme.md
new file mode 100644
index 0000000..d09f374
--- /dev/null
+++ b/ansible/roles/ceph/05_create_k8s_pool/readme.md
@@ -0,0 +1,28 @@
+```bash
+ceph -s
+ceph fsid
+ceph mon dump | egrep 'mon\.' -n
+ceph osd pool ls
+
+# create the pool (tune pg_num to the cluster size; 64/128 is fine to start)
+ceph osd pool create k8s-rbd 128
+
+# enable the "rbd" application (required for CSI)
+ceph osd pool application enable k8s-rbd rbd
+
+# (optional) initialize rbd metadata
+rbd pool init k8s-rbd
+
+# (optional) set replication size=3 (or whatever your convention is)
+ceph osd pool set k8s-rbd size 3
+
+ceph auth get-or-create client.k8s-rbd-csi \
+    mon 'profile rbd' \
+    osd "profile rbd pool=k8s-rbd" \
+    mgr "profile rbd"
+
+# show the key
+ceph auth get client.k8s-rbd-csi
+
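+# (optional) smoke-test the pool with a throwaway image (run on an admin node)
+rbd create k8s-rbd/smoke --size 128M
+rbd ls k8s-rbd
+rbd rm k8s-rbd/smoke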
+
+```
\ No newline at end of file
diff --git a/ansible/roles/ceph/readme.md b/ansible/roles/ceph/readme.md
new file mode 100644
index 0000000..fa59ddb
--- /dev/null
+++ b/ansible/roles/ceph/readme.md
@@ -0,0 +1,85 @@
+# STAGE 0. OS preparation (on all Ceph nodes)
+
+## update the system
+apt update && apt upgrade -y
+
+## base packages (without chrony/dns/hosts)
+apt install -y \
+    ca-certificates \
+    curl \
+    gnupg \
+    lvm2 \
+    podman
+
+## disable swap (REQUIRED for k8s; not strictly needed for Ceph, but better done right away)
+swapoff -a
+sed -i '/ swap / s/^/#/' /etc/fstab
+
+## check the disks (make sure the OSD disks are empty)
+lsblk
+
+# STAGE 1. Install cephadm (on the bootstrap node, then on all nodes)
+
+apt install -y cephadm ceph-common
+cephadm version
+ceph -v
+
+# STAGE 2. Bootstrap the cluster (only on the first node / mon)
+
+cephadm bootstrap \
+    --mon-ip 192.168.0.102 \
+    --initial-dashboard-user admin \
+    --initial-dashboard-password password \
+    --allow-fqdn-hostname
+
+ceph -s
+ceph orch ps
+
+# STAGE 3. Add the remaining nodes to the orchestrator
+
+ceph cephadm get-pub-key
+systemctl restart ssh
+
+ceph orch host add dev-kyiv01-vm-ceph-main-02 192.168.0.103
+ceph orch host add dev-kyiv01-vm-ceph-main-03 192.168.0.104
+
+ceph orch host ls
+
+
+# STAGE 4. Add OSDs (on each node)
+
+## bootstrap node (local)
+ceph orch daemon add osd dev-kyiv01-vm-ceph-main-01:/dev/sdb
+ceph orch daemon add osd dev-kyiv01-vm-ceph-main-01:/dev/sdc
+
+## vm-ceph-kyiv-02
+ceph orch daemon add osd dev-kyiv01-vm-ceph-main-02:/dev/sdb
+ceph orch daemon add osd dev-kyiv01-vm-ceph-main-02:/dev/sdc
+
+## vm-ceph-kyiv-03
+ceph orch daemon add osd dev-kyiv01-vm-ceph-main-03:/dev/sdb
+ceph orch daemon add osd dev-kyiv01-vm-ceph-main-03:/dev/sdc
+
+## Verify:
+
+ceph osd tree
+ceph -s
+
+
+# STAGE 5. Pool for k8s RBD
+
+ceph osd pool create k8s-rbd 64
+ceph osd pool application enable k8s-rbd rbd
+
+ceph osd pool ls
+ceph osd pool get k8s-rbd all
+
+## Mini checklist
+
+ceph -s
+ceph orch host ls
+ceph orch ps
+ceph osd tree
+
+# Delete a broken cluster
+cephadm rm-cluster --force --fsid e3b4050a-e8be-11f0-84c2-027a4c119066
\ No newline at end of file
diff --git a/ansible/roles/dns/push_powerdns_configs_to_node/readme.md b/ansible/roles/dns/push_powerdns_configs_to_node/readme.md
new file mode 100644
index 0000000..d727354
--- /dev/null
+++ b/ansible/roles/dns/push_powerdns_configs_to_node/readme.md
@@ -0,0 +1,38 @@
+# example dns path in Debian13
+App → glibc resolver → /etc/resolv.conf (127.0.0.53) → systemd-resolved → 192.168.0.1 (Proxmox)
+
+# before running the role
+```bash
+sudo systemctl disable --now systemd-resolved
+
+sudo rm -f /etc/resolv.conf
+echo -e "nameserver 1.1.1.1\nnameserver 8.8.8.8" | sudo tee /etc/resolv.conf
+
+docker compose down
+docker compose up -d
+```
+
+```bash
+# pdns-auth web/api via nginx
+curl -i -H 'Host: auth.infra.hran' http://127.0.0.1/
+
+# recursor web/api via nginx
+curl -i -H 'Host: recursor.infra.hran' http://127.0.0.1/
+
+# dnsdist web via nginx
+curl -i -H 'Host: dnsdist.infra.hran' http://127.0.0.1/
+curl -i -u 'admin:CHANGE_ME_DNSDIST_WEB_PASSWORD' -H 'Host: dnsdist.infra.hran' http://127.0.0.1/
+
+# windows
+C:\Windows\System32\drivers\etc\hosts
+
+127.0.0.1 auth.infra.hran
+127.0.0.1 recursor.infra.hran
+127.0.0.1 dnsdist.infra.hran
+
+# check from browser
+http://dnsdist.infra.hran:8080/
+http://auth.infra.hran:8080/
+http://recursor.infra.hran:8080/
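+
+# resolve through dnsdist itself (assumes dig is available; run on the DNS VM)
+dig @127.0.0.1 infra.hran SOA     # served by the auth pool
+dig @127.0.0.1 example.com A      # forwarded to the recursor pool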
+```
diff --git a/ansible/roles/dns/push_powerdns_configs_to_node/tasks/main.yml b/ansible/roles/dns/push_powerdns_configs_to_node/tasks/main.yml
new file mode 100644
index 0000000..f3b1803
--- /dev/null
+++ b/ansible/roles/dns/push_powerdns_configs_to_node/tasks/main.yml
@@ -0,0 +1,37 @@
+- name: ensure directory structure exists
+  ansible.builtin.file:
+    path: "{{ item }}"
+    state: directory
+    owner: "root"
+    group: "root"
+    mode: "0755"
+  loop:
+    - "{{ dns_stack_root }}"
+    - "{{ dns_stack_root }}/postgres/initdb"
+    - "{{ dns_stack_root }}/pdns-auth"
+    - "{{ dns_stack_root }}/pdns-recursor"
+    - "{{ dns_stack_root }}/dnsdist"
+    - "{{ dns_stack_root }}/nginx"
+
+- name: render stack files
+  ansible.builtin.template:
+    src: "{{ item.src }}"
+    dest: "{{ dns_stack_root }}/{{ item.dest }}"
+    owner: "root"
+    group: "root"
+    mode: "{{ item.mode | default('0644') }}"
+  loop:
+    - { src: "docker-compose.yml.j2", dest: "docker-compose.yml" }
+    - { src: ".env.j2", dest: ".env", mode: "0600" }
+    - {
+        src: "postgres/initdb/01-pdns-schema.sql.j2",
+        dest: "postgres/initdb/01-pdns-schema.sql",
+      }
+    - { src: "pdns-auth/pdns.conf.j2", dest: "pdns-auth/pdns.conf" }
+    - {
+        src: "pdns-recursor/recursor.conf.j2",
+        dest: "pdns-recursor/recursor.conf",
+      }
+    - { src: "dnsdist/dnsdist.conf.j2", dest: "dnsdist/dnsdist.conf" }
+    - { src: "nginx/nginx.conf.j2", dest: "nginx/nginx.conf" }
+  register: rendered
diff --git a/ansible/roles/dns/push_powerdns_configs_to_node/templates/dnsdist/dnsdist.conf.j2 b/ansible/roles/dns/push_powerdns_configs_to_node/templates/dnsdist/dnsdist.conf.j2
new file mode 100644
index 0000000..b7074a0
--- /dev/null
+++ b/ansible/roles/dns/push_powerdns_configs_to_node/templates/dnsdist/dnsdist.conf.j2
@@ -0,0 +1,41 @@
+addLocal("0.0.0.0:53")
+addLocal("[::]:53")
+
+-- ACL: clients we are allowed to answer at all
+addACL("127.0.0.0/8")    -- IPv4 localhost (the machine talking to itself).
+addACL("10.0.0.0/8")     -- RFC1918 private networks (often VPN / corporate nets).
+addACL("172.16.0.0/12")  -- private 172.16-172.31 (includes 172.30.x, the docker network).
+addACL("192.168.0.0/16") -- typical home LAN.
+addACL("::1/128")        -- IPv6 localhost.
+addACL("fc00::/7")       -- IPv6 ULA (the private-range analog).
+addACL("fe80::/10")      -- IPv6 link-local (on-link addresses, per interface).
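+-- NOTE: only 192.168.0.0/24 and the 172.30.0.0/24 docker subnet are strictly
+-- required for this deployment; the broader ranges above are kept for headroom.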
+
+newServer({
+    address="172.30.0.11:5300",
+    pool="auth",
+    name="pdns-auth"
+})
+
+newServer({
+    address="172.30.0.12:5301",
+    pool="recursor",
+    name="pdns-recursor"
+})
+
+-- Authoritative zones -> the auth pool, everything else -> the recursor
+local authZones = newSuffixMatchNode()
+authZones:add("infra.hran.")
+
+pc = newPacketCache(100000, {maxTTL=86400, minTTL=0, temporaryFailureTTL=60})
+getPool("recursor"):setCache(pc)
+getPool("auth"):setCache(pc)
+
+addAction(SuffixMatchNodeRule(authZones), PoolAction("auth"))
+addAction(AllRule(), PoolAction("recursor"))
+
+webserver("0.0.0.0:8084")
+setWebserverConfig({
+    password="CHANGE_ME_DNSDIST_WEB_PASSWORD",
+    apiKey="CHANGE_ME_DNSDIST_KEY",
+    acl="127.0.0.0/8, 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16, ::1/128, fc00::/7, fe80::/10"
+})
diff --git a/ansible/roles/dns/push_powerdns_configs_to_node/templates/docker-compose.yml.j2 b/ansible/roles/dns/push_powerdns_configs_to_node/templates/docker-compose.yml.j2
new file mode 100644
index 0000000..6d0f2aa
--- /dev/null
+++ b/ansible/roles/dns/push_powerdns_configs_to_node/templates/docker-compose.yml.j2
@@ -0,0 +1,142 @@
+services:
+  postgres:
+    image: postgres:16
+    container_name: dnsstack-postgres
+    restart: unless-stopped
+    environment:
+      TZ: Europe/Kyiv
+      POSTGRES_DB: pdns
+      POSTGRES_USER: pdns
+      POSTGRES_PASSWORD: CHANGE_ME_POSTGRES_PASSWORD
+    volumes:
+      - /opt/dns-stack/postgres/data:/var/lib/postgresql/data
+      - ./postgres/initdb:/docker-entrypoint-initdb.d:ro
+    networks:
+      dnsnet:
+        ipv4_address: "172.30.0.10"
+    healthcheck:
+      test: ["CMD-SHELL", "pg_isready -U $${POSTGRES_USER} -d $${POSTGRES_DB} -h 127.0.0.1 -p 5432"]
+      interval: 2s
+      timeout: 3s
+      retries: 30
+      start_period: 10s
+    logging:
+      driver: "json-file"
+      options:
+        tag: "dnsstack.postgres"
+        max-size: "20m"
+        max-file: "10"
+
+  pdns-auth:
+    image: powerdns/pdns-auth-50:latest
+    container_name: dnsstack-pdns-auth
+    restart: unless-stopped
+    depends_on:
+      postgres:
+        condition: service_healthy
+    environment:
+      TZ: Europe/Kyiv
+    volumes:
+      - ./pdns-auth/pdns.conf:/etc/powerdns/pdns.conf:ro
+    networks:
+      dnsnet:
+        ipv4_address: "172.30.0.11"
+    expose:
+      - "5300"
+      - "8083"
+    ulimits:
+      nofile:
+        soft: 10064
+        hard: 10064
+    logging:
+      driver: "json-file"
+      options:
+        tag: "dnsstack.pdns-auth"
+        max-size: "20m"
+        max-file: "10"
+
+  pdns-recursor:
+    image: powerdns/pdns-recursor-53:latest
+    container_name: dnsstack-pdns-recursor
+    restart: unless-stopped
+    environment:
+      TZ: Europe/Kyiv
+    volumes:
+      - ./pdns-recursor/recursor.conf:/etc/powerdns/recursor.conf:ro
+    networks:
+      dnsnet:
+        ipv4_address: "172.30.0.12"
+    expose:
+      - "5301"
+      - "8082"
+    ulimits:
+      nofile:
+        soft: 10064
+        hard: 10064
+    logging:
+      driver: "json-file"
+      options:
+        tag: "dnsstack.pdns-recursor"
+        max-size: "20m"
+        max-file: "10"
+
+  dnsdist:
+    image: powerdns/dnsdist-20:latest
+    container_name: dnsstack-dnsdist
+    restart: unless-stopped
+    depends_on:
+      - pdns-auth
+      - pdns-recursor
+    environment:
+      TZ: Europe/Kyiv
+    volumes:
+      - ./dnsdist/dnsdist.conf:/etc/dnsdist/dnsdist.conf:ro
+    networks:
+      dnsnet:
+        ipv4_address: "172.30.0.2"
+    ports:
+      - "53:53/udp"
+      - "53:53/tcp"
+    expose:
+      - "8084"
+    ulimits:
+      nofile:
+        soft: 65535
+        hard: 65535
+    logging:
+      driver: "json-file"
+      options:
+        tag: "dnsstack.dnsdist"
+        max-size: "50m"
+        max-file: "10"
+
+  nginx:
+    image: nginx:1.27-alpine
+    container_name: dnsstack-nginx
+    restart: unless-stopped
+    depends_on:
+      - pdns-auth
+      - pdns-recursor
+      - dnsdist
+    environment:
+      TZ: Europe/Kyiv
+    volumes:
+      -
./nginx/nginx.conf:/etc/nginx/nginx.conf:ro + networks: + dnsnet: + ipv4_address: "172.30.0.3" + ports: + - "80:80/tcp" + logging: + driver: "json-file" + options: + tag: "dnsstack.nginx" + max-size: "20m" + max-file: "10" + +networks: + dnsnet: + driver: bridge + ipam: + config: + - subnet: "172.30.0.0/24" diff --git a/ansible/roles/dns/push_powerdns_configs_to_node/templates/nginx/nginx.conf.j2 b/ansible/roles/dns/push_powerdns_configs_to_node/templates/nginx/nginx.conf.j2 new file mode 100644 index 0000000..2407301 --- /dev/null +++ b/ansible/roles/dns/push_powerdns_configs_to_node/templates/nginx/nginx.conf.j2 @@ -0,0 +1,53 @@ +worker_processes auto; + +events { worker_connections 1024; } + +http { + access_log /var/log/nginx/access.log; + error_log /var/log/nginx/error.log warn; + + # auth.infra.hran -> pdns-auth:8083 + server { + listen 80; + server_name auth.infra.hran; + + location / { + proxy_http_version 1.1; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_pass http://pdns-auth:8083; + } + } + + # recursor.infra.hran -> pdns-recursor:8082 + server { + listen 80; + server_name recursor.infra.hran; + + location / { + proxy_http_version 1.1; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_pass http://pdns-recursor:8082; + } + } + + # dnsdist.infra.hran -> dnsdist:8084 + server { + listen 80; + server_name dnsdist.infra.hran; + + location / { + proxy_http_version 1.1; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_pass http://dnsdist:8084; + } + } +} diff --git a/ansible/roles/dns/push_powerdns_configs_to_node/templates/pdns-auth/pdns.conf.j2 b/ansible/roles/dns/push_powerdns_configs_to_node/templates/pdns-auth/pdns.conf.j2 new file mode 100644 index 0000000..7f00088 --- /dev/null +++ b/ansible/roles/dns/push_powerdns_configs_to_node/templates/pdns-auth/pdns.conf.j2 @@ -0,0 +1,21 @@ +local-address=0.0.0.0,:: +local-port=5300 + +launch=gpgsql +gpgsql-host=postgres +gpgsql-port=5432 +gpgsql-dbname=pdns +gpgsql-user=pdns +gpgsql-password=CHANGE_ME_POSTGRES_PASSWORD + +api=yes +api-key=CHANGE_ME_PDNS_API_KEY + +webserver=yes +webserver-address=0.0.0.0 +webserver-port=8083 +webserver-allow-from=127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16 + +disable-axfr=yes +version-string=anonymous +loglevel=4 diff --git a/ansible/roles/dns/push_powerdns_configs_to_node/templates/pdns-recursor/recursor.conf.j2 b/ansible/roles/dns/push_powerdns_configs_to_node/templates/pdns-recursor/recursor.conf.j2 new file mode 100644 index 0000000..5486675 --- /dev/null +++ b/ansible/roles/dns/push_powerdns_configs_to_node/templates/pdns-recursor/recursor.conf.j2 @@ -0,0 +1,46 @@ +# PowerDNS Recursor 5.1+ YAML config + +incoming: + listen: + - "0.0.0.0:5301" + - "[::]:5301" + allow_from: + - "127.0.0.0/8" + - "10.0.0.0/8" + - "172.16.0.0/12" + - "192.168.0.0/16" + - "::1/128" + - "fc00::/7" + - "fe80::/10" + +outgoing: + source_address: + - "0.0.0.0" + - "::" + +webservice: + webserver: true + address: "0.0.0.0" + port: 8082 + api_key: "CHANGE_ME_RECURSOR_API_KEY" + allow_from: + - "127.0.0.0/8" + - "10.0.0.0/8" + - "172.16.0.0/12" + - "192.168.0.0/16" + - "::1/128" + - "fc00::/7" + - 
"fe80::/10" + +logging: + loglevel: 6 + quiet: false + +recursor: + version_string: "anonymous" + + forward_zones_recurse: + - zone: "." + forwarders: + - "1.1.1.1" + - "8.8.8.8" diff --git a/ansible/roles/dns/push_powerdns_configs_to_node/templates/postgres/initdb/01-pdns-schema.sql.j2 b/ansible/roles/dns/push_powerdns_configs_to_node/templates/postgres/initdb/01-pdns-schema.sql.j2 new file mode 100644 index 0000000..6c4ef0d --- /dev/null +++ b/ansible/roles/dns/push_powerdns_configs_to_node/templates/postgres/initdb/01-pdns-schema.sql.j2 @@ -0,0 +1,103 @@ +-- PowerDNS Generic PostgreSQL schema (gpgsql) +-- Source: PowerDNS pdns/modules/gpgsqlbackend/schema.pgsql.sql + +CREATE TABLE domains ( + id SERIAL PRIMARY KEY, + name VARCHAR(255) NOT NULL, + master VARCHAR(128) DEFAULT NULL, + last_check INT DEFAULT NULL, + type TEXT NOT NULL, + notified_serial INT DEFAULT NULL, + account VARCHAR(40) DEFAULT NULL, + options TEXT DEFAULT NULL, + catalog VARCHAR(255) DEFAULT NULL +); + +CREATE UNIQUE INDEX name_index ON domains(name); +CREATE INDEX catalog_idx ON domains(catalog); + +CREATE TABLE records ( + id BIGSERIAL PRIMARY KEY, + domain_id INT DEFAULT NULL, + name VARCHAR(255) DEFAULT NULL, + type VARCHAR(10) DEFAULT NULL, + content VARCHAR(65535) DEFAULT NULL, + ttl INT DEFAULT NULL, + prio INT DEFAULT NULL, + disabled BOOL DEFAULT 'f', + ordername VARCHAR(255), + auth BOOL DEFAULT 't' +); + +CREATE INDEX rec_name_index ON records(name); +CREATE INDEX nametype_index ON records(name, type); +CREATE INDEX domain_id ON records(domain_id); +CREATE INDEX ordername ON records(ordername); + +CREATE TABLE supermasters ( + ip INET NOT NULL, + nameserver VARCHAR(255) NOT NULL, + account VARCHAR(40) NOT NULL, + PRIMARY KEY (ip, nameserver) +); + +CREATE TABLE comments ( + id SERIAL PRIMARY KEY, + domain_id INT NOT NULL, + name VARCHAR(255) NOT NULL, + type VARCHAR(10) NOT NULL, + modified_at INT NOT NULL, + account VARCHAR(40) DEFAULT NULL, + comment VARCHAR(65535) NOT NULL +); + +CREATE INDEX comments_domain_id_idx ON comments(domain_id); +CREATE INDEX comments_name_type_idx ON comments(name, type); +CREATE INDEX comments_order_idx ON comments(domain_id, modified_at); + +CREATE TABLE domainmetadata ( + id SERIAL PRIMARY KEY, + domain_id INT NOT NULL, + kind VARCHAR(32), + content TEXT +); + +CREATE INDEX domainmetadata_idx ON domainmetadata(domain_id, kind); + +CREATE TABLE cryptokeys ( + id SERIAL PRIMARY KEY, + domain_id INT NOT NULL, + flags INT NOT NULL, + active BOOL, + published BOOL DEFAULT TRUE, + content TEXT +); + +CREATE INDEX domainidindex ON cryptokeys(domain_id); + +CREATE TABLE tsigkeys ( + id SERIAL PRIMARY KEY, + name VARCHAR(255), + algorithm VARCHAR(50), + secret VARCHAR(255) +); + +CREATE UNIQUE INDEX namealgoindex ON tsigkeys(name, algorithm); + +CREATE TABLE luarecords ( + id SERIAL PRIMARY KEY, + domain_id INT NOT NULL, + name VARCHAR(255) NOT NULL, + type VARCHAR(10) NOT NULL, + content VARCHAR(65535) NOT NULL, + ttl INT NOT NULL, + prio INT DEFAULT NULL, + disabled BOOL DEFAULT 'f', + ordername VARCHAR(255), + auth BOOL DEFAULT 't' +); + +CREATE INDEX luarecord_name_index ON luarecords(name); +CREATE INDEX luarecord_nametype_index ON luarecords(name, type); +CREATE INDEX luarecord_domain_id ON luarecords(domain_id); +CREATE INDEX luarecord_ordername ON luarecords(ordername); diff --git a/ansible/roles/dns/setup_systemd_resolved_config/handlers/main.yml b/ansible/roles/dns/setup_systemd_resolved_config/handlers/main.yml new file mode 100644 index 0000000..d7476f0 --- /dev/null 
+++ b/ansible/roles/dns/setup_systemd_resolved_config/handlers/main.yml @@ -0,0 +1,9 @@ +--- +- name: restart dhcpcd + ansible.builtin.shell: | + set -euo pipefail + dhcpcd -k eth0 || true + sleep 1 + dhcpcd -f /etc/dhcpcd.conf eth0 + args: + executable: /bin/bash diff --git a/ansible/roles/dns/setup_systemd_resolved_config/readme.md b/ansible/roles/dns/setup_systemd_resolved_config/readme.md new file mode 100644 index 0000000..fa24356 --- /dev/null +++ b/ansible/roles/dns/setup_systemd_resolved_config/readme.md @@ -0,0 +1,4 @@ +```bash +cat /etc/resolv.conf +getent hosts ntp-edge.infra.hran +``` \ No newline at end of file diff --git a/ansible/roles/dns/setup_systemd_resolved_config/tasks/main.yml b/ansible/roles/dns/setup_systemd_resolved_config/tasks/main.yml new file mode 100644 index 0000000..5374c4d --- /dev/null +++ b/ansible/roles/dns/setup_systemd_resolved_config/tasks/main.yml @@ -0,0 +1,9 @@ +--- +- name: render dhcpcd.conf (DNS override) + ansible.builtin.template: + src: dhcpcd.conf.j2 + dest: /etc/dhcpcd.conf + owner: root + group: root + mode: "0644" + notify: restart dhcpcd diff --git a/ansible/roles/dns/setup_systemd_resolved_config/templates/dhcpcd.conf.j2 b/ansible/roles/dns/setup_systemd_resolved_config/templates/dhcpcd.conf.j2 new file mode 100644 index 0000000..ca9f908 --- /dev/null +++ b/ansible/roles/dns/setup_systemd_resolved_config/templates/dhcpcd.conf.j2 @@ -0,0 +1,45 @@ +# A sample configuration for dhcpcd. +# See dhcpcd.conf(5) for details. + +# Allow users of this group to interact with dhcpcd via the control socket. +#controlgroup wheel + +# Inform the DHCP server of our hostname for DDNS. +hostname + +# Use the hardware address of the interface for the Client ID. +#clientid +# or +# Use the same DUID + IAID as set in DHCPv6 for DHCPv4 ClientID as per RFC4361. +# Some non-RFC compliant DHCP servers do not reply with this set. +# In this case, comment out duid and enable clientid above. +duid + +# Persist interface configuration when dhcpcd exits. +persistent + +# vendorclassid is set to blank to avoid sending the default of +# dhcpcd-::: +vendorclassid + +# A list of options to request from the DHCP server. +option domain_name_servers, domain_name, domain_search +option classless_static_routes +# Respect the network MTU. This is applied to DHCP routes. +option interface_mtu + +# Request a hostname from the network +option host_name + +# Most distributions have NTP support. +#option ntp_servers + +# A ServerID is required by RFC2131. 
+require dhcp_server_identifier
+
+# Generate SLAAC address using the Hardware Address of the interface
+#slaac hwaddr
+# OR generate Stable Private IPv6 Addresses based from the DUID
+slaac private
+
+static domain_name_servers=192.168.0.100 1.1.1.1 8.8.8.8
diff --git a/ansible/roles/docker/handlers/main.yml b/ansible/roles/docker/handlers/main.yml
new file mode 100644
index 0000000..ff6a2c5
--- /dev/null
+++ b/ansible/roles/docker/handlers/main.yml
@@ -0,0 +1,4 @@
+---
+- name: update apt cache
+  apt:
+    update_cache: yes
diff --git a/ansible/roles/docker/tasks/main.yml b/ansible/roles/docker/tasks/main.yml
new file mode 100644
index 0000000..93dadd8
--- /dev/null
+++ b/ansible/roles/docker/tasks/main.yml
@@ -0,0 +1,74 @@
+---
+# 1) Clean up a potentially broken repo file (as before)
+- name: remove broken docker repo if exists
+  file:
+    path: /etc/apt/sources.list.d/docker.list
+    state: absent
+
+# 2) The minimum set of required packages
+- name: install prerequisites
+  apt:
+    name:
+      - ca-certificates
+      - curl
+      - gnupg
+    state: present
+    update_cache: yes
+
+# 3) Keyring + key
+- name: ensure keyrings dir exists
+  file:
+    path: /etc/apt/keyrings
+    state: directory
+    mode: "0755"
+
+- name: download docker GPG key
+  get_url:
+    url: https://download.docker.com/linux/debian/gpg
+    dest: /etc/apt/keyrings/docker.gpg
+    mode: "0644"
+
+# 4) Repo (architecture via ansible_architecture -> amd64)
+- name: add docker apt repository
+  copy:
+    dest: /etc/apt/sources.list.d/docker.list
+    content: |
+      deb [arch={{ 'amd64' if ansible_architecture in ['x86_64','amd64'] else ansible_architecture }} signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian {{ ansible_lsb.codename }} stable
+
+# 5) Try containerd.io versions one by one until one installs (and hold it right away)
+- name: install first working containerd.io (skip broken versions) and hold
+  shell: |
+    set -euo pipefail
+    apt-get update
+    mapfile -t versions < <(apt-cache madison containerd.io | awk '{print $3}' | sort -V | tac)
+
+    for v in "${versions[@]}"; do
+      echo "Trying containerd.io=$v"
+      if apt-get install -y "containerd.io=$v"; then
+        apt-mark hold containerd.io
+        exit 0
+      fi
+    done
+
+    echo "No working containerd.io version found in repo"
+    exit 1
+  args:
+    executable: /bin/bash
+  changed_when: true
+
+# 6) Docker packages (containerd.io is already installed/held)
+- name: install docker packages
+  apt:
+    name:
+      - docker-ce
+      - docker-ce-cli
+      - docker-buildx-plugin
+      - docker-compose-plugin
+    state: present
+    update_cache: yes
+
+- name: enable & start docker service
+  service:
+    name: docker
+    state: started
+    enabled: yes
diff --git a/ansible/roles/gitea/README.md b/ansible/roles/gitea/README.md
new file mode 100644
index 0000000..c1306e2
--- /dev/null
+++ b/ansible/roles/gitea/README.md
@@ -0,0 +1,109 @@
+# Gitea Setup Notes
+
+## 1️⃣ Adding an HTTPS certificate (Let's Encrypt + Nginx)
+
+### Install certbot
+install certbot on the host (NOT inside the container)
+
+``` bash
+sudo apt update
+sudo apt install certbot python3-certbot-nginx -y
+```
+
+### Base nginx config (HTTP → proxy to Gitea)
+
+File: `./nginx/nginx.conf`
+
+``` nginx
+server {
+    listen 80;
+    server_name gitea.quietblock.net;
+
+    location / {
+        proxy_pass http://gitea:3000;
+        proxy_set_header Host $host;
+        proxy_set_header X-Real-IP $remote_addr;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+    }
+}
+```
+
+### Obtaining the certificate
+
+``` bash
+sudo certbot certonly --standalone -d gitea.quietblock.net
+```
+
+Requests an SSL certificate for the domain using standalone mode.
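+
+Note: standalone mode binds port 80 itself, so anything already listening there
+(the nginx container, in this stack) has to be stopped first. A sketch, assuming
+the compose stack from this role:
+
+``` bash
+docker compose stop nginx
+sudo certbot certonly --standalone -d gitea.quietblock.net
+docker compose start nginx
+```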
+
+On success the certificates will be at:
+
+    /etc/letsencrypt/live/gitea.quietblock.net/fullchain.pem
+    /etc/letsencrypt/live/gitea.quietblock.net/privkey.pem
+
+### Docker nginx service
+
+``` yaml
+nginx:
+    image: nginx:stable
+    container_name: nginx
+    restart: always
+
+    ports:
+      - "80:80"
+      - "443:443"
+
+    volumes:
+      - ./nginx:/etc/nginx/conf.d
+      - /etc/letsencrypt:/etc/letsencrypt:ro
+
+    depends_on:
+      - gitea
+```
+
+### Final nginx config (HTTP → HTTPS + SSL)
+
+``` nginx
+server {
+    listen 80;
+    server_name gitea.quietblock.net;
+    return 301 https://$host$request_uri;
+}
+
+server {
+    listen 443 ssl;
+    server_name gitea.quietblock.net;
+
+    ssl_certificate /etc/letsencrypt/live/gitea.quietblock.net/fullchain.pem;
+    ssl_certificate_key /etc/letsencrypt/live/gitea.quietblock.net/privkey.pem;
+
+    location / {
+        proxy_pass http://gitea:3000;
+        proxy_set_header Host $host;
+        proxy_set_header X-Real-IP $remote_addr;
+        proxy_set_header X-Forwarded-Proto https;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+    }
+}
+```
+
+What happens here: HTTP is redirected to HTTPS, nginx serves the SSL
+certificates, and HTTPS traffic is proxied into the gitea container.
+
+------------------------------------------------------------------------
+
+## 2️⃣ Creating an administrator in Gitea
+
+### Get a shell inside the container
+
+``` bash
+docker exec -it --user git gitea /bin/bash
+```
+
+Opens a shell inside the gitea container as the git user.
+
+### Create the administrator
+
+``` bash
+gitea admin user create --username adminuser --password 14881488 --email you@mail.com --admin
+```
diff --git a/ansible/roles/gitea/tasks/main.yml b/ansible/roles/gitea/tasks/main.yml
new file mode 100644
index 0000000..227487e
--- /dev/null
+++ b/ansible/roles/gitea/tasks/main.yml
@@ -0,0 +1,23 @@
+- name: ensure directory structure exists
+  ansible.builtin.file:
+    path: "{{ item }}"
+    state: directory
+    owner: "root"
+    group: "root"
+    mode: "0755"
+  loop:
+    - "/opt/gitea"
+    - "/opt/gitea/nginx"
+
+- name: render stack files
+  ansible.builtin.template:
+    src: "{{ item.src }}"
+    dest: "/opt/gitea/{{ item.dest }}"
+    owner: "root"
+    group: "root"
+    mode: "{{ item.mode | default('0644') }}"
+  loop:
+    - { src: "docker-compose.yml.j2", dest: "docker-compose.yml" }
+    - { src: ".env.j2", dest: ".env", mode: "0600" }
+    - { src: "nginx/nginx.conf.j2", dest: "nginx/nginx.conf" }
+  register: rendered
diff --git a/ansible/roles/gitea/templates/docker-compose.yml.j2 b/ansible/roles/gitea/templates/docker-compose.yml.j2
new file mode 100644
index 0000000..b1187fa
--- /dev/null
+++ b/ansible/roles/gitea/templates/docker-compose.yml.j2
@@ -0,0 +1,78 @@
+version: "3.9"
+
+services:
+  postgres:
+    image: postgres:15
+    container_name: postgres
+    restart: always
+
+    environment:
+      POSTGRES_DB: ${POSTGRES_DB}
+      POSTGRES_USER: ${POSTGRES_USER}
+      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
+
+    volumes:
+      - ./data/postgres:/var/lib/postgresql/data
+
+    networks:
+      - gitea_net
+
+  gitea:
+    image: gitea/gitea:latest
+    container_name: gitea
+    restart: always
+
+    environment:
+      - USER_UID=1000
+      - USER_GID=1000
+
+      # DB
+      - GITEA__database__DB_TYPE=postgres
+      - GITEA__database__HOST=postgres:5432
+      - GITEA__database__NAME=${POSTGRES_DB}
+      - GITEA__database__USER=${POSTGRES_USER}
+      - GITEA__database__PASSWD=${POSTGRES_PASSWORD}
+
+      # basic
+      - GITEA__server__DOMAIN=${GITEA_URL}
+      - GITEA__server__ROOT_URL=https://${GITEA_URL}/
+      - GITEA__server__SSH_DOMAIN=${GITEA_URL}
+      - GITEA__server__HTTP_PORT=3000
+      - GITEA__server__SSH_PORT=2222
+
+      # security
+      -
GITEA__security__INSTALL_LOCK=true + - GITEA__service__DISABLE_REGISTRATION=true + + volumes: + - ./data/gitea:/data + - /etc/timezone:/etc/timezone:ro + - /etc/localtime:/etc/localtime:ro + + depends_on: + - postgres + + networks: + - gitea_net + + nginx: + image: nginx:stable + container_name: nginx + restart: always + + ports: + - "80:80" + - "443:443" + + volumes: + - ./nginx:/etc/nginx/conf.d + - /etc/letsencrypt:/etc/letsencrypt:ro + + depends_on: + - gitea + + networks: + - gitea_net + +networks: + gitea_net: diff --git a/ansible/roles/gitea/templates/nginx/nginx.conf.j2 b/ansible/roles/gitea/templates/nginx/nginx.conf.j2 new file mode 100644 index 0000000..77f41ce --- /dev/null +++ b/ansible/roles/gitea/templates/nginx/nginx.conf.j2 @@ -0,0 +1,23 @@ +server { + listen 80; + server_name gitea.quietblock.net; + return 301 https://$host$request_uri; +} + +server { + listen 443 ssl; + server_name gitea.quietblock.net; + + ssl_certificate /etc/letsencrypt/live/gitea.quietblock.net/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/gitea.quietblock.net/privkey.pem; + + location / { + proxy_pass http://gitea:3000; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-Proto https; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + + client_max_body_size 50M; +} diff --git a/ansible/roles/harden/fail2ban/handlers/main.yml b/ansible/roles/harden/fail2ban/handlers/main.yml new file mode 100644 index 0000000..902e346 --- /dev/null +++ b/ansible/roles/harden/fail2ban/handlers/main.yml @@ -0,0 +1,14 @@ +--- +- name: validate fail2ban config + listen: "validate and restart fail2ban" + become: true + ansible.builtin.command: fail2ban-client -t + register: f2b_validate + changed_when: false + +- name: restart fail2ban + listen: "validate and restart fail2ban" + become: true + ansible.builtin.systemd: + name: fail2ban + state: restarted diff --git a/ansible/roles/harden/fail2ban/tasks/main.yml b/ansible/roles/harden/fail2ban/tasks/main.yml new file mode 100644 index 0000000..157fd22 --- /dev/null +++ b/ansible/roles/harden/fail2ban/tasks/main.yml @@ -0,0 +1,58 @@ +--- +- name: install fail2ban + deps + ansible.builtin.apt: + name: + - fail2ban + - python3 + - python3-systemd + - nftables + state: present + update_cache: true + become: true + +- name: enable & start nftables + ansible.builtin.systemd: + name: nftables + enabled: true + state: started + become: true + +- name: ensure fail2ban directories exist + ansible.builtin.file: + path: "{{ item }}" + state: directory + owner: root + group: root + mode: "0755" + loop: + - /etc/fail2ban + - /etc/fail2ban/jail.d + - /etc/fail2ban/filter.d + become: true + +- name: deploy /etc/fail2ban/fail2ban.local + ansible.builtin.template: + src: fail2ban.local.j2 + dest: /etc/fail2ban/fail2ban.local + owner: root + group: root + mode: "0644" + notify: validate and restart fail2ban + become: true + +- name: deploy /etc/fail2ban/jail.local + ansible.builtin.template: + src: jail.local.j2 + dest: /etc/fail2ban/jail.local + owner: root + group: root + mode: "0644" + notify: validate and restart fail2ban + become: true + +- name: ensure fail2ban enabled and started + ansible.builtin.systemd: + name: fail2ban + enabled: true + state: started + become: true diff --git a/ansible/roles/harden/fail2ban/templates/fail2ban.local.j2 b/ansible/roles/harden/fail2ban/templates/fail2ban.local.j2 new file mode 100644 index 0000000..be364af --- /dev/null +++ 
diff --git a/ansible/roles/harden/fail2ban/handlers/main.yml b/ansible/roles/harden/fail2ban/handlers/main.yml
new file mode 100644
index 0000000..902e346
--- /dev/null
+++ b/ansible/roles/harden/fail2ban/handlers/main.yml
@@ -0,0 +1,14 @@
+---
+- name: validate fail2ban config
+  listen: "validate and restart fail2ban"
+  become: true
+  ansible.builtin.command: fail2ban-client -t
+  register: f2b_validate
+  changed_when: false
+
+- name: restart fail2ban
+  listen: "validate and restart fail2ban"
+  become: true
+  ansible.builtin.systemd:
+    name: fail2ban
+    state: restarted
diff --git a/ansible/roles/harden/fail2ban/tasks/main.yml b/ansible/roles/harden/fail2ban/tasks/main.yml
new file mode 100644
index 0000000..157fd22
--- /dev/null
+++ b/ansible/roles/harden/fail2ban/tasks/main.yml
@@ -0,0 +1,58 @@
+---
+- name: install fail2ban + deps
+  ansible.builtin.apt:
+    name:
+      - fail2ban
+      - python3
+      - python3-systemd
+      - nftables
+    state: present
+    update_cache: true
+  become: true
+
+- name: enable & start nftables
+  ansible.builtin.systemd:
+    name: nftables
+    enabled: true
+    state: started
+  become: true
+
+- name: ensure fail2ban directories exist
+  ansible.builtin.file:
+    path: "{{ item }}"
+    state: directory
+    owner: root
+    group: root
+    mode: "0755"
+  loop:
+    - /etc/fail2ban
+    - /etc/fail2ban/jail.d
+    - /etc/fail2ban/filter.d
+  become: true
+
+- name: deploy /etc/fail2ban/fail2ban.local
+  ansible.builtin.template:
+    src: fail2ban.local.j2
+    dest: /etc/fail2ban/fail2ban.local
+    owner: root
+    group: root
+    mode: "0644"
+  notify: validate and restart fail2ban
+  become: true
+
+- name: deploy /etc/fail2ban/jail.local
+  ansible.builtin.template:
+    src: jail.local.j2
+    dest: /etc/fail2ban/jail.local
+    owner: root
+    group: root
+    mode: "0644"
+  notify: validate and restart fail2ban
+  become: true
+
+- name: ensure fail2ban enabled and started
+  ansible.builtin.systemd:
+    name: fail2ban
+    enabled: true
+    state: started
+  become: true
diff --git a/ansible/roles/harden/fail2ban/templates/fail2ban.local.j2 b/ansible/roles/harden/fail2ban/templates/fail2ban.local.j2
new file mode 100644
index 0000000..be364af
--- /dev/null
+++ b/ansible/roles/harden/fail2ban/templates/fail2ban.local.j2
@@ -0,0 +1,6 @@
+[Definition]
+loglevel = INFO
+logtarget = /var/log/fail2ban.log
+socket = /run/fail2ban/fail2ban.sock
+pidfile = /run/fail2ban/fail2ban.pid
+dbpurgeage = 86400
diff --git a/ansible/roles/harden/fail2ban/templates/jail.local.j2 b/ansible/roles/harden/fail2ban/templates/jail.local.j2
new file mode 100644
index 0000000..8bf2bb4
--- /dev/null
+++ b/ansible/roles/harden/fail2ban/templates/jail.local.j2
@@ -0,0 +1,18 @@
+[DEFAULT]
+ignoreip = 127.0.0.1/8 ::1
+
+findtime = 600
+maxretry = 5
+bantime = 1h
+
+backend = systemd
+banaction = nftables[type=multiport]
+
+[sshd]
+enabled = true
+# keep in sync with the sshd hardening template
+port = {{ ssh_port }}
+filter = sshd
+maxretry = 5
+findtime = 600
+bantime = 1h
+mode = aggressive
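A quick way to confirm the jail is actually live after the handlers run (both commands ship with fail2ban; the nft set name is created by the nftables banaction and may vary between fail2ban versions):

```bash
fail2ban-client status
fail2ban-client status sshd

# Bans land in an nftables set created by the banaction
nft list ruleset | grep -i -A3 f2b
```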
diff --git a/ansible/roles/harden/nftables/handlers/main.yml b/ansible/roles/harden/nftables/handlers/main.yml
new file mode 100644
index 0000000..0048f09
--- /dev/null
+++ b/ansible/roles/harden/nftables/handlers/main.yml
@@ -0,0 +1,12 @@
+---
+- name: validate nftables config
+  ansible.builtin.command:
+    cmd: nft -c -f /etc/nftables.conf
+  listen: apply nftables
+  changed_when: false
+
+- name: reload nftables
+  ansible.builtin.systemd:
+    name: nftables
+    state: reloaded
+  listen: apply nftables
diff --git a/ansible/roles/harden/nftables/tasks/main.yml b/ansible/roles/harden/nftables/tasks/main.yml
new file mode 100644
index 0000000..6968cee
--- /dev/null
+++ b/ansible/roles/harden/nftables/tasks/main.yml
@@ -0,0 +1,22 @@
+---
+- name: install nftables
+  ansible.builtin.apt:
+    name: nftables
+    state: present
+    update_cache: true
+  notify: apply nftables
+
+- name: deploy nftables config
+  ansible.builtin.template:
+    src: "{{ nftables_conf_name }}"
+    dest: /etc/nftables.conf
+    owner: root
+    group: root
+    mode: "0644"
+  notify: apply nftables
+
+- name: enable and start nftables service
+  ansible.builtin.systemd:
+    name: nftables
+    enabled: true
+    state: started
diff --git a/ansible/roles/harden/nftables/templates/proxmox-nftables.j2 b/ansible/roles/harden/nftables/templates/proxmox-nftables.j2
new file mode 100644
index 0000000..92263bb
--- /dev/null
+++ b/ansible/roles/harden/nftables/templates/proxmox-nftables.j2
@@ -0,0 +1,36 @@
+#!/usr/sbin/nft -f
+
+flush ruleset
+
+table inet filter {
+    chain input {
+        type filter hook input priority 0;
+        policy drop;
+
+        iif "lo" accept
+        ct state established,related accept
+
+        # SSH
+        tcp dport {{ ssh_port }} accept
+
+        # ICMP
+        ip protocol icmp accept
+        ip6 nexthdr icmpv6 accept
+
+        # Proxmox Web/API (LAN only)
+        ip saddr 192.168.0.0/24 tcp dport 8006 accept
+
+        # NTP
+        ip saddr 192.168.0.0/24 udp dport {{ ntp_port }} accept
+    }
+
+    chain forward {
+        type filter hook forward priority 0;
+        policy drop;
+    }
+
+    chain output {
+        type filter hook output priority 0;
+        policy accept;
+    }
+}
diff --git a/ansible/roles/harden/nftables/templates/vm-nftables.conf.j2 b/ansible/roles/harden/nftables/templates/vm-nftables.conf.j2
new file mode 100644
index 0000000..b925be3
--- /dev/null
+++ b/ansible/roles/harden/nftables/templates/vm-nftables.conf.j2
@@ -0,0 +1,32 @@
+#!/usr/sbin/nft -f
+
+flush ruleset
+
+table inet filter {
+    chain input {
+        type filter hook input priority 0;
+        policy drop;
+
+        iif "lo" accept
+        ct state established,related accept
+
+        # SSH
+        tcp dport {{ ssh_port }} accept
+
+        # udp dport {{ ntp_port }} accept
+
+        # ICMP
+        ip protocol icmp accept
+        ip6 nexthdr icmpv6 accept
+    }
+
+    chain forward {
+        type filter hook forward priority 0;
+        policy drop;
+    }
+
+    chain output {
+        type filter hook output priority 0;
+        policy accept;
+    }
+}
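The handler pair validates before reloading; the same check is handy interactively when editing one of the templates (these are the exact commands the handlers run):

```bash
# Dry-run parse of the rendered config (what the validate handler does)
nft -c -f /etc/nftables.conf

# Apply and inspect the live ruleset
systemctl reload nftables
nft list ruleset
```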
diff --git a/ansible/roles/harden/sshd_config/tasks/main.yml b/ansible/roles/harden/sshd_config/tasks/main.yml
new file mode 100644
index 0000000..35d89bc
--- /dev/null
+++ b/ansible/roles/harden/sshd_config/tasks/main.yml
@@ -0,0 +1,25 @@
+---
+- name: ensure sshd_config.d directory exists
+  become: true
+  ansible.builtin.file:
+    path: "/etc/ssh/sshd_config.d"
+    state: directory
+    owner: root
+    group: root
+    mode: "0755"
+
+- name: deploy sshd config file
+  become: true
+  ansible.builtin.template:
+    src: "00-sshd_config-hardening.conf.j2"
+    dest: "/etc/ssh/sshd_config.d/00-sshd_config-hardening.conf"
+    owner: root
+    group: root
+    mode: "0644"
+    validate: "sshd -t -f %s"
+
+- name: restart SSH service
+  become: true
+  ansible.builtin.service:
+    name: ssh
+    state: restarted
diff --git a/ansible/roles/harden/sshd_config/templates/00-sshd_config-hardening.conf.j2 b/ansible/roles/harden/sshd_config/templates/00-sshd_config-hardening.conf.j2
new file mode 100644
index 0000000..8b7717a
--- /dev/null
+++ b/ansible/roles/harden/sshd_config/templates/00-sshd_config-hardening.conf.j2
@@ -0,0 +1,107 @@
+# --- MAIN ---
+
+# Change default port 22 → {{ ssh_port }} (reduces noise from scanners)
+Port {{ ssh_port }}
+
+# Optionally limit interfaces (default is all)
+# ListenAddress 0.0.0.0   # IPv4
+# ListenAddress ::        # IPv6
+
+# Allow only SSH protocol version 2 (v1 is insecure; the directive is
+# deprecated and ignored by OpenSSH 7.6+, kept for older hosts)
+Protocol 2
+
+
+# --- AUTHENTICATION ---
+
+# Root may log in with keys only (password root login disabled)
+PermitRootLogin prohibit-password
+
+# Disable password login (keys only)
+PasswordAuthentication no
+
+# Disable interactive keyboard auth (OTP, TOTP, etc.)
+KbdInteractiveAuthentication no
+
+# Disable challenge-response auth (legacy alias of the above)
+ChallengeResponseAuthentication no
+
+# Enable public key authentication (main method)
+PubkeyAuthentication yes
+
+
+# --- ACCESS ---
+
+# Allow only specific user
+# AllowUsers adminuser
+# Or alternatively allow a group:
+# AllowGroups sshusers
+
+
+# --- FUNCTION RESTRICTIONS ---
+
+# Disallow empty passwords
+PermitEmptyPasswords no
+
+# Disallow user environment modification (~/.ssh/environment)
+PermitUserEnvironment no
+
+# Disable X11 forwarding (no GUI sessions)
+X11Forwarding no
+
+# TCP forwarding stays enabled (tunnels allowed; set to "no" to harden further)
+AllowTcpForwarding yes
+
+# Disable gateway ports (no external binding)
+GatewayPorts no
+
+# Disable VPN tunnels via SSH
+PermitTunnel no
+
+# Agent forwarding stays enabled (set to "no" to harden further)
+AllowAgentForwarding yes
+
+
+# --- ANTI-BRUTEFORCE & STABILITY ---
+
+# Login timeout (20 seconds)
+LoginGraceTime 20
+
+# Max 3 auth attempts per connection
+MaxAuthTries 3
+
+# Limit unauthenticated connections:
+# start dropping 30% at 10 pending, drop all at 60
+MaxStartups 10:30:60
+
+
+# --- SESSION ACTIVITY ---
+
+# Ping client every 300s (5 minutes)
+ClientAliveInterval 300
+
+# Disconnect if no response twice
+ClientAliveCountMax 2
+
+# Disable TCP keepalive
+TCPKeepAlive no
+
+# Skip DNS checks for faster login
+UseDNS no
+
+
+# --- SFTP ---
+
+# Use internal SFTP subsystem
+Subsystem sftp internal-sftp
+
+
+# --- CRYPTOGRAPHY (optional) ---
+
+# Modern key exchange algorithms (if supported)
+# KexAlgorithms sntrup761x25519-sha512@openssh.com,curve25519-sha256
+
+# Modern ciphers
+# Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr
+
+# Modern MAC algorithms
+# MACs umac-128-etm@openssh.com,hmac-sha2-256-etm@openssh.com
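Since the drop-in changes the port, a careless apply can lock you out; a hedged pre-flight from a second shell before closing the old session (`sshd -t` is the same check the template's `validate:` runs):

```bash
# Validate the full effective config, not just the drop-in
sshd -t

# Confirm the new port is listening, then test from another terminal
ss -lntp | grep sshd
ssh -p "$SSH_PORT" adminuser@<host> 'echo ok'   # SSH_PORT = your ssh_port var
```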
diff --git a/ansible/roles/harden/unattended_upgrades/handlers/main.yml b/ansible/roles/harden/unattended_upgrades/handlers/main.yml
new file mode 100644
index 0000000..385b736
--- /dev/null
+++ b/ansible/roles/harden/unattended_upgrades/handlers/main.yml
@@ -0,0 +1,15 @@
+---
+- name: restart unattended-upgrades
+  ansible.builtin.service:
+    name: unattended-upgrades
+    state: restarted
+    enabled: true
+
+- name: restart apt timers
+  ansible.builtin.systemd:
+    name: "{{ item }}"
+    state: restarted
+    enabled: true
+  loop:
+    - apt-daily.timer
+    - apt-daily-upgrade.timer
diff --git a/ansible/roles/harden/unattended_upgrades/readme.md b/ansible/roles/harden/unattended_upgrades/readme.md
new file mode 100644
index 0000000..32e92ff
--- /dev/null
+++ b/ansible/roles/harden/unattended_upgrades/readme.md
@@ -0,0 +1,17 @@
+```bash
+## Check that the timers are enabled and "ticking"
+systemctl status apt-daily.timer apt-daily-upgrade.timer
+systemctl list-timers --all | egrep 'apt-daily|apt-daily-upgrade'
+
+## Check that unattended-upgrades has actually run
+systemctl status unattended-upgrades.service
+journalctl -u unattended-upgrades --no-pager -n 200
+
+## Check the logs and the actions actually taken
+ls -l /var/log/unattended-upgrades/
+tail -n 200 /var/log/unattended-upgrades/unattended-upgrades.log
+tail -n 200 /var/log/unattended-upgrades/unattended-upgrades-dpkg.log
+
+## Quick self-test (dry run)
+unattended-upgrade --dry-run --debug
+```
\ No newline at end of file
diff --git a/ansible/roles/harden/unattended_upgrades/tasks/main.yml b/ansible/roles/harden/unattended_upgrades/tasks/main.yml
new file mode 100644
index 0000000..985cc5f
--- /dev/null
+++ b/ansible/roles/harden/unattended_upgrades/tasks/main.yml
@@ -0,0 +1,49 @@
+---
+- name: ensure required packages are present
+  ansible.builtin.apt:
+    name:
+      - unattended-upgrades
+      - apt-listchanges
+      - gpg
+    state: present
+    update_cache: true
+
+- name: ensure debian-security repo is present
+  ansible.builtin.apt_repository:
+    repo: >-
+      deb http://deb.debian.org/debian-security
+      {{ ansible_facts.lsb.codename | default(ansible_facts.distribution_release) }}-security
+      main contrib non-free non-free-firmware
+    state: present
+    filename: debian-security
+    update_cache: true
+  notify: restart apt timers
+
+- name: deploy /etc/apt/apt.conf.d/50unattended-upgrades
+  ansible.builtin.template:
+    src: 50unattended-upgrades.j2
+    dest: /etc/apt/apt.conf.d/50unattended-upgrades
+    owner: root
+    group: root
+    mode: "0644"
+  notify: restart unattended-upgrades
+
+- name: deploy /etc/apt/apt.conf.d/20auto-upgrades
+  ansible.builtin.template:
+    src: 20auto-upgrades.j2
+    dest: /etc/apt/apt.conf.d/20auto-upgrades
+    owner: root
+    group: root
+    mode: "0644"
+  notify:
+    - restart unattended-upgrades
+    - restart apt timers
+
+- name: enable & start apt timers
+  ansible.builtin.systemd:
+    name: "{{ item }}"
+    state: started
+    enabled: true
+  loop:
+    - apt-daily.timer
+    - apt-daily-upgrade.timer
diff --git a/ansible/roles/harden/unattended_upgrades/templates/20auto-upgrades.j2 b/ansible/roles/harden/unattended_upgrades/templates/20auto-upgrades.j2
new file mode 100644
index 0000000..29a82d7
--- /dev/null
+++ b/ansible/roles/harden/unattended_upgrades/templates/20auto-upgrades.j2
@@ -0,0 +1,4 @@
+APT::Periodic::Update-Package-Lists "1";
+APT::Periodic::Download-Upgradeable-Packages "1";
+APT::Periodic::Unattended-Upgrade "1";
+APT::Periodic::AutocleanInterval "7";
diff --git a/ansible/roles/harden/unattended_upgrades/templates/50unattended-upgrades.j2 b/ansible/roles/harden/unattended_upgrades/templates/50unattended-upgrades.j2
new file mode 100644
index 0000000..8f377cf
--- /dev/null
+++ b/ansible/roles/harden/unattended_upgrades/templates/50unattended-upgrades.j2
@@ -0,0 +1,10 @@
+Unattended-Upgrade::Origins-Pattern {
+    "origin=Debian,codename=${distro_codename}-security";
+};
+
+Unattended-Upgrade::Automatic-Reboot "false";
+Unattended-Upgrade::Automatic-Reboot-Time "03:30";
+Unattended-Upgrade::Automatic-Reboot-WithUsers "false";
+
+Unattended-Upgrade::Remove-Unused-Dependencies "true";
+Unattended-Upgrade::MinimalSteps "true";
diff --git a/ansible/roles/k8s/install/00_python/tasks/main.yml b/ansible/roles/k8s/install/00_python/tasks/main.yml
new file mode 100644
index 0000000..c8927f3
--- /dev/null
+++ b/ansible/roles/k8s/install/00_python/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+- name: Ensure required Python libraries are installed
+  ansible.builtin.apt:
+    name:
+      - python3-pip
+      - python3-kubernetes
+    state: present
+    update_cache: yes
diff --git a/ansible/roles/k8s/install/01_helm/install-helm.md b/ansible/roles/k8s/install/01_helm/install-helm.md
new file mode 100644
index 0000000..320af2a
--- /dev/null
+++ b/ansible/roles/k8s/install/01_helm/install-helm.md
@@ -0,0 +1,3 @@
+```bash
+curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
+```
\ No newline at end of file
diff --git a/ansible/roles/k8s/install/01_helm/tasks/main.yml b/ansible/roles/k8s/install/01_helm/tasks/main.yml
new file mode 100644
index 0000000..8d62c91
--- /dev/null
+++ b/ansible/roles/k8s/install/01_helm/tasks/main.yml
@@ -0,0 +1,20 @@
+---
+- name: Download Helm install script
+  ansible.builtin.get_url:
+    url: https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
+    dest: /tmp/get-helm-3.sh
+    mode: '0755'
+
+- name: Install Helm
+  ansible.builtin.command: /tmp/get-helm-3.sh
+  args:
+    creates: /usr/local/bin/helm
+
+- name: Verify Helm installation
+  ansible.builtin.command: helm version
+  register: helm_version_output
+  changed_when: false
+
+- name: Show Helm version
+  ansible.builtin.debug:
+    var: helm_version_output.stdout
diff --git a/ansible/roles/k8s/install/02_common/tasks/main.yml b/ansible/roles/k8s/install/02_common/tasks/main.yml
new file mode 100644
index 0000000..33124db
--- /dev/null
+++ b/ansible/roles/k8s/install/02_common/tasks/main.yml
@@ -0,0 +1,172 @@
+# roles/k8s/k8scommon/tasks/main.yml
+---
+# === 1. Package update and base utilities ===
+- name: Install base packages
+  ansible.builtin.apt:
+    update_cache: yes
+    name:
+      - apt-transport-https
+      - ca-certificates
+      - curl
+      - gnupg
+      - lsb-release
+    state: present
+
+# === 2. Disable swap ===
+- name: Disable swap immediately
+  ansible.builtin.command: swapoff -a
+  changed_when: false
+
+- name: Backup fstab
+  ansible.builtin.copy:
+    src: /etc/fstab
+    dest: /etc/fstab.bak
+    remote_src: yes
+    force: no
+
+- name: Comment out swap entries in fstab
+  ansible.builtin.replace:
+    path: /etc/fstab
+    regexp: '^\s*([^#].*\s+swap\s+.*)$'
+    replace: '# \1'
+
+# === 3. Kernel modules ===
+- name: Write kernel modules config for Kubernetes
+  ansible.builtin.copy:
+    dest: /etc/modules-load.d/k8s.conf
+    content: |
+      overlay
+      br_netfilter
+
+- name: Load overlay module
+  ansible.builtin.command: modprobe overlay
+  changed_when: false
+
+- name: Load br_netfilter module
+  ansible.builtin.command: modprobe br_netfilter
+  changed_when: false
+
+# === 4. sysctl for Kubernetes / containerd ===
+- name: Configure Kubernetes sysctl params
+  ansible.builtin.copy:
+    dest: /etc/sysctl.d/99-kubernetes-cri.conf
+    content: |
+      net.bridge.bridge-nf-call-iptables = 1
+      net.bridge.bridge-nf-call-ip6tables = 1
+      net.ipv4.ip_forward = 1
+
+- name: Apply sysctl settings
+  ansible.builtin.command: sysctl --system
+  changed_when: false
+
+# === 5. Install containerd ===
+- name: Install containerd
+  ansible.builtin.apt:
+    update_cache: yes
+    name: containerd
+    state: present
+
+- name: Ensure containerd config directory exists
+  ansible.builtin.file:
+    path: /etc/containerd
+    state: directory
+    mode: '0755'
+
+# IMPORTANT: always regenerate config.toml, same as the manual script
+- name: Generate default containerd config (overwrite)
+  ansible.builtin.shell: |
+    set -o errexit
+    containerd config default > /etc/containerd/config.toml
+  args:
+    executable: /bin/bash
+
+- name: Enable SystemdCgroup in containerd config
+  ansible.builtin.replace:
+    path: /etc/containerd/config.toml
+    regexp: 'SystemdCgroup = false'
+    replace: 'SystemdCgroup = true'
+
+- name: Set correct CNI bin_dir in containerd config
+  ansible.builtin.replace:
+    path: /etc/containerd/config.toml
+    regexp: 'bin_dir = .*'
+    replace: 'bin_dir = "/opt/cni/bin"'
+
+- name: Set correct CNI conf_dir in containerd config
+  ansible.builtin.replace:
+    path: /etc/containerd/config.toml
+    regexp: 'conf_dir = .*'
+    replace: 'conf_dir = "/etc/cni/net.d"'
+
+- name: Enable and restart containerd
+  ansible.builtin.systemd:
+    name: containerd
+    enabled: true
+    state: restarted
+
+# === 6. Prepare CNI directories ===
+- name: Ensure CNI directories exist
+  ansible.builtin.file:
+    path: "{{ item }}"
+    state: directory
+    mode: '0755'
+  loop:
+    - /opt/cni/bin
+    - /etc/cni/net.d
+
+# /usr/lib/cni → /opt/cni/bin, only if /usr/lib/cni does not exist
+- name: Check if /usr/lib/cni exists
+  ansible.builtin.stat:
+    path: /usr/lib/cni
+  register: cni_usr_lib
+
+- name: Create symlink /usr/lib/cni -> /opt/cni/bin (if not exists)
+  ansible.builtin.file:
+    src: /opt/cni/bin
+    dest: /usr/lib/cni
+    state: link
+  when: not cni_usr_lib.stat.exists
+
+# === 7. Kubernetes v1.34 repository ===
+- name: Ensure apt keyrings directory exists
+  ansible.builtin.file:
+    path: /etc/apt/keyrings
+    state: directory
+    mode: '0755'
+
+- name: Download Kubernetes repo key
+  ansible.builtin.shell: |
+    set -o errexit
+    curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.34/deb/Release.key \
+      | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
+  args:
+    executable: /bin/bash
+    creates: /etc/apt/keyrings/kubernetes-apt-keyring.gpg
+
+- name: Add Kubernetes apt repository
+  ansible.builtin.copy:
+    dest: /etc/apt/sources.list.d/kubernetes.list
+    content: |
+      deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.34/deb/ /
+
+- name: Update apt cache after adding Kubernetes repo
+  ansible.builtin.apt:
+    update_cache: yes
+
+# === 8. Install kubelet, kubeadm, kubectl and pin their versions ===
+- name: Install kubelet, kubeadm, kubectl
+  ansible.builtin.apt:
+    name:
+      - kubelet
+      - kubeadm
+      - kubectl
+    state: present
+    update_cache: yes
+
+- name: Hold Kubernetes packages
+  ansible.builtin.command: apt-mark hold kubelet kubeadm kubectl
+  register: hold_result
+  changed_when: "'was already set on hold' not in hold_result.stdout"
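A short sanity pass over what the common role just configured — a hedged sketch; the paths and keys match the tasks above:

```bash
# swap must be off and stay off across reboots
swapon --show            # expect empty output
grep swap /etc/fstab     # expect only commented-out entries

# bridge/forwarding sysctls applied
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward

# containerd uses the systemd cgroup driver
grep SystemdCgroup /etc/containerd/config.toml   # expect: SystemdCgroup = true
systemctl is-active containerd
```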
diff --git a/ansible/roles/k8s/install/03_master/tasks/main.yml b/ansible/roles/k8s/install/03_master/tasks/main.yml
new file mode 100644
index 0000000..62cbe9b
--- /dev/null
+++ b/ansible/roles/k8s/install/03_master/tasks/main.yml
@@ -0,0 +1,136 @@
+# roles/k8s/k8smaster/tasks/main.yml
+---
+# === 9. kubeadm init (same as manual step 14) ===
+- name: Initialize Kubernetes control plane (kubeadm init)
+  ansible.builtin.command: >
+    kubeadm init
+    --apiserver-advertise-address={{ ansible_default_ipv4.address }}
+    --pod-network-cidr=10.244.0.0/16
+  args:
+    creates: /etc/kubernetes/admin.conf
+
+# === 10. kubeconfig for root and the regular user ===
+- name: Ensure kubeconfig directory for root exists
+  ansible.builtin.file:
+    path: /root/.kube
+    state: directory
+    mode: "0700"
+
+- name: Copy admin kubeconfig for root
+  ansible.builtin.copy:
+    src: /etc/kubernetes/admin.conf
+    dest: /root/.kube/config
+    owner: root
+    group: root
+    mode: "0600"
+    remote_src: yes
+
+- name: Ensure kubeconfig directory for user exists
+  ansible.builtin.file:
+    path: "/home/adminuser/.kube"
+    state: directory
+    owner: "adminuser"
+    group: "adminuser"
+    mode: "0700"
+
+- name: Copy admin kubeconfig to user home
+  ansible.builtin.copy:
+    src: /etc/kubernetes/admin.conf
+    dest: "/home/adminuser/.kube/config"
+    owner: "adminuser"
+    group: "adminuser"
+    mode: "0600"
+    remote_src: yes
+
+# === 11. Wait for the API server ===
+- name: Wait for Kubernetes API to become reachable
+  ansible.builtin.command: kubectl get --raw=/healthz
+  register: api_health
+  until: api_health.rc == 0
+  retries: 30
+  delay: 10
+  environment:
+    KUBECONFIG: /etc/kubernetes/admin.conf
+
+# === 12. Install Flannel CNI (do NOT wait for node Ready before this) ===
+- name: Install Flannel CNI
+  ansible.builtin.command: >
+    kubectl apply --validate=false
+    -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
+  register: flannel_result
+  until: flannel_result.rc == 0
+  retries: 10
+  delay: 6
+  environment:
+    KUBECONFIG: /etc/kubernetes/admin.conf
+
+- name: Wait for flannel DaemonSet to be Ready
+  ansible.builtin.command: >
+    kubectl -n kube-flannel rollout status daemonset/kube-flannel-ds --timeout=300s
+  register: flannel_rollout
+  until: flannel_rollout.rc == 0
+  retries: 5
+  delay: 15
+  environment:
+    KUBECONFIG: /etc/kubernetes/admin.conf
+
+# === 13. Now wait for the node to become Ready ===
+- name: Wait for control-plane node to become Ready
+  ansible.builtin.shell: |
+    kubectl get node "$(hostname -s)" \
+      -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}'
+  register: node_ready
+  until: node_ready.stdout == "True"
+  retries: 30
+  delay: 10
+  environment:
+    KUBECONFIG: /etc/kubernetes/admin.conf
+
+# === 14. Wait for CoreDNS ===
+- name: Wait for CoreDNS deployment to be Ready
+  ansible.builtin.command: >
+    kubectl -n kube-system rollout status deployment/coredns --timeout=300s
+  register: coredns_rollout
+  until: coredns_rollout.rc == 0
+  retries: 5
+  delay: 15
+  environment:
+    KUBECONFIG: /etc/kubernetes/admin.conf
+
+# === 15. Allow pods on the master (manual step 18), if needed ===
+- name: Allow scheduling pods on control-plane node
+  ansible.builtin.command: >
+    kubectl taint nodes --all node-role.kubernetes.io/control-plane-
+  environment:
+    KUBECONFIG: /etc/kubernetes/admin.conf
+  when: false
+
+# === 16. Check cluster status ===
+- name: Get nodes
+  ansible.builtin.command: kubectl get nodes
+  register: nodes_out
+  environment:
+    KUBECONFIG: /etc/kubernetes/admin.conf
+
+- name: Show nodes
+  ansible.builtin.debug:
+    var: nodes_out.stdout
+
+- name: Get all pods in all namespaces
+  ansible.builtin.command: kubectl get pods -A
+  register: pods_out
+  environment:
+    KUBECONFIG: /etc/kubernetes/admin.conf
+
+- name: Show pods
+  ansible.builtin.debug:
+    var: pods_out.stdout
+
+# === 17. Print the join command (manual step 20) ===
+- name: Get kubeadm join command
+  ansible.builtin.command: kubeadm token create --print-join-command
+  register: join_cmd
+
+- name: Show join command
+  ansible.builtin.debug:
+    msg: "Use this command on workers: {{ join_cmd.stdout }}"
diff --git a/ansible/roles/k8s/install/04_worker/tasks/main.yml b/ansible/roles/k8s/install/04_worker/tasks/main.yml
new file mode 100644
index 0000000..e4bf33c
--- /dev/null
+++ b/ansible/roles/k8s/install/04_worker/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+# === 2. Join the cluster (same as kubeadm join in the manual script) ===
+- name: Join node to Kubernetes cluster
+  ansible.builtin.command: "{{ k8s_kubeadm_join_command }}"
+  args:
+    creates: /etc/kubernetes/kubelet.conf
+
+# === 3. Make sure kubelet is enabled and running ===
+- name: Ensure kubelet is enabled and running
+  ansible.builtin.systemd:
+    name: kubelet
+    enabled: true
+    state: started
\ No newline at end of file
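The worker role expects `k8s_kubeadm_join_command` to be supplied from outside. A hedged wiring sketch — the playbook path is illustrative, not a file from this repo:

```bash
# On the master: mint a fresh join command
JOIN_CMD="$(ssh master-node 'kubeadm token create --print-join-command')"

# Pass it to the worker play as an extra var
ansible-playbook playbooks/k8s/join_workers.yml -i inventory.ini \
  -e "k8s_kubeadm_join_command=${JOIN_CMD}"
```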
diff --git a/ansible/roles/k8s/readme/install-k8scommon.md b/ansible/roles/k8s/readme/install-k8scommon.md
new file mode 100644
index 0000000..8d7b9a5
--- /dev/null
+++ b/ansible/roles/k8s/readme/install-k8scommon.md
@@ -0,0 +1,109 @@
+```bash
+# === Become root (if not already) ===
+sudo -i
+```
+
+```bash
+# === 1. Package update and base utilities ===
+apt-get update -y
+apt-get install -y apt-transport-https ca-certificates curl gnupg lsb-release
+```
+
+```bash
+# === 2. Disable swap immediately ===
+swapoff -a
+```
+
+```bash
+# === 3. Remove swap from /etc/fstab (so it does not come back after a reboot) ===
+cp /etc/fstab /etc/fstab.bak
+sed -i '/ swap / s/^/#/' /etc/fstab
+```
+
+```bash
+# === 4. Enable the overlay and br_netfilter kernel modules ===
+cat <<EOF >/etc/modules-load.d/k8s.conf
+overlay
+br_netfilter
+EOF
+
+modprobe overlay
+modprobe br_netfilter
+```
+
+```bash
+# === 5. Configure sysctl for Kubernetes and containerd ===
+cat <<EOF >/etc/sysctl.d/99-kubernetes-cri.conf
+net.bridge.bridge-nf-call-iptables = 1
+net.bridge.bridge-nf-call-ip6tables = 1
+net.ipv4.ip_forward = 1
+EOF
+
+sysctl --system
+```
+
+```bash
+# === 6. Install containerd ===
+apt-get install -y containerd
+```
+
+```bash
+# === 7. Generate the containerd config and enable SystemdCgroup ===
+mkdir -p /etc/containerd
+containerd config default >/etc/containerd/config.toml
+
+# Enable SystemdCgroup
+sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
+
+# (Optional) Make sure the CNI paths are set to /opt/cni/bin and /etc/cni/net.d
+sed -i 's@bin_dir = .*@bin_dir = "/opt/cni/bin"@' /etc/containerd/config.toml
+sed -i 's@conf_dir = .*@conf_dir = "/etc/cni/net.d"@' /etc/containerd/config.toml
+
+systemctl restart containerd
+systemctl enable containerd
+```
+```bash
+# === 8. Prepare directories for the CNI plugins ===
+mkdir -p /opt/cni/bin
+mkdir -p /etc/cni/net.d
+```
+
+```bash
+# === 9. Path fix for flannel: /usr/lib/cni → /opt/cni/bin ===
+# IMPORTANT: if /usr/lib/cni already exists, SKIP this command
+ln -s /opt/cni/bin /usr/lib/cni
+```
+
+```bash
+# === 10. Add the official Kubernetes repository (pkgs.k8s.io, v1.34 branch) ===
+mkdir -p /etc/apt/keyrings
+
+curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.34/deb/Release.key \
+  | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
+
+cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
+deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.34/deb/ /
+EOF
+
+apt-get update -y
+```
+
+```bash
+# === 11. Install kubelet, kubeadm, kubectl and pin their versions ===
+apt-get install -y kubelet kubeadm kubectl
+apt-mark hold kubelet kubeadm kubectl
+```
\ No newline at end of file
diff --git a/ansible/roles/k8s/readme/install-k8smaster.md b/ansible/roles/k8s/readme/install-k8smaster.md
new file mode 100644
index 0000000..ef00597
--- /dev/null
+++ b/ansible/roles/k8s/readme/install-k8smaster.md
@@ -0,0 +1,53 @@
+```bash
+# === 13. Look up the master's IP addresses ===
+hostname -I
+
+# Note the IP you need (e.g. 192.168.0.26) and substitute it into the next command.
+# POD CIDR for Flannel is 10.244.0.0/16
+```
+
+```bash
+# === 14. Initialize the control plane (kubeadm init) ===
+kubeadm init \
+  --apiserver-advertise-address=192.168.0.154 \
+  --pod-network-cidr=10.244.0.0/16
+```
+
+```bash
+# === 15. Set up kubeconfig for root (so kubectl works without extra flags) ===
+mkdir -p /root/.kube
+cp /etc/kubernetes/admin.conf /root/.kube/config
+chown root:root /root/.kube/config
+```
+
+```bash
+# === 16. (Optional) Copy the kubeconfig to the regular user adminuser ===
+# REPLACE adminuser with your own username
+mkdir -p /home/adminuser/.kube
+cp /etc/kubernetes/admin.conf /home/adminuser/.kube/config
+chown adminuser:adminuser /home/adminuser/.kube/config
+```
+
+```bash
+# === 17. Install Flannel as the CNI plugin ===
+kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
+```
+
+```bash
+# === 18. (Optional) Allow pods to run on the master (single-node cluster) ===
+# If you want to use the master as a worker too:
+kubectl taint nodes --all node-role.kubernetes.io/control-plane-
+```
+
+```bash
+# === 19. Check cluster status ===
+kubectl get nodes
+kubectl get pods -A
+```
+
+```bash
+# === 20. Get the command for joining worker nodes ===
+kubeadm token create --print-join-command
+
+# Copy the printed "kubeadm join ..." command in full — you will need it on the worker.
+```
\ No newline at end of file
diff --git a/ansible/roles/k8s/readme/install-k8sworker.md b/ansible/roles/k8s/readme/install-k8sworker.md
new file mode 100644
index 0000000..d7aa049
--- /dev/null
+++ b/ansible/roles/k8s/readme/install-k8sworker.md
@@ -0,0 +1,14 @@
+```bash
+# === 21. Run the join command obtained on the master ===
+# Example (THIS IS ONLY AN EXAMPLE, USE YOUR OWN COMMAND FROM STEP 20):
+
+kubeadm join 192.168.0.154:6443 --token 9jz5xr.xvwirgtsp2v2brge \
+  --discovery-token-ca-cert-hash sha256:e09d4918b52e647af493e8345504ecb9907e79637a52932e730df350d3f76ede
+```
+
+```bash
+# === 22. From the master, check that the worker has joined the cluster ===
+# Run these commands on the master node:
+kubectl get nodes
+kubectl get pods -A
+```
\ No newline at end of file
diff --git a/ansible/roles/k8s/readme/install-keyclock.md b/ansible/roles/k8s/readme/install-keyclock.md
new file mode 100644
index 0000000..972d72f
--- /dev/null
+++ b/ansible/roles/k8s/readme/install-keyclock.md
@@ -0,0 +1,87 @@
+```bash
+helm repo add codecentric https://codecentric.github.io/helm-charts
+helm repo update
+```
+
+```bash
+kubectl create namespace keycloak
+```
+
+```bash
+vim values-keycloak.yaml
+
+# Which Keycloak image to install
+image:
+  repository: quay.io/keycloak/keycloak
+  # Pin the version you want
+  # (26.0.7 is an example; check the current tags on quay.io/keycloak/keycloak)
+  tag: "26.0.7"
+  pullPolicy: IfNotPresent
+
+replicas: 1
+
+# HTTP path Keycloak will be served under
+http:
+  # "/" or "/auth" — your choice; "/" here for simplicity
+  relativePath: "/"
+
+# Connection to the external PostgreSQL database
+database:
+  vendor: postgres
+  hostname: postgres-postgresql.postgres.svc.cluster.local
+  port: 5432
+  database: keycloak
+  username: keycloak_user
+  password: "password"
+
+# Keycloak start command (recommended kc.sh style)
+command:
+  - "/opt/keycloak/bin/kc.sh"
+  - "start"
+  - "--http-enabled=true"
+  - "--http-port=8080"
+  - "--hostname-strict=false"
+  - "--hostname-strict-https=false"
+  - "--proxy=edge"
+
+# NGINX ingress on keycloak.local
+ingress:
+  enabled: true
+  ingressClassName: "nginx"
+  annotations:
+    nginx.ingress.kubernetes.io/ssl-redirect: "false"
+  rules:
+    - host: "keycloak.local"
+      paths:
+        - path: '{{ tpl .Values.http.relativePath $ | trimSuffix "/" }}/'
+          pathType: Prefix
+  tls: []  # TLS can be enabled later via cert-manager
+
+# Keycloak environment variables
+extraEnv: |
+  # Admin user and password
+  - name: KEYCLOAK_ADMIN
+    value: admin
+  - name: KEYCLOAK_ADMIN_PASSWORD
+    value: password
+
+  # Proxy / hostname settings
+  - name: KC_PROXY
+    value: edge
+  - name: KC_HOSTNAME
+    value: "keycloak.local"
+
+  # JGroups discovery via the chart's headless service
+  - name: JAVA_OPTS_APPEND
+    value: >-
+      -XX:+UseContainerSupport
+      -XX:MaxRAMPercentage=50.0
+      -Djava.awt.headless=true
+      -Djgroups.dns.query={{ include "keycloak.fullname" . }}-headless
+
+
+
+helm install keycloak codecentric/keycloakx \
+  --namespace keycloak \
+  --values values-keycloak.yaml
+```
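The values above assume the `keycloak` database and `keycloak_user` already exist in the in-cluster PostgreSQL. A hedged bootstrap sketch — the pod name `postgres-postgresql-0` and passwordless `psql` access inside the pod are assumptions about a typical Bitnami-style install; adjust to your release:

```bash
kubectl -n postgres exec -it postgres-postgresql-0 -- \
  psql -U postgres -c "CREATE USER keycloak_user WITH PASSWORD 'password';"
kubectl -n postgres exec -it postgres-postgresql-0 -- \
  psql -U postgres -c "CREATE DATABASE keycloak OWNER keycloak_user;"
```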
diff --git a/ansible/roles/k8s/readme/install-netbird.md b/ansible/roles/k8s/readme/install-netbird.md
new file mode 100644
index 0000000..9ad6529
--- /dev/null
+++ b/ansible/roles/k8s/readme/install-netbird.md
@@ -0,0 +1,288 @@
+```bash
+helm repo add jaconi https://charts.jaconi.io
+helm repo update
+```
+
+```bash
+fullnameOverride: "netbird"
+
+config:
+  database:
+    DB_TYPE: postgres
+    HOST: postgres-postgresql.postgres.svc.cluster.local
+    PORT: 5432
+    NAME: netbird
+    USER: netbird_user
+    PASSWD: password
+
+relay:
+  enabled: true
+  config:
+    NB_EXPOSED_ADDRESS: "netbird-relay.netbird.svc.cluster.local:33080"
+
+signal:
+  enabled: true
+
+management:
+  enabled: true
+  config:
+    NETBIRD_SIGNAL_URI: "netbird-signal.netbird.svc.cluster.local:10000"
+    NETBIRD_SIGNAL_PROTOCOL: "https"
+    NETBIRD_RELAY_DOMAIN: "netbird-relay.netbird.svc.cluster.local"
+    NETBIRD_RELAY_PORT: "33080"
+    NETBIRD_STUN_URI: "stun:netbird-signal.netbird.svc.cluster.local:3478"
+    NETBIRD_TURN_URI: "turn:netbird-signal.netbird.svc.cluster.local:3478"
+
+dashboard:
+  enabled: true
+  service:
+    type: ClusterIP
+  ingress:
+    enabled: false
+```
+
+```bash
+openssl rand -hex 32
+
+kubectl create secret generic netbird-relay-secret \
+  -n netbird \
+  --from-literal=netbird-relay-secret-key="8626c1ed1c8cfcb13df6c65819042771a2bf7a280c16f0ba54abea8cde7b560d"
+
+```
+
+```bash
+helm install netbird jaconi/netbird \
+  -n netbird \
+  --create-namespace \
+  -f netbird-values.yaml
+
+or
+
+helm upgrade netbird jaconi/netbird \
+  -n netbird \
+  -f netbird-values.yaml
+```
+
+```bash
+kubectl -n netbird get pods
+kubectl -n netbird get svc
+kubectl -n netbird get ingress
+```
+
+
+```bash
+vim netbird-dashboard-deployment.yaml
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: netbird-dashboard
+  namespace: netbird
+  labels:
+    app: netbird-dashboard
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: netbird-dashboard
+  template:
+    metadata:
+      labels:
+        app: netbird-dashboard
+    spec:
+      containers:
+        - name: dashboard
+          image: netbirdio/dashboard:0.45.1
+          ports:
+            - containerPort: 80
+          env:
+            - name: NB_MANAGEMENT_API_ENDPOINT
+              value: "http://netbird.local:30830"
+```
+
+```bash
+vim netbird-dashboard-service.yaml
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: netbird-dashboard
+  namespace: netbird
+spec:
+  selector:
+    app: netbird-dashboard
+  ports:
+    - protocol: TCP
+      port: 80
+      targetPort: 80
+  type: ClusterIP
+```
+
+```bash
+vim netbird-dashboard-ingress.yaml
+
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: netbird-dashboard
+  namespace: netbird
+spec:
+  ingressClassName: nginx
+  rules:
+    - host: netbird.local
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: netbird-dashboard
+                port:
+                  number: 80
+
+
+```
+
+```bash
+kubectl apply -f netbird-dashboard-deployment.yaml
+kubectl apply -f netbird-dashboard-service.yaml
+kubectl apply -f netbird-dashboard-ingress.yaml
+```
+
+```bash
+# Map netbird.local to the ingress IP in the local hosts file:
+C:\Windows\System32\drivers\etc\hosts
+```
+
+# k8s
+
+```bash
+vim netbird-application.yaml
+
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: netbird          # the application's name in ArgoCD
+  namespace: argocd      # the namespace where ArgoCD is installed
+spec:
+  project: default
+
+  source:
+    repoURL: https://charts.jaconi.io   # the same helm repo
+    chart: netbird                      # chart name
+    targetRevision: "*"                 # a version can be pinned; any for now
+    helm:
+      releaseName: netbird              # as if you had run "helm install netbird ..."
+      values: |-
+        fullnameOverride: "netbird"
+
+        config:
+          database:
+            DB_TYPE: postgres
+            HOST: postgres-postgresql.postgres.svc.cluster.local
+            PORT: 5432
+            NAME: netbird
+            USER: netbird_user
+            PASSWD: password
+
+        relay:
+          enabled: true
+          config:
+            NB_EXPOSED_ADDRESS: "netbird-relay.netbird.svc.cluster.local:33080"
+
+        signal:
+          enabled: true
+
+        management:
+          enabled: true
+          config:
+            NETBIRD_SIGNAL_URI: "netbird-signal.netbird.svc.cluster.local:10000"
+            NETBIRD_SIGNAL_PROTOCOL: "https"
+            NETBIRD_RELAY_DOMAIN: "netbird-relay.netbird.svc.cluster.local"
+            NETBIRD_RELAY_PORT: "33080"
+            NETBIRD_STUN_URI: "stun:netbird-signal.netbird.svc.cluster.local:3478"
+            NETBIRD_TURN_URI: "turn:netbird-signal.netbird.svc.cluster.local:3478"
+
+        dashboard:
+          enabled: true
+          service:
+            type: ClusterIP
+          ingress:
+            enabled: true
+            className: nginx
+            hosts:
+              - host: netbird.local
+                paths:
+                  - path: /
+                    pathType: Prefix
+
+  destination:
+    server: https://kubernetes.default.svc
+    namespace: netbird   # where the chart's resources will be installed
+
+  syncPolicy:
+    automated:
+      prune: true
+      selfHeal: true
+    syncOptions:
+      - CreateNamespace=true
+
+kubectl apply -f netbird-application.yaml -n argocd
+```
+
+```bash
+kubectl create namespace netbird || true
+
+kubectl create secret generic netbird-relay-secret \
+  -n netbird \
+  --from-literal=netbird-relay-secret-key="8626c1ed1c8cfcb13df6c65819042771a2bf7a280c16f0ba54abea8cde7b560d"
+```
+
+```bash
+helm repo add jaconi https://charts.jaconi.io
+helm repo update
+
+vim netbird-dashboard-values.yaml
+
+image:
+  # UI image version; the v2.22.2 tag exists on Docker Hub
+  # see netbirdio/dashboard:v2.22.2
+  tag: v2.22.2
+
+auth:
+  # OIDC provider (e.g. Keycloak)
+  authority: https://keycloak.example.com/realms/homelab
+  audience: netbird
+  clientID: netbird
+  supportedScopes: >
+    openid profile email offline_access netbird-api
+
+netbird:
+  # HTTP API of the NetBird management service (the same one clients connect to)
+  managementApiEndpoint: https://netbird.example.com
+  # gRPC endpoint of the same service
+  managementGrpcApiEndpoint: https://netbird.example.com
+
+ingress:
+  enabled: true
+  className: nginx
+  annotations:
+    # cert-manager example; remove if you do not use it
+    cert-manager.io/cluster-issuer: letsencrypt
+  hosts:
+    - host: netbird.example.com
+      paths:
+        - path: /
+          pathType: Prefix
+  tls:
+    - secretName: netbird-tls-certificate
+      hosts:
+        - netbird.example.com
+
+# any namespace works, but netbird is the usual choice
+kubectl create namespace netbird --dry-run=client -o yaml | kubectl apply -f -
+
+helm install netbird-dashboard jaconi/netbird-dashboard \
+  --namespace netbird \
+  --values netbird-dashboard-values.yaml
+
+```
\ No newline at end of file
diff --git a/ansible/roles/node/change_hostname/main.yml b/ansible/roles/node/change_hostname/main.yml
new file mode 100644
index 0000000..9acdd1c
--- /dev/null
+++ b/ansible/roles/node/change_hostname/main.yml
@@ -0,0 +1,20 @@
+---
+- name: Set hostname
+  ansible.builtin.hostname:
+    name: "{{ hostname }}"
+
+- name: Ensure /etc/hosts has proper hostname entry
+  ansible.builtin.lineinfile:
+    path: /etc/hosts
+    regexp: "^127\\.0\\.1\\.1"
+    line: "127.0.1.1 {{ hostname }}"
+    create: yes
+    backup: yes
+
+- name: Reboot system
+  ansible.builtin.reboot:
+    msg: "Rebooting after hostname change"
+    connect_timeout: 5
+    reboot_timeout: 300
+    pre_reboot_delay: 0
+    post_reboot_delay: 10
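Note that this task file lives at `roles/node/change_hostname/main.yml`, not in a `tasks/` subdirectory, so a plain `roles:` reference will not pick it up. A hedged usage sketch that loads the file directly (the playbook itself is illustrative, not part of the repo):

```bash
cd ansible
cat > change-hostname.yml <<'EOF'
- hosts: all
  become: true
  vars:
    hostname: new-node-name
  tasks:
    # the role keeps its tasks in main.yml at the role root,
    # so include the file directly instead of using roles:
    - ansible.builtin.include_tasks: roles/node/change_hostname/main.yml
EOF
ansible-playbook change-hostname.yml -i inventory.ini -l dev-kyiv01-vm-ceph-main-01
```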
diff --git a/ansible/roles/node/execute_command/tasks/main.yml b/ansible/roles/node/execute_command/tasks/main.yml
new file mode 100644
index 0000000..ed05840
--- /dev/null
+++ b/ansible/roles/node/execute_command/tasks/main.yml
@@ -0,0 +1,5 @@
+---
+- name: execute [ {{ command }} ] command
+  ansible.builtin.command: "{{ command }}"
+  args:
+    chdir: "{{ chdir | default(omit) }}"
diff --git a/ansible/roles/node/push_dir/tasks/main.yml b/ansible/roles/node/push_dir/tasks/main.yml
new file mode 100644
index 0000000..6c62d03
--- /dev/null
+++ b/ansible/roles/node/push_dir/tasks/main.yml
@@ -0,0 +1,7 @@
+---
+- name: copy local directory to remote node (recursive)
+  ansible.builtin.copy:
+    src: "{{ resource_dir }}"
+    dest: "{{ target_dir }}"
+    mode: "0644"
+    directory_mode: "0755"
diff --git a/ansible/roles/node/remove_file/tasks/main.yml b/ansible/roles/node/remove_file/tasks/main.yml
new file mode 100644
index 0000000..93d5a8d
--- /dev/null
+++ b/ansible/roles/node/remove_file/tasks/main.yml
@@ -0,0 +1,5 @@
+- name: remove file
+  become: true
+  ansible.builtin.file:
+    path: "{{ file_path }}"
+    state: absent
diff --git a/ansible/roles/node/remove_user/defaults/main.yml b/ansible/roles/node/remove_user/defaults/main.yml
new file mode 100644
index 0000000..354f345
--- /dev/null
+++ b/ansible/roles/node/remove_user/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+# Whether to remove the home directory and mail spool (/var/mail/)
+remove_user_home: true
+
+# Force removal even if the user still has processes (useful for build units / packer)
+remove_user_force: true
diff --git a/ansible/roles/node/remove_user/tasks/main.yml b/ansible/roles/node/remove_user/tasks/main.yml
new file mode 100644
index 0000000..8d90e8f
--- /dev/null
+++ b/ansible/roles/node/remove_user/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+- name: remove sudoers drop-in for {{ remove_user }} user (if exists)
+  ansible.builtin.file:
+    path: "/etc/sudoers.d/{{ remove_user }}"
+    state: absent
+
+- name: remove {{ remove_user }} user
+  ansible.builtin.user:
+    name: "{{ remove_user }}"
+    state: absent
+    remove: "{{ remove_user_home }}"
+    force: "{{ remove_user_force }}"
diff --git a/ansible/roles/ntp/chrony/handlers/main.yml b/ansible/roles/ntp/chrony/handlers/main.yml
new file mode 100644
index 0000000..3a5f6fe
--- /dev/null
+++ b/ansible/roles/ntp/chrony/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+- name: restart chrony
+  ansible.builtin.service:
+    name: chrony
+    state: restarted
diff --git a/ansible/roles/ntp/chrony/tasks/main.yml b/ansible/roles/ntp/chrony/tasks/main.yml
new file mode 100644
index 0000000..17b65eb
--- /dev/null
+++ b/ansible/roles/ntp/chrony/tasks/main.yml
@@ -0,0 +1,74 @@
+---
+- name: install chrony
+  ansible.builtin.apt:
+    name:
+      - chrony
+    state: present
+    update_cache: true
+
+# avoid running "two time clients" at once (minimal, no elaborate checks)
+- name: stop and disable systemd-timesyncd (if exists)
+  ansible.builtin.service:
+    name: systemd-timesyncd
+    state: stopped
+    enabled: false
+  ignore_errors: true
+
+- name: ensure /etc/chrony/sources.d exists
+  ansible.builtin.file:
+    path: /etc/chrony/sources.d
+    state: directory
+    owner: root
+    group: root
+    mode: "0755"
+
+- name: ensure /etc/chrony/conf.d exists
+  ansible.builtin.file:
+    path: /etc/chrony/conf.d
+    state: directory
+    owner: root
+    group: root
+    mode: "0755"
+
+- name: deploy /etc/chrony/chrony.conf
+  ansible.builtin.template:
+    src: chrony.conf.j2
+    dest: /etc/chrony/chrony.conf
+    owner: root
+    group: root
+    mode: "0644"
+  notify: restart chrony
+
+- name: configure upstream sources
+  ansible.builtin.template:
+    src: 00-upstream.sources.j2
+    dest: /etc/chrony/sources.d/00-upstream.sources
+    owner: root
+    group: root
+    mode: "0644"
+  notify: restart chrony
+
+# server mode: allow clients (optional)
+- name: configure allowed client networks (optional)
+  ansible.builtin.template:
+    src: 00-allow.conf.j2
+    dest: /etc/chrony/conf.d/00-allow.conf
+    owner: root
+    group: root
+    mode: "0644"
+  when: chrony_allow_networks | length > 0
+  notify: restart chrony
+
+# if allow was configured earlier and the role now runs as a client, clean the file up
+- name: remove allow config when not needed
+  ansible.builtin.file:
+    path: /etc/chrony/conf.d/00-allow.conf
+    state: absent
+  when: chrony_allow_networks | length == 0
+  notify: restart chrony
+
+- name: ensure chrony is enabled and started
+  ansible.builtin.service:
+    name: chrony
+    enabled: true
+    state: started
diff --git a/ansible/roles/ntp/chrony/templates/00-allow.conf.j2 b/ansible/roles/ntp/chrony/templates/00-allow.conf.j2
new file mode 100644
index 0000000..818adf9
--- /dev/null
+++ b/ansible/roles/ntp/chrony/templates/00-allow.conf.j2
@@ -0,0 +1,5 @@
+# Managed by Ansible: allow NTP clients (server)
+deny all
+{% for net in chrony_allow_networks %}
+allow {{ net }}
+{% endfor %}
diff --git a/ansible/roles/ntp/chrony/templates/00-upstream.sources.j2 b/ansible/roles/ntp/chrony/templates/00-upstream.sources.j2
new file mode 100644
index 0000000..5c5446f
--- /dev/null
+++ b/ansible/roles/ntp/chrony/templates/00-upstream.sources.j2
@@ -0,0 +1,4 @@
+# Managed by Ansible: upstream NTP sources
+{% for s in chrony_upstream_sources %}
+server {{ s }} iburst
+{% endfor %}
diff --git a/ansible/roles/ntp/chrony/templates/chrony.conf.j2 b/ansible/roles/ntp/chrony/templates/chrony.conf.j2
new file mode 100644
index 0000000..0e2341b
--- /dev/null
+++ b/ansible/roles/ntp/chrony/templates/chrony.conf.j2
@@ -0,0 +1,47 @@
+# Welcome to the chrony configuration file. See chrony.conf(5) for more
+# information about usable directives.
+
+# Use Debian vendor zone.
+# pool 2.debian.pool.ntp.org iburst
+
+# Use time sources from DHCP.
+# sourcedir /run/chrony-dhcp
+
+# Use NTP sources found in /etc/chrony/sources.d.
+sourcedir /etc/chrony/sources.d
+
+# This directive specifies the location of the file containing ID/key pairs for
+# NTP authentication.
+keyfile /etc/chrony/chrony.keys
+
+# This directive specifies the file into which chronyd will store the rate
+# information.
+driftfile /var/lib/chrony/chrony.drift
+
+# Save NTS keys and cookies.
+ntsdumpdir /var/lib/chrony
+
+# Uncomment the following line to turn logging on.
+#log tracking measurements statistics
+
+# Log files location.
+logdir /var/log/chrony
+
+# Stop bad estimates upsetting machine clock.
+maxupdateskew 100.0
+
+# This directive enables kernel synchronisation (every 11 minutes) of the
+# real-time clock. Note that it can't be used along with the 'rtcfile' directive.
+rtcsync
+
+# Step the system clock instead of slewing it if the adjustment is larger than
+# one second, but only in the first three clock updates.
+makestep 1 3
+
+# Get TAI-UTC offset and leap seconds from the system tz database.
+# This directive must be commented out when using time sources serving
+# leap-smeared time.
+leapseclist /usr/share/zoneinfo/leap-seconds.list
+
+# Include configuration files found in /etc/chrony/conf.d.
+confdir /etc/chrony/conf.d
\ No newline at end of file
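The role expects `chrony_upstream_sources` and `chrony_allow_networks` to come from the inventory; a hedged sketch of the two profiles implied by the NTP hierarchy (file names and the core server's DNS name are illustrative):

```bash
# Core NTP server: pulls public upstreams, serves the LAN
cat > group_vars/ntp_core.yml <<'EOF'
chrony_upstream_sources:
  - time.cloudflare.com
  - time.google.com
chrony_allow_networks:
  - 192.168.0.0/24
EOF

# Plain client: syncs from the core server, serves nobody
cat > group_vars/ntp_clients.yml <<'EOF'
chrony_upstream_sources:
  - ntp-core.internal   # hypothetical name of the core server
chrony_allow_networks: []
EOF
```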
diff --git a/ansible/roles/ntp/readme.md b/ansible/roles/ntp/readme.md
new file mode 100644
index 0000000..b4739a7
--- /dev/null
+++ b/ansible/roles/ntp/readme.md
@@ -0,0 +1,20 @@
+```bash
+vim /etc/chrony/chrony.conf
+
+# comment out
+pool 2.debian.pool.ntp.org iburst
+sourcedir /run/chrony-dhcp
+
+# define external upstreams in a separate file
+cat >/etc/chrony/sources.d/00-upstream.sources <<'EOF'
+server ntp.time.in.ua iburst
+server ntp2.time.in.ua iburst
+server time.google.com iburst
+server time.cloudflare.com iburst
+EOF
+
+# apply and verify
+systemctl restart chrony
+chronyc sources -v
+chronyc tracking
+```
\ No newline at end of file
diff --git a/ansible/roles/packer/install/tasks/main.yml b/ansible/roles/packer/install/tasks/main.yml
new file mode 100644
index 0000000..0f4964d
--- /dev/null
+++ b/ansible/roles/packer/install/tasks/main.yml
@@ -0,0 +1,48 @@
+---
+- name: install base deps for HashiCorp repo
+  ansible.builtin.apt:
+    update_cache: true
+    name:
+      - ca-certificates   # needed to download over HTTPS
+      - curl              # to fetch packer/plugins
+      - gnupg
+      - lsb-release
+      - unzip             # packer artifacts often ship as zip
+    state: present
+
+- name: ensure keyrings dir exists
+  ansible.builtin.file:
+    path: /usr/share/keyrings
+    state: directory
+    mode: "0755"
+
+- name: add HashiCorp GPG key (dearmored)
+  ansible.builtin.shell: |
+    set -euo pipefail
+    curl -fsSL https://apt.releases.hashicorp.com/gpg \
+      | gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg
+  args:
+    executable: /bin/bash
+    creates: /usr/share/keyrings/hashicorp-archive-keyring.gpg
+
+- name: add HashiCorp APT repository
+  ansible.builtin.copy:
+    dest: /etc/apt/sources.list.d/hashicorp.list
+    mode: "0644"
+    content: |
+      deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com {{ ansible_distribution_release }} main
+
+- name: install packer
+  ansible.builtin.apt:
+    update_cache: true
+    name: packer
+    state: present
+
+- name: check packer version
+  ansible.builtin.command: packer version
+  register: packer_version
+  changed_when: false
+
+- name: print packer version
+  ansible.builtin.debug:
+    var: packer_version.stdout
diff --git a/ansible/roles/packer/run/tasks/main.yml b/ansible/roles/packer/run/tasks/main.yml
new file mode 100644
index 0000000..79ff512
--- /dev/null
+++ b/ansible/roles/packer/run/tasks/main.yml
@@ -0,0 +1,33 @@
+---
+- name: ensure packer exists
+  ansible.builtin.command: packer version
+  changed_when: false
+
+- name: packer init
+  ansible.builtin.command: packer init .
+  args:
+    chdir: "{{ packer_config_dir }}"
+  changed_when: false
+
+- name: packer fmt
+  ansible.builtin.command: packer fmt -recursive .
+  args:
+    chdir: "{{ packer_config_dir }}"
+  changed_when: false
+
+- name: packer validate
+  ansible.builtin.command: packer validate .
+  args:
+    chdir: "{{ packer_config_dir }}"
+  changed_when: false
+
+- name: packer build
+  ansible.builtin.shell: |
+    set -euo pipefail
+    stdbuf -oL -eL packer build -on-error=cleanup -timestamp-ui .
+  args:
+    chdir: "{{ packer_config_dir }}"
+    executable: /bin/bash
+  environment:
+    PACKER_LOG: "1"
+    PACKER_LOG_PATH: ""
diff --git a/ansible/roles/proxmox/enable_snippets/tasks/main.yml b/ansible/roles/proxmox/enable_snippets/tasks/main.yml
new file mode 100644
index 0000000..8fff466
--- /dev/null
+++ b/ansible/roles/proxmox/enable_snippets/tasks/main.yml
@@ -0,0 +1,4 @@
+---
+- name: enable snippets on storage "local"
+  ansible.builtin.command: >
+    pvesm set local --content backup,iso,vztmpl,snippets
diff --git a/ansible/roles/proxmox/install_nvidia_driver/readme.md b/ansible/roles/proxmox/install_nvidia_driver/readme.md
new file mode 100644
index 0000000..3f0fe72
--- /dev/null
+++ b/ansible/roles/proxmox/install_nvidia_driver/readme.md
@@ -0,0 +1,41 @@
+## 1.0 Quick check that the host sees the GPU
+lspci -nn | grep -i nvidia
+
+## 1.1 GRUB
+nano /etc/default/grub
+GRUB_CMDLINE_LINUX_DEFAULT="quiet iommu=pt"
+update-grub
+reboot
+
+## 1.2 VFIO modules
+nano /etc/modules-load.d/vfio.conf
+vfio
+vfio_iommu_type1
+vfio_pci
+vfio_virqfd
+
+## 1.3 Bind the GPU to vfio-pci by ID
+nano /etc/modprobe.d/vfio.conf
+options vfio-pci ids=10de:2d58,10de:22eb disable_vga=1
+
+## 1.4 Blacklist nouveau (and do not install the nvidia driver on the host if you want "clean" passthrough)
+nano /etc/modprobe.d/blacklist-nouveau.conf
+blacklist nouveau
+options nouveau modeset=0
+
+## 1.5 Rebuild the initramfs and reboot
+update-initramfs -u -k all
+reboot
+
+## 1.6 Check that the GPU has really moved to VFIO
+dmesg | grep -E "AMD-Vi|IOMMU" | tail -n 50
+lspci -nnk -s 01:00.0
+lspci -nnk -s 01:00.1
+
+## In Proxmox, create a PCI mapping for the RTX 5070
+Datacenter → Resource Mapping → PCI Devices → Add
+Create the mapping:
+rtx5070_gpu → 0000:01:00
+
+dmesg | grep -E "IOMMU|AMD-Vi"
+
diff --git a/ansible/roles/proxmox/lxc/download_template/tasks/main.yml b/ansible/roles/proxmox/lxc/download_template/tasks/main.yml
new file mode 100644
index 0000000..94dafa3
--- /dev/null
+++ b/ansible/roles/proxmox/lxc/download_template/tasks/main.yml
@@ -0,0 +1,10 @@
+---
+- name: update LXC template index
+  ansible.builtin.command: pveam update
+  register: pveam_update
+  changed_when: false
+
+- name: download LXC template
+  ansible.builtin.command: "pveam download local {{ lxc_template_name }}"
+  args:
+    creates: "/var/lib/vz/template/cache/{{ lxc_template_name }}"
diff --git a/ansible/roles/proxmox/lxc/shutdown/tasks/main.yml b/ansible/roles/proxmox/lxc/shutdown/tasks/main.yml
new file mode 100644
index 0000000..b8edc4a
--- /dev/null
+++ b/ansible/roles/proxmox/lxc/shutdown/tasks/main.yml
@@ -0,0 +1,5 @@
+---
+- name: shutdown LXC container
+  ansible.builtin.command: pct shutdown {{ lxc_id }}
+  become: true
+  changed_when: true
diff --git a/ansible/roles/proxmox/setup_no_subscription_repository/tasks/main.yml b/ansible/roles/proxmox/setup_no_subscription_repository/tasks/main.yml
new file mode 100644
index 0000000..114f516
--- /dev/null
+++ b/ansible/roles/proxmox/setup_no_subscription_repository/tasks/main.yml
@@ -0,0 +1,30 @@
+---
+- name: remove proxmox enterprise repo
+  ansible.builtin.file:
+    path: /etc/apt/sources.list.d/pve-enterprise.sources
+    state: absent
+
+- name: remove ceph enterprise repo
+  ansible.builtin.file:
+    path: /etc/apt/sources.list.d/ceph.sources
+    state: absent
+
+- name: remove duplicate no-subscription entries from /etc/apt/sources.list
+  ansible.builtin.replace:
+    path: /etc/apt/sources.list
+    regexp: "^deb .*pve-no-subscription.*$"
+    replace: ""
+  ignore_errors: true
+
+- name: ensure proxmox no-subscription repo file exists
+  ansible.builtin.copy:
+    dest: /etc/apt/sources.list.d/pve-no-subscription.list
+    content: |
+      deb http://download.proxmox.com/debian/pve trixie pve-no-subscription
+    owner: root
+    group: root
+    mode: "0644"
+
+- name: update apt cache
+  ansible.builtin.apt:
+    update_cache: yes
diff --git a/ansible/roles/proxmox/vm/download_iso/tasks/main.yml b/ansible/roles/proxmox/vm/download_iso/tasks/main.yml
new file mode 100644
index 0000000..4061fc0
--- /dev/null
+++ b/ansible/roles/proxmox/vm/download_iso/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+- name: Ensure ISO directory exists
+  ansible.builtin.file:
+    path: /var/lib/vz/template/iso
+    state: directory
+    owner: root
+    group: root
+    mode: "0755"
+
+- name: Download Debian netinst ISO
+  ansible.builtin.get_url:
+    url: "{{ vm_iso_url }}"
+    dest: "/var/lib/vz/template/iso/{{ vm_iso_name }}"
+    mode: "0644"
+    owner: root
+    group: root
+    force: false   # do not re-download if the file already exists
+    timeout: 60
diff --git a/argocd/applications/netbird-core/main.yml b/argocd/applications/netbird-core/main.yml
new file mode 100644
index 0000000..81835dd
--- /dev/null
+++ b/argocd/applications/netbird-core/main.yml
@@ -0,0 +1,67 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: netbird-core
+  namespace: argocd
+spec:
+  project: default
+
+  source:
+    repoURL: https://charts.jaconi.io   # Helm repo
+    chart: netbird
+    targetRevision: "*"
+    helm:
+      releaseName: netbird-core
+      values: |-
+        # fullnameOverride = the base name for resources inside the cluster.
+        # Keep "netbird" so the services come out as:
+        # netbird-management, netbird-signal, netbird-relay, etc.
+        # This can be rethought later, but it keeps the wiring simple.
+        fullnameOverride: "netbird"
+
+        config:
+          database:
+            DB_TYPE: postgres
+            HOST: postgres-postgresql.postgres.svc.cluster.local
+            PORT: 5432
+            NAME: netbird
+            USER: netbird_user
+            PASSWD: password
+
+        relay:
+          enabled: true
+          config:
+            # The address clients will see (usually external / LB).
+            # The in-cluster service is fine for now; switch to an external one later.
+            NB_EXPOSED_ADDRESS: "netbird-relay.netbird.svc.cluster.local:33080"
+
+        signal:
+          enabled: true
+
+        management:
+          enabled: true
+          config:
+            NETBIRD_SIGNAL_URI: "netbird-signal.netbird.svc.cluster.local:10000"
+            NETBIRD_SIGNAL_PROTOCOL: "https"
+            NETBIRD_RELAY_DOMAIN: "netbird-relay.netbird.svc.cluster.local"
+            NETBIRD_RELAY_PORT: "33080"
+            NETBIRD_STUN_URI: "stun:netbird-signal.netbird.svc.cluster.local:3478"
+            NETBIRD_TURN_URI: "turn:netbird-signal.netbird.svc.cluster.local:3478"
+
+        # IMPORTANT: the dashboard is disabled in the core chart
+        # so the UI is only brought up by the separate netbird-dashboard chart
+        dashboard:
+          enabled: false
+
+  destination:
+    server: https://kubernetes.default.svc
+    namespace: netbird
+
+  syncPolicy:
+    automated:
+      prune: true
+      selfHeal: true
+    syncOptions:
+      - CreateNamespace=true
+# kubectl apply -f main.yml -n argocd
+# kubectl create secret generic netbird-relay-secret -n netbird --from-literal=netbird-relay-secret-key="86..."
diff --git a/argocd/applications/netbird-dashboard/main.yaml b/argocd/applications/netbird-dashboard/main.yaml
new file mode 100644
index 0000000..ff62a43
--- /dev/null
+++ b/argocd/applications/netbird-dashboard/main.yaml
@@ -0,0 +1,65 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: netbird-dashboard
+  namespace: argocd
+spec:
+  project: default
+
+  source:
+    repoURL: https://charts.jaconi.io
+    chart: netbird-dashboard
+    targetRevision: "*"
+    helm:
+      releaseName: netbird-dashboard
+      values: |-
+        image:
+          # UI image version
+          tag: v2.22.2
+
+        auth:
+          authority: https://keycloak.example.com/realms/homelab
+          audience: netbird
+          clientID: netbird
+          supportedScopes: >
+            openid profile email offline_access netbird-api
+
+        netbird:
+          # !!! IMPORTANT !!!
+          # Use the same domain that external traffic arrives on.
+          # At the first stage netbird.local is fine; it reaches
+          # the backend through ingress-nginx.
+          managementApiEndpoint: https://netbird.local
+          managementGrpcApiEndpoint: https://netbird.local
+
+        ingress:
+          enabled: true
+          className: nginx
+          annotations: {}
+          hosts:
+            - host: netbird.local
+              paths:
+                - path: /
+                  pathType: Prefix
+
+          # TLS can wait for now (no cert-manager / real certs yet):
+          tls: []
+          # If you later use cert-manager:
+          # annotations:
+          #   cert-manager.io/cluster-issuer: letsencrypt
+          # tls:
+          #   - secretName: netbird-tls-certificate
+          #     hosts:
+          #       - netbird.local
+
+  destination:
+    server: https://kubernetes.default.svc
+    namespace: netbird
+
+  syncPolicy:
+    automated:
+      prune: true
+      selfHeal: true
+    syncOptions:
+      - CreateNamespace=true
+# kubectl apply -f main.yaml -n argocd
diff --git a/documentation/images/arch-diagram.jpg b/documentation/images/arch-diagram.jpg
new file mode 100755
index 0000000..b22b802
Binary files /dev/null and b/documentation/images/arch-diagram.jpg differ
diff --git a/documentation/issues/issue-0.md b/documentation/issues/issue-0.md
new file mode 100644
index 0000000..90b8aec
--- /dev/null
+++ b/documentation/issues/issue-0.md
@@ -0,0 +1,88 @@
+# Bug Report: cloud-init root-disk expansion breaks because of a "random" disk name (/dev/sda is not always root)
+
+## TL;DR
+The script in the `terraform cloud-init` hard-codes `/dev/sda`, but the system does **not always** name the root disk `sda`. Sometimes root lands on `sdc` (or another device), so `runcmd` partially or completely fails to grow LVM and the filesystem.
+
+---
+
+## Context
+The `cloud-init` uses a `runcmd` that grows the partitions and LVM on `/dev/sda`:
+
+```yaml
+runcmd:
+  - |
+    set -euxo pipefail
+
+    # grow the extended + LVM partition to the end of the disk
+    growpart /dev/sda 2 || true
+    growpart /dev/sda 5 || true
+    parted -s /dev/sda "resizepart 2 100%" "resizepart 5 100%" || true
+    partprobe /dev/sda || true
+
+    # grow PV -> LV(root) -> FS
+    pvresize /dev/sda5
+    lvextend -l +100%FREE -r /dev/vg0/root
+```
+
+---
+
+## Symptoms
+- After the first boot, the root partition is **not grown** (LVM/FS stay small).
+- `cloud-init status --long` may show errors in `scripts-user / runcmd` (if commands without `|| true` fail).
+- The script "works most of the time" but **sometimes** breaks with no config changes.
+
+---
+
+## Observation (evidence)
+Identical VM logic, but the root disk gets different names.
+
+### Case A (works): root on `sda`
+```
+sda      30G  disk
+├─sda1        /boot
+└─sda5
+  └─vg0-root  /
+sdb     150G  disk
+sdc     150G  disk
+```
+
+### Case B (breaks): root on `sdc`
+```
+sda     150G  disk
+sdb     150G  disk
+sdc      30G  disk
+├─sdc1        /boot
+└─sdc5
+  └─vg0-root  /
+```
+
+---
+
+## Root cause
+`/dev/sdX` names are **not guaranteed**: disk discovery order can change (especially in VMs/Proxmox with different controllers/attach order).
+The script assumes root is always on `/dev/sda`, so when root is on `/dev/sdc`, the `growpart/pvresize` commands are applied to the **wrong** disk.
+
+---
+
+## Expected behavior
+The script should detect the **actual disk** that `/` is mounted on and operate on that (not on a fixed `/dev/sda`).
+
+---
+
+## Actual behavior
+The script only works when the root disk happens to be `sda`. With any other layout, no expansion happens.
+
+---
+
+## Impact
+- Intermittent (random) failure during VM bootstrap.
+- On some VMs root stays small → problems with package installs / logs / caches etc.
+- Hard to diagnose, since "sometimes everything is fine".
+
+---
+
+## Why I am not fixing it now
+The bug shows up rarely and I do not want to spend time on robust disk auto-detection right now.
+In most cases root does end up on `sda`.
+
+---
\ No newline at end of file
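The "expected behavior" above is straightforward to implement; a hedged sketch of resolving the root disk at runtime instead of hard-coding /dev/sda (same growpart/LVM steps as the issue's runcmd; the extended-partition number 2 comes from the layout shown in the issue):

```bash
set -euxo pipefail

# PV that backs vg0 (the issue's layout), e.g. /dev/sdc5
pv_dev="$(pvs --noheadings -o pv_name --select vg_name=vg0 | tr -d ' ')"
# Parent disk of that partition, e.g. /dev/sdc
disk="/dev/$(lsblk -no PKNAME "$pv_dev")"
# Partition number, e.g. 5
part_num="${pv_dev##*[!0-9]}"

growpart "$disk" 2 || true
growpart "$disk" "$part_num" || true
parted -s "$disk" "resizepart 2 100%" "resizepart $part_num 100%" || true
partprobe "$disk" || true

pvresize "$pv_dev"
lvextend -l +100%FREE -r /dev/vg0/root
```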
diff --git a/documentation/issues/issue-1.md b/documentation/issues/issue-1.md
new file mode 100644
index 0000000..8481aa4
--- /dev/null
+++ b/documentation/issues/issue-1.md
@@ -0,0 +1,85 @@
+## Title
+**[cephadm/bootstrap] Bootstrap fails at `orch host add` when SSH on the node is not on 22 (custom port 10225)**
+
+## TL;DR
+`cephadm bootstrap` tries to add the bootstrap host to the orchestrator over SSH on **port 22** during installation. If SSH listens on **10225**, bootstrap breaks with `Can't communicate with remote host ... Connect call failed (ip, 22)`.
+
+## Context
+- Component: **Cephadm / ceph orch (orchestrator backend: cephadm)**
+- OS: Debian 13 (trixie), VM (Proxmox)
+- SSH: **sshd listens on 10225**, port 22 is closed / not listening
+- Ceph: `cephadm 18.2.7` (reef), `ceph-common 18.2.7`
+- Network: `192.168.0.0/24`, bootstrap mon-ip: `192.168.0.102`
+
+## Steps to reproduce
+1. Enable SSH on the node only on a custom port:
+   - `Port 10225`
+   - port 22 not listening / closed
+2. Run bootstrap:
+   ```bash
+   cephadm bootstrap \
+     --mon-ip 192.168.0.102 \
+     --initial-dashboard-user admin \
+     --initial-dashboard-password password \
+     --allow-fqdn-hostname
+   ```
+3. Wait for the step that adds the host to the orchestrator.
+
+## Expected
+- Bootstrap completes successfully.
+- The bootstrap node is added to `ceph orch host ls`.
+- `ceph -s` and `ceph orch ps` work.
+
+## Actual
+- Bootstrap aborts while adding the bootstrap host to the orchestrator:
+  - `Error EINVAL: Can't communicate with remote host ...`
+  - `Connect call failed ('192.168.0.102', 22)`
+- The cluster is left in a half-deployed state and needs a cleanup via `cephadm rm-cluster`.
+
+## Logs / evidence
+Command:
+```bash
+cephadm bootstrap \
+  --mon-ip 192.168.0.102 \
+  --initial-dashboard-user admin \
+  --initial-dashboard-password password \
+  --allow-fqdn-hostname
+```
+
+Output fragment:
+```text
+Generating ssh key...
+Wrote public SSH key to /etc/ceph/ceph.pub
+Adding key to root@localhost authorized_keys...
+Adding host dev-kyiv01-vm-ceph-main-01...
+...
+Error EINVAL: Can't communicate with remote host `192.168.0.102`
+[Errno 111] Connect call failed ('192.168.0.102', 22)
+```
+
+Checks confirming the cause:
+```bash
+ss -lntp | grep sshd          # shows it listens on 10225, no :22
+nc -vz 192.168.0.102 22       # refused/failed
+nc -vz 192.168.0.102 10225    # ok
+```
+
+## Root cause (hypothesis/fact)
+- Fact: `cephadm bootstrap` internally runs `ceph orch host add <hostname>` for the bootstrap node and tries to reach it over SSH on **22/tcp**.
+- With SSH on 10225 the connection to 22 never establishes → bootstrap fails.
+
+## Impact
+- Frequency: **always**, when sshd does not listen on 22.
+- Impact:
+  - the cluster cannot be brought up "out of the box" quickly with a custom ssh port
+  - artifacts of the broken cluster remain (manual purge required)
+
+## Workaround
+1. Temporarily open/enable SSH on 22 in the mgmt network
+
+## Fix plan / ideas
+- Try bootstrap/orchestrator with the SSH port set explicitly via an ssh_config for cephadm (custom port 10225):
+  - prepare a dedicated key and an `ssh_config` with `Port 10225`
+  - pass it into bootstrap (e.g. via flags like `--ssh-config`, `--ssh-private-key/--ssh-public-key`, `--ssh-user` — depends on version/package)
+  - once up, pin the ssh_config for the cephadm module (so `ceph orch host add` always uses 10225)
+- If "fast and reliable" does not work out, adopt a standard: keep **22 inside the mgmt/VPN network** with a firewall allowlist (and never expose it externally), and use 10225 only where actually needed.
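A hedged sketch of the first fix idea — the flag names come straight from the issue's own list and depend on the cephadm version, so treat this as a starting point rather than a verified recipe:

```bash
# Dedicated ssh_config telling cephadm to use the custom port
cat > /root/cephadm_ssh_config <<'EOF'
Host *
  Port 10225
  User root
EOF

cephadm bootstrap \
  --mon-ip 192.168.0.102 \
  --ssh-config /root/cephadm_ssh_config \
  --initial-dashboard-user admin \
  --initial-dashboard-password password \
  --allow-fqdn-hostname

# After bootstrap, pin the same config in the mgr so orch keeps using it
ceph cephadm set-ssh-config -i /root/cephadm_ssh_config
```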
diff --git a/documentation/issues/issue-1.md b/documentation/issues/issue-1.md
new file mode 100644
index 0000000..8481aa4
--- /dev/null
+++ b/documentation/issues/issue-1.md
@@ -0,0 +1,85 @@
+## Title
+**[cephadm/bootstrap] Bootstrap fails on `orch host add` when SSH on the node is not on 22 (custom port 10225)**
+
+## TL;DR
+During installation, `cephadm bootstrap` tries to add the bootstrap host to the orchestrator over SSH on **port 22**. If SSH listens on **10225**, bootstrap breaks with `Can't communicate with remote host ... Connect call failed (ip, 22)`.
+
+## Context
+- Component: **Cephadm / ceph orch (orchestrator backend: cephadm)**
+- OS: Debian 13 (trixie), VM (Proxmox)
+- SSH: **sshd listens on 10225**, port 22 closed / not listening
+- Ceph: `cephadm 18.2.7` (reef), `ceph-common 18.2.7`
+- Network: `192.168.0.0/24`, bootstrap mon-ip: `192.168.0.102`
+
+## Steps to reproduce
+1. Enable SSH on the node only on a custom port:
+   - `Port 10225`
+   - port 22 not listening / closed
+2. Run bootstrap:
+   ```bash
+   cephadm bootstrap \
+     --mon-ip 192.168.0.102 \
+     --initial-dashboard-user admin \
+     --initial-dashboard-password password \
+     --allow-fqdn-hostname
+   ```
+3. Wait for the step that adds the host to the orchestrator.
+
+## Expected
+- Bootstrap completes successfully.
+- The bootstrap node appears in `ceph orch host ls`.
+- `ceph -s` and `ceph orch ps` work.
+
+## Actual
+- Bootstrap aborts while adding the bootstrap host to the orchestrator:
+  - `Error EINVAL: Can't communicate with remote host ...`
+  - `Connect call failed ('192.168.0.102', 22)`
+- The cluster is left in a half-deployed state and needs cleanup via `cephadm rm-cluster`.
+
+## Logs / evidence
+Command:
+```bash
+cephadm bootstrap \
+  --mon-ip 192.168.0.102 \
+  --initial-dashboard-user admin \
+  --initial-dashboard-password password \
+  --allow-fqdn-hostname
+```
+
+Output fragment:
+```text
+Generating ssh key...
+Wrote public SSH key to /etc/ceph/ceph.pub
+Adding key to root@localhost authorized_keys...
+Adding host dev-kyiv01-vm-ceph-main-01...
+...
+Error EINVAL: Can't communicate with remote host `192.168.0.102`
+[Errno 111] Connect call failed ('192.168.0.102', 22)
+```
+
+Checks confirming the cause:
+```bash
+ss -lntp | grep sshd         # shows sshd on 10225, nothing on :22
+nc -vz 192.168.0.102 22      # refused/failed
+nc -vz 192.168.0.102 10225   # ok
+```
+
+## Root cause (hypothesis/fact)
+- Fact: `cephadm bootstrap` internally runs `ceph orch host add` for the bootstrap node and tries to reach it over SSH on **22/tcp**.
+- With SSH on 10225, the connection to 22 never establishes → bootstrap fails.
+
+## Impact
+- Frequency: **always**, whenever sshd does not listen on 22.
+- Impact:
+  - impossible to bring up a cluster "out of the box" with a custom ssh port
+  - artifacts of a broken cluster remain (manual purge required)
+
+## Workaround
+1. Temporarily open/enable SSH on 22 in the mgmt network
+
+## Fix plan / ideas
+- Try bootstrap/orchestrator with an explicit SSH port via an ssh_config for cephadm (custom port 10225); a sketch follows below:
+  - prepare a dedicated key and an `ssh_config` with `Port 10225`
+  - pass it into bootstrap (e.g. via options like `--ssh-config`, `--ssh-private-key/--ssh-public-key`, `--ssh-user` — depends on the version/package)
+  - after bring-up, persist the ssh_config for the cephadm module (so `ceph orch host add` always uses 10225)
+- If this can't be made "fast and reliable" — adopt a standard: keep **22 inside the mgmt/VPN network** with a firewall allowlist (and never expose it externally), and use 10225 only where it is actually needed.
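+
+A minimal sketch of the ssh_config route — assuming this cephadm build supports the `--ssh-config` / `--ssh-private-key` / `--ssh-public-key` flags (check `cephadm bootstrap --help` first), untested:
+
+```bash
+#!/usr/bin/env bash
+set -euxo pipefail
+
+# Dedicated key pair for cephadm (paths are illustrative)
+ssh-keygen -t ed25519 -N '' -f /root/cephadm_key
+cat /root/cephadm_key.pub >> /root/.ssh/authorized_keys
+
+# ssh_config that forces the custom port for all cluster hosts
+cat > /root/cephadm_ssh_config <<'EOF'
+Host *
+    Port 10225
+    User root
+    StrictHostKeyChecking no
+EOF
+
+cephadm bootstrap \
+  --mon-ip 192.168.0.102 \
+  --ssh-config /root/cephadm_ssh_config \
+  --ssh-private-key /root/cephadm_key \
+  --ssh-public-key /root/cephadm_key.pub \
+  --allow-fqdn-hostname
+```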
diff --git a/documentation/issues/template.md b/documentation/issues/template.md
new file mode 100644
index 0000000..442d922
--- /dev/null
+++ b/documentation/issues/template.md
@@ -0,0 +1,39 @@
+# Mini-template for future bug reports (copy-paste)
+## Title
+**[Component] Short problem description (symptom + cause/condition)**
+
+## TL;DR
+1-2 sentences: what breaks and why it matters.
+
+## Context
+- where (service/script/module)
+- version/environment (OS/VM/provider/config)
+
+## Steps to reproduce
+1.
+2.
+3.
+
+## Expected
+-
+
+## Actual
+-
+
+## Logs / evidence
+- commands
+- output
+- screenshots / config fragments
+
+## Root cause (hypothesis/fact)
+-
+
+## Impact
+- frequency (always/sometimes)
+- risk/impact
+
+## Workaround
+-
+
+## Fix plan / ideas
+-
\ No newline at end of file
diff --git a/makefiles/00_create_and_setup_lxc_container_with_packer.mk b/makefiles/00_create_and_setup_lxc_container_with_packer.mk
new file mode 100644
index 0000000..a05f84c
--- /dev/null
+++ b/makefiles/00_create_and_setup_lxc_container_with_packer.mk
@@ -0,0 +1,28 @@
+SHELL := /bin/bash
+.ONESHELL:
+.SHELLFLAGS := -eu -o pipefail -c
+
+MAKEFILE_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
+ANSIBLE_DIR := $(abspath $(MAKEFILE_DIR)/../ansible)
+TERRAFORM_DIR := $(abspath $(MAKEFILE_DIR)/../terraform/stacks/proxmox/lxc)
+
+.PHONY: all \
+	download_lxc_template create_lxc_container install_packer
+
+all: install_packer
+
+download_lxc_template:
+	cd "$(ANSIBLE_DIR)"
+	ansible-playbook playbooks/proxmox/lxc/download_template.yml -i inventory.ini
+
+create_lxc_container: download_lxc_template
+	cd "$(TERRAFORM_DIR)"
+	terraform init
+	terraform plan -var-file="terraform.tfvars"
+	terraform apply -auto-approve -var-file="terraform.tfvars"
+
+install_packer: create_lxc_container
+	cd "$(ANSIBLE_DIR)"
+	ansible-playbook playbooks/packer/install.yml -i inventory.ini
+
+# make -f 00_create_and_setup_lxc_container_with_packer.mk
\ No newline at end of file
diff --git a/makefiles/01_create_vm_golden_template.mk b/makefiles/01_create_vm_golden_template.mk
new file mode 100644
index 0000000..b48d2a0
--- /dev/null
+++ b/makefiles/01_create_vm_golden_template.mk
@@ -0,0 +1,34 @@
+SHELL := /bin/bash
+.ONESHELL:
+.SHELLFLAGS := -eu -o pipefail -c
+
+MAKEFILE_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
+ANSIBLE_DIR := $(abspath $(MAKEFILE_DIR)/../ansible)
+
+.PHONY: all \
+	download_vm_iso push_packer_dir run_packer shutdown_lxc_container
+
+all: shutdown_lxc_container
+
+download_vm_iso:
+	cd "$(ANSIBLE_DIR)"
+	ansible-playbook playbooks/proxmox/vm/download_iso.yml -i inventory.ini
+
+push_packer_dir: download_vm_iso
+	cd "$(ANSIBLE_DIR)" && \
+	ansible-playbook playbooks/node/push_dir.yml -i inventory.ini \
+		-l "dev-kyiv01-lxc-packer-main-01" \
+		-e "resource_dir=/workspaces/infrastructure/packer/proxmox/debian13 target_dir=/opt/packer/proxmox/"
+
+
+run_packer: push_packer_dir
+	cd "$(ANSIBLE_DIR)"
+	ansible-playbook playbooks/packer/run.yml -i inventory.ini
+
+shutdown_lxc_container: run_packer
+	cd "$(ANSIBLE_DIR)" && \
+	ansible-playbook playbooks/node/execute_command.yml -i inventory.ini \
+		-l "dev-kyiv01-psy-proxmox-main-01" \
+		-e '{"command":"pct shutdown 200"}'
+
+# make -f 01_create_vm_golden_template.mk
\ No newline at end of file
diff --git a/makefiles/02_create_vms.mk b/makefiles/02_create_vms.mk
new file mode 100644
index 0000000..1014e80
--- /dev/null
+++ b/makefiles/02_create_vms.mk
@@ -0,0 +1,24 @@
+SHELL := /bin/bash
+.ONESHELL:
+.SHELLFLAGS := -eu -o pipefail -c
+
+MAKEFILE_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
+ANSIBLE_DIR := $(abspath $(MAKEFILE_DIR)/../ansible)
+TERRAFORM_DIR := $(abspath $(MAKEFILE_DIR)/../terraform/stacks/proxmox/vm)
+
+.PHONY: all \
+	enable_snippets create_vms
+
+all: create_vms
+
+enable_snippets:
+	cd "$(ANSIBLE_DIR)"
+	ansible-playbook "playbooks/proxmox/enable_snippets.yml" -i "inventory.ini"
+
+create_vms: enable_snippets
+	cd "$(TERRAFORM_DIR)"
+	terraform init
+	terraform plan -var-file="terraform.tfvars"
+	terraform apply -auto-approve -var-file="terraform.tfvars"
+
+# make -f 02_create_vms.mk
\ No newline at end of file
diff --git a/makefiles/03_harden_nodes.mk b/makefiles/03_harden_nodes.mk
new file mode 100644
index 0000000..a029ae2
--- /dev/null
+++ b/makefiles/03_harden_nodes.mk
@@ -0,0 +1,31 @@
+SHELL := /bin/bash
+.ONESHELL:
+.SHELLFLAGS := -eu -o pipefail -c
+
+MAKEFILE_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
+ANSIBLE_DIR := $(abspath $(MAKEFILE_DIR)/../ansible)
+
+.PHONY: all \
+	remove_node_user harden_nodes harden_ceph_nodes
+
+all: harden_ceph_nodes
+
+remove_node_user:
+	cd "$(ANSIBLE_DIR)" && \
+	ansible-playbook playbooks/node/remove_user.yml -i inventory.p22.ini \
+		-l "p22_nodes" \
+		-e '{"remove_user":"packer"}'
+
+harden_nodes: remove_node_user
+	cd "$(ANSIBLE_DIR)" && \
+	ansible-playbook playbooks/harden/harden_node.yml -i inventory.before_p25105.ini \
+		-l "before_p25105_nodes" \
+		-e '{"ssh_port":25105}'
+
+harden_ceph_nodes: harden_nodes
+	cd "$(ANSIBLE_DIR)" && \
+	ansible-playbook playbooks/harden/harden_node.yml -i inventory.ceph.ini \
+		-l "ceph_nodes" \
+		-e '{"ssh_port":22}'
+
+# make -f 03_harden_nodes.mk
\ No newline at end of file
diff --git a/makefiles/04_setup_dns.mk b/makefiles/04_setup_dns.mk
new file mode 100644
index 0000000..3796c63
--- /dev/null
+++ b/makefiles/04_setup_dns.mk
@@ -0,0 +1,29 @@
+SHELL := /bin/bash
+.ONESHELL:
+.SHELLFLAGS := -eu -o pipefail -c
+
+MAKEFILE_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
+ANSIBLE_DIR := $(abspath $(MAKEFILE_DIR)/../ansible)
+TERRAFORM_DIR := $(abspath $(MAKEFILE_DIR)/../terraform/stacks/powerdns)
+
+.PHONY: all \
+	install_powerdns setup_dns_zone_and_records setup_systemd_resolved_config
+
+all: setup_systemd_resolved_config
+
+install_powerdns:
+	cd "$(ANSIBLE_DIR)"
+	ansible-playbook playbooks/dns/install_powerdns.yml -i inventory.ini
+
+setup_dns_zone_and_records: install_powerdns
+	cd "$(TERRAFORM_DIR)"
+	terraform init
+	terraform plan -var-file="terraform.tfvars"
+	terraform apply -auto-approve -var-file="terraform.tfvars"
+
+setup_systemd_resolved_config: setup_dns_zone_and_records
+	cd "$(ANSIBLE_DIR)"
+	ansible-playbook playbooks/dns/setup_systemd_resolved_config.yml -i inventory.ini
+
+# make -f 04_setup_dns.mk
+
diff --git a/makefiles/05_setup_ntp.mk b/makefiles/05_setup_ntp.mk
new file mode 100644
index 0000000..13563ed
--- /dev/null
+++ b/makefiles/05_setup_ntp.mk
@@ -0,0 +1,26 @@
+SHELL := /bin/bash
+.ONESHELL:
+.SHELLFLAGS := -eu -o pipefail -c
+
+MAKEFILE_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
+ANSIBLE_DIR := $(abspath $(MAKEFILE_DIR)/../ansible)
+
+.PHONY: all \
+	setup_edge_ntp_node setup_core_ntp_node setup_client_ntp_node
+
+all: setup_client_ntp_node
+
+setup_edge_ntp_node:
+	cd "$(ANSIBLE_DIR)"
+	ansible-playbook playbooks/ntp/chrony/00_setup_edge_ntp_node.yml -i inventory.ini
+
+setup_core_ntp_node: setup_edge_ntp_node
+	cd "$(ANSIBLE_DIR)"
+	ansible-playbook playbooks/ntp/chrony/01_setup_core_ntp_node.yml -i inventory.ini
+
+setup_client_ntp_node: setup_core_ntp_node
+	cd "$(ANSIBLE_DIR)"
+	ansible-playbook playbooks/ntp/chrony/02_setup_client_ntp_node.yml -i inventory.ini
+
+# make -f 05_setup_ntp.mk
+
diff --git a/makefiles/06_setup_ceph.mk b/makefiles/06_setup_ceph.mk
new file mode 100644
index 0000000..2eb4e16
--- /dev/null
+++ b/makefiles/06_setup_ceph.mk
@@ -0,0 +1,31 @@
+SHELL := /bin/bash
+.ONESHELL:
+.SHELLFLAGS := -eu -o pipefail -c
+
+MAKEFILE_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
+ANSIBLE_DIR := $(abspath $(MAKEFILE_DIR)/../ansible)
+
+.PHONY: all \
+ install_ceph bootstrap_ceph share_ceph_pubkey setup_cluster + +all: setup_cluster + +install_ceph: + cd "$(ANSIBLE_DIR)" + ansible-playbook playbooks/ceph/00_install.yml -i inventory.ini + +bootstrap_ceph: install_ceph + cd "$(ANSIBLE_DIR)" + ansible-playbook playbooks/ceph/01_bootstrap.yml -i inventory.ini + +share_ceph_pubkey: bootstrap_ceph + cd "$(ANSIBLE_DIR)" + ansible-playbook playbooks/ceph/02_share_pubkey.yml -i inventory.ini + +setup_cluster: share_ceph_pubkey + cd "$(ANSIBLE_DIR)" + ansible-playbook playbooks/ceph/03_setup_cluster.yml -i inventory.ini + + +# make -f 06_setup_ceph.mk + diff --git a/makefiles/07_setup_k8s.mk b/makefiles/07_setup_k8s.mk new file mode 100644 index 0000000..4f8c3e7 --- /dev/null +++ b/makefiles/07_setup_k8s.mk @@ -0,0 +1,40 @@ +SHELL := /bin/bash +.ONESHELL: +.SHELLFLAGS := -eu -o pipefail -c + +MAKEFILE_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) +ANSIBLE_DIR := $(abspath $(MAKEFILE_DIR)/../ansible) + +.PHONY: all \ + install_k8s_worker + +all: install_k8s_worker + +# install_k8s_master: +# cd "$(ANSIBLE_DIR)" +# ansible-playbook playbooks/k8s/install/k8s_master.yml -i inventory.ini + +install_k8s_worker: + cd "$(ANSIBLE_DIR)" + ansible-playbook playbooks/k8s/install/k8s_worker.yml -i inventory.ini + +# 1) ssh adminuser@localhost -p 10525 -i ./dev-kyiv01-vm-default-main-01 +# 2) cat /root/.kube/config # copy config to dev containers and change cluster block +# - cluster: +# insecure-skip-tls-verify: true +# server: https://localhost:10563 +# 3) terraform apply -target=module.metallb_helm --auto-approve +# 4) terraform apply -target=module.crunchy_operator --auto-approve +# 5) terraform apply --auto-approve +# 6) get gitlab credentials +# kubectl -n postgres-operator get secret hippo-pguser-gitlab -o jsonpath='{.data.user}' | base64 -d; echo +# kubectl -n postgres-operator get secret hippo-pguser-gitlab -o jsonpath='{.data.password}' | base64 -d; echo +# kubectl -n postgres-operator get secret hippo-pguser-gitlab -o jsonpath='{.data.dbname}' | base64 -d; echo +# kubectl -n postgres-operator get secret hippo-pguser-gitlab -o jsonpath='{.data.host}' | base64 -d; echo +# kubectl -n postgres-operator get secret hippo-pguser-gitlab -o jsonpath='{.data.port}' | base64 -d; echo +# 7) get valkey password +# kubectl -n valkey get secret valkey-users -o jsonpath='{.data.default}' | base64 -d; echo + + +# make -f 07_setup_k8s.mk + diff --git a/makefiles/bootstrap.mk b/makefiles/bootstrap.mk new file mode 100644 index 0000000..e96ba83 --- /dev/null +++ b/makefiles/bootstrap.mk @@ -0,0 +1,24 @@ +SHELL := /bin/bash +.DEFAULT_GOAL := all + +MAKEFILE_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) +ANSIBLE_DIR := $(abspath $(MAKEFILE_DIR)/../ansible) + +.PHONY: refresh_known_hosts all + +refresh_known_hosts: + cd "$(ANSIBLE_DIR)" && \ + ansible-playbook playbooks/node/remove_file.yml \ + -i "localhost," -c local \ + -e "file_path=/root/.ssh/known_hosts" + +all: refresh_known_hosts +# $(MAKE) -f 00_create_and_setup_lxc_container_with_packer.mk +# $(MAKE) -f 01_create_vm_golden_template.mk + $(MAKE) -f 02_create_vms.mk + $(MAKE) -f 03_harden_nodes.mk + $(MAKE) -f 04_setup_dns.mk + $(MAKE) -f 05_setup_ntp.mk + $(MAKE) -f 06_setup_ceph.mk + +# make -f bootstrap.mk \ No newline at end of file diff --git a/packer/proxmox/debian13/debian13.pkr.hcl b/packer/proxmox/debian13/debian13.pkr.hcl new file mode 100644 index 0000000..aef3f72 --- /dev/null +++ b/packer/proxmox/debian13/debian13.pkr.hcl @@ -0,0 +1,91 @@ +packer { + required_plugins { + proxmox = { + 
+      source  = "github.com/hashicorp/proxmox"
+      version = ">= 1.2.3"
+    }
+  }
+}
+
+source "proxmox-iso" "debian13" {
+  proxmox_url              = "https://192.168.0.126:8006/api2/json"
+  username                 = "root@pam!packer"
+  token                    = "7f3cd12a-c98e-4aec-abca-8d7fd1005fd7"
+  insecure_skip_tls_verify = true
+
+  node          = "proxmox-main-kyiv-01"
+  vm_id         = 300
+  vm_name       = "dev-kyiv01-template-packer-main-01"
+  template_name = "dev-kyiv01-template-packer-main-01"
+  tags          = "debian-13"
+
+  os       = "l26"
+  bios     = "seabios"
+  machine  = "q35"
+  cpu_type = "qemu64"
+  cores    = 1
+  sockets  = 1
+  memory   = 1024
+
+  qemu_agent = true
+
+  scsi_controller = "virtio-scsi-single"
+
+  network_adapters {
+    model       = "virtio"
+    bridge      = "vmbr0"
+    firewall    = false
+    mac_address = "repeatable"
+  }
+
+  disks {
+    type         = "scsi"
+    storage_pool = "local-lvm"
+    disk_size    = "4G"
+    io_thread    = true
+    discard      = true
+    ssd          = true
+  }
+
+  boot_iso {
+    type     = "scsi"
+    iso_file = "local:iso/debian-13.2.0-amd64-netinst.iso"
+    unmount  = true
+  }
+
+  # ===== HTTP preseed =====
+  http_directory    = "${path.root}/http"
+  http_bind_address = "192.168.0.200"
+  http_port_min     = 8870
+  http_port_max     = 8870
+  boot_wait         = "10s"
+  boot_key_interval = "25ms"
+
+  boot_command = [
+    "",
+    "auto auto=true priority=critical ",
+    "preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/preseed.cfg ",
+    "debian-installer=en_US ",
+    "fb=false ",
+    ""
+  ]
+
+  # ===== SSH =====
+  ssh_username = "packer"
+  ssh_password = "14881488"
+  ssh_timeout  = "35m"
+
+  cloud_init = false
+}
+
+build {
+  sources = ["source.proxmox-iso.debian13"]
+
+  provisioner "shell" {
+    execute_command = "sudo -S -E bash '{{ .Path }}'"
+    scripts = [
+      "scripts/10-base.sh",
+      "scripts/90-cleanup.sh"
+    ]
+  }
+}
diff --git a/packer/proxmox/debian13/http/preseed.cfg b/packer/proxmox/debian13/http/preseed.cfg
new file mode 100644
index 0000000..72ac204
--- /dev/null
+++ b/packer/proxmox/debian13/http/preseed.cfg
@@ -0,0 +1,100 @@
+### Locale / Keyboard
+d-i debian-installer/locale string en_US.UTF-8
+d-i keyboard-configuration/xkb-keymap select us
+
+### Network (DHCP)
+d-i netcfg/choose_interface select auto
+d-i netcfg/get_hostname string debian
+d-i netcfg/get_domain string local
+
+### Mirror
+d-i mirror/country string manual
+d-i mirror/http/hostname string deb.debian.org
+d-i mirror/http/directory string /debian
+d-i mirror/http/proxy string
+
+### User
+d-i passwd/root-login boolean false
+# d-i passwd/root-password password 14881488
+# d-i passwd/root-password-again password 14881488
+d-i passwd/user-fullname string Packer User
+d-i passwd/username string packer
+d-i passwd/user-password password 14881488
+d-i passwd/user-password-again password 14881488
+d-i user-setup/allow-password-weak boolean true
+
+### Time
+d-i clock-setup/utc boolean true
+d-i time/zone string UTC
+
+### Partitioning (LVM expert: /boot + VG vg0 + LV swap + LV root=rest)
+d-i partman-auto/disk string /dev/sda
+d-i partman-auto/method string lvm
+
+# if the disk had old LVM/RAID on it -- wipe without asking
+d-i partman-lvm/device_remove_lvm boolean true
+d-i partman-md/device_remove_md boolean true
+d-i partman-md/confirm boolean true
+d-i partman-md/confirm_nooverwrite boolean true
+
+# pin the VG name
+d-i partman-auto-lvm/new_vg_name string vg0
+d-i partman-auto-lvm/guided_size string max
+
+# partitioning recipe
+d-i partman-auto/choose_recipe select boot-root-lvm-swap
+d-i partman-auto/expert_recipe string \
+      boot-root-lvm-swap :: \
+              512 512 1024 ext4 \
+                      $primary{ } $bootable{ } \
+                      method{ format } format{ } \
+                      use_filesystem{ } filesystem{ ext4 } \
+                      mountpoint{ /boot } \
+              . \
+              1024 1024 -1 lvm \
+                      $primary{ } \
+                      method{ lvm } device{ /dev/sda } \
+                      vg_name{ vg0 } \
+              . \
+              2048 2048 2048 linux-swap \
+                      $lvmok{ } \
+                      in_vg{ vg0 } lv_name{ swap } \
+                      method{ swap } format{ } \
+              . \
+              4096 4096 -1 ext4 \
+                      $lvmok{ } \
+                      in_vg{ vg0 } lv_name{ root } \
+                      method{ format } format{ } \
+                      use_filesystem{ } filesystem{ ext4 } \
+                      mountpoint{ / } \
+              .
+
+# confirmations, fully non-interactive
+d-i partman-lvm/confirm boolean true
+d-i partman-lvm/confirm_nooverwrite boolean true
+d-i partman/choose_partition select finish
+d-i partman/confirm_write_new_label boolean true
+d-i partman/confirm boolean true
+d-i partman/confirm_nooverwrite boolean true
+
+### Packages
+tasksel tasksel/first multiselect standard, ssh-server
+d-i pkgsel/include string sudo openssh-server cloud-init qemu-guest-agent ca-certificates curl
+popularity-contest popularity-contest/participate boolean false
+
+### Bootloader
+d-i grub-installer/only_debian boolean true
+d-i grub-installer/bootdev string /dev/sda
+
+# debconf for grub-pc package (this is the screen you see)
+d-i grub-pc/install_devices multiselect /dev/sda
+d-i grub-pc/install_devices_empty boolean false
+
+### Late command (the important part)
+d-i preseed/late_command string \
+   in-target usermod -aG sudo packer; \
+   echo 'packer ALL=(ALL) NOPASSWD:ALL' > /target/etc/sudoers.d/packer; \
+   chmod 440 /target/etc/sudoers.d/packer;
+
+### Finish
+d-i finish-install/reboot_in_progress note
diff --git a/packer/proxmox/debian13/readme.md b/packer/proxmox/debian13/readme.md
new file mode 100644
index 0000000..1966f1e
--- /dev/null
+++ b/packer/proxmox/debian13/readme.md
@@ -0,0 +1,16 @@
+```bash
+set -a
+source ./.env
+set +a
+
+packer init .
+packer fmt -recursive .
+packer validate .
+PACKER_LOG=1 packer build -on-error=ask -timestamp-ui .
+```
+
+```bash
+lsblk -o NAME,SIZE,TYPE,MOUNTPOINT
+sudo fdisk -l /dev/sda | sed -n '1,120p'
+cloud-init status --long
+```
\ No newline at end of file
diff --git a/packer/proxmox/debian13/scripts/10-base.sh b/packer/proxmox/debian13/scripts/10-base.sh
new file mode 100644
index 0000000..89b2fc6
--- /dev/null
+++ b/packer/proxmox/debian13/scripts/10-base.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+set -euo pipefail
+export DEBIAN_FRONTEND=noninteractive
+
+apt-get update
+apt-get -y dist-upgrade
+
+apt-get -y install qemu-guest-agent sudo
+
+systemctl enable --now qemu-guest-agent || true
diff --git a/packer/proxmox/debian13/scripts/90-cleanup.sh b/packer/proxmox/debian13/scripts/90-cleanup.sh
new file mode 100644
index 0000000..c3e9b2c
--- /dev/null
+++ b/packer/proxmox/debian13/scripts/90-cleanup.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+set -euo pipefail
+export DEBIAN_FRONTEND=noninteractive
+
+# cloud-init: reset state so clones start clean
+cloud-init clean --logs || true
+
+# machine-id: must be unique on every clone
+truncate -s 0 /etc/machine-id || true
+rm -f /var/lib/dbus/machine-id || true
+ln -sf /etc/machine-id /var/lib/dbus/machine-id || true
+
+# ssh host keys: will be regenerated on the clone
+rm -f /etc/ssh/ssh_host_* || true
+
+# clean caches / apt lists
+apt-get -y autoremove --purge
+apt-get -y clean
+rm -rf /var/lib/apt/lists/*
+find /var/log -type f -exec truncate -s 0 {} \; || true
+
+sync
diff --git a/terraform/modules/k8s/ceph/k8s-ceph-csi-rbd/helm.tf b/terraform/modules/k8s/ceph/k8s-ceph-csi-rbd/helm.tf
new file mode 100644
index 0000000..ce9a961
--- /dev/null
+++ b/terraform/modules/k8s/ceph/k8s-ceph-csi-rbd/helm.tf
@@ -0,0 +1,19 @@
+resource "helm_release" "ceph_csi_rbd" {
+  name       = "ceph-csi-rbd"
+  namespace  = kubernetes_namespace_v1.this.metadata[0].name
+  repository = "https://ceph.github.io/csi-charts"
+  chart      = "ceph-csi-rbd"
+  version    = var.chart_version
+
+  create_namespace = false
+
+  values = [yamlencode({
+    csiConfig = [{
+      clusterID = var.ceph_cluster_id
+      monitors  = var.ceph_monitors
+    }]
+    provisioner = {
+      replicaCount = 1
+    }
+  })]
+}
diff --git a/terraform/modules/k8s/ceph/k8s-ceph-csi-rbd/namespace.tf b/terraform/modules/k8s/ceph/k8s-ceph-csi-rbd/namespace.tf
new file mode 100644
index 0000000..3d6587a
--- /dev/null
+++ b/terraform/modules/k8s/ceph/k8s-ceph-csi-rbd/namespace.tf
@@ -0,0 +1,3 @@
+resource "kubernetes_namespace_v1" "this" {
+  metadata { name = var.namespace }
+}
diff --git a/terraform/modules/k8s/ceph/k8s-ceph-csi-rbd/variables.tf b/terraform/modules/k8s/ceph/k8s-ceph-csi-rbd/variables.tf
new file mode 100644
index 0000000..5a9ec02
--- /dev/null
+++ b/terraform/modules/k8s/ceph/k8s-ceph-csi-rbd/variables.tf
@@ -0,0 +1,15 @@
+variable "namespace" {
+  type = string
+}
+
+variable "chart_version" {
+  type = string
+}
+
+variable "ceph_cluster_id" {
+  type = string
+}
+
+variable "ceph_monitors" {
+  type = list(string)
+}
diff --git a/terraform/modules/k8s/ceph/k8s-ceph-csi-rbd/versions.tf b/terraform/modules/k8s/ceph/k8s-ceph-csi-rbd/versions.tf
new file mode 100644
index 0000000..b824839
--- /dev/null
+++ b/terraform/modules/k8s/ceph/k8s-ceph-csi-rbd/versions.tf
@@ -0,0 +1,6 @@
+terraform {
+  required_providers {
+    helm       = { source = "hashicorp/helm" }
+    kubernetes = { source = "hashicorp/kubernetes" }
+  }
+}
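Note: the ceph-csi-rbd module above needs a cluster ID and monitor endpoints, and the storage module below needs a CSI user key. A hedged sketch of pulling those values from the Ceph cluster — the pool/user names mirror the defaults in terraform/stacks/k8s/variables.tf and are otherwise assumptions:

```bash
ceph fsid                      # -> var.ceph_cluster_id
ceph mon dump | grep 'mon\.'   # -> var.ceph_monitors ("ip:6789" entries)

# pool + dedicated CSI user; the printed key feeds var.ceph_user_key
ceph osd pool create k8s-rbd
rbd pool init k8s-rbd
ceph auth get-or-create client.k8s-rbd-csi \
  mon 'profile rbd' \
  osd 'profile rbd pool=k8s-rbd' \
  mgr 'profile rbd pool=k8s-rbd'
```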
diff --git a/terraform/modules/k8s/ceph/k8s-ceph-rbd-storage/secret.tf b/terraform/modules/k8s/ceph/k8s-ceph-rbd-storage/secret.tf
new file mode 100644
index 0000000..6372819
--- /dev/null
+++ b/terraform/modules/k8s/ceph/k8s-ceph-rbd-storage/secret.tf
@@ -0,0 +1,13 @@
+resource "kubernetes_secret_v1" "csi_rbd_secret" {
+  metadata {
+    name      = "csi-rbd-secret"
+    namespace = var.namespace
+  }
+
+  data = {
+    userID  = var.ceph_user_id
+    userKey = var.ceph_user_key
+  }
+
+  type = "Opaque"
+}
diff --git a/terraform/modules/k8s/ceph/k8s-ceph-rbd-storage/storage_class.tf b/terraform/modules/k8s/ceph/k8s-ceph-rbd-storage/storage_class.tf
new file mode 100644
index 0000000..56297e4
--- /dev/null
+++ b/terraform/modules/k8s/ceph/k8s-ceph-rbd-storage/storage_class.tf
@@ -0,0 +1,27 @@
+resource "kubernetes_storage_class_v1" "ceph_rbd" {
+  metadata {
+    name = "ceph-rbd"
+    # to make it the default StorageClass:
+    # annotations = {
+    #   "storageclass.kubernetes.io/is-default-class" = "true"
+    # }
+  }
+
+  storage_provisioner    = "rbd.csi.ceph.com"
+  reclaim_policy         = "Delete"
+  volume_binding_mode    = "Immediate"
+  allow_volume_expansion = true
+
+  parameters = {
+    clusterID = var.ceph_cluster_id
+    pool      = var.ceph_rbd_pool
+
+    # IMPORTANT: these parameter keys are literal strings -- keep them exactly as written
+    "csi.storage.k8s.io/provisioner-secret-name"      = kubernetes_secret_v1.csi_rbd_secret.metadata[0].name
+    "csi.storage.k8s.io/provisioner-secret-namespace" = kubernetes_secret_v1.csi_rbd_secret.metadata[0].namespace
+    "csi.storage.k8s.io/node-stage-secret-name"       = kubernetes_secret_v1.csi_rbd_secret.metadata[0].name
+    "csi.storage.k8s.io/node-stage-secret-namespace"  = kubernetes_secret_v1.csi_rbd_secret.metadata[0].namespace
+
+    imageFeatures = "layering"
+  }
+}
diff --git a/terraform/modules/k8s/ceph/k8s-ceph-rbd-storage/variables.tf b/terraform/modules/k8s/ceph/k8s-ceph-rbd-storage/variables.tf
new file mode 100644
index 0000000..4e28c4b
--- /dev/null
+++ b/terraform/modules/k8s/ceph/k8s-ceph-rbd-storage/variables.tf
@@ -0,0 +1,20 @@
+variable "namespace" {
+  type = string
+}
+
+variable "ceph_cluster_id" {
+  type = string
+}
+
+variable "ceph_rbd_pool" {
+  type = string
+}
+
+variable "ceph_user_id" {
+  type = string
+}
+
+variable "ceph_user_key" {
+  type      = string
+  sensitive = true
+}
diff --git a/terraform/modules/k8s/ceph/k8s-ceph-rbd-storage/versions.tf b/terraform/modules/k8s/ceph/k8s-ceph-rbd-storage/versions.tf
new file mode 100644
index 0000000..f6e834d
--- /dev/null
+++ b/terraform/modules/k8s/ceph/k8s-ceph-rbd-storage/versions.tf
@@ -0,0 +1,5 @@
+terraform {
+  required_providers {
+    kubernetes = { source = "hashicorp/kubernetes" }
+  }
+}
diff --git a/terraform/modules/k8s/crunchy-data/operator/helm.tf b/terraform/modules/k8s/crunchy-data/operator/helm.tf
new file mode 100644
index 0000000..3ef6d47
--- /dev/null
+++ b/terraform/modules/k8s/crunchy-data/operator/helm.tf
@@ -0,0 +1,26 @@
+resource "helm_release" "pgo" {
+  name      = var.release_name
+  namespace = kubernetes_namespace_v1.this.metadata[0].name
+
+  # Crunchy publishes the Helm chart in an OCI registry:
+  # helm install pgo oci://registry.developers.crunchydata.com/crunchydata/pgo
+  repository = "oci://registry.developers.crunchydata.com/crunchydata"
+  chart      = "pgo"
+  version    = var.chart_version
+
+  create_namespace = false
+
+  values = [
+    yamlencode({
+      # safe defaults, no extra magic
+      debug    = var.debug
+      replicas = var.replicas
+
+      # To restrict the operator to this namespace only:
+      # singleNamespace = true
+      singleNamespace = var.single_namespace
+
+      installCRDs = true
+    })
+  ]
+}
diff --git a/terraform/modules/k8s/crunchy-data/operator/namespace.tf b/terraform/modules/k8s/crunchy-data/operator/namespace.tf
new file mode 100644
index 0000000..5ef28b7
--- /dev/null
+++ b/terraform/modules/k8s/crunchy-data/operator/namespace.tf
@@ -0,0 +1,5 @@
+resource "kubernetes_namespace_v1" "this" {
+  metadata {
+    name = var.namespace
+  }
+}
diff --git a/terraform/modules/k8s/crunchy-data/operator/outputs.tf b/terraform/modules/k8s/crunchy-data/operator/outputs.tf
new file mode 100644
index 0000000..e53d2d3
--- /dev/null
+++ b/terraform/modules/k8s/crunchy-data/operator/outputs.tf
@@ -0,0 +1,7 @@
+output "namespace" {
+  value = kubernetes_namespace_v1.this.metadata[0].name
+}
+
+output "release_name" {
+  value = helm_release.pgo.name
+}
diff --git a/terraform/modules/k8s/crunchy-data/operator/variables.tf b/terraform/modules/k8s/crunchy-data/operator/variables.tf
new file mode 100644
index 0000000..162f144
--- /dev/null
+++ b/terraform/modules/k8s/crunchy-data/operator/variables.tf
@@ -0,0 +1,33 @@
+variable "namespace" {
+  type        = string
+  description = "Namespace to install the Crunchy operator into"
+  default     = "postgres-operator"
+}
+
+variable "release_name" {
+  type        = string
+  description = "Helm release name"
+  default     = "pgo"
+}
+
+variable "chart_version" {
+  type        = string
+  description = "pgo chart version (always pin it for reproducibility)"
+  default     = "6.0.0"
+}
+
+variable "debug" {
+  type    = bool
+  default = false
+}
+
+variable "replicas" {
+  type    = number
+  default = 1
+}
+
+variable "single_namespace" {
+  type        = bool
+  description = "If true, the operator only manages clusters in this namespace"
+  default     = true
+}
diff --git a/terraform/modules/k8s/crunchy-data/operator/versions.tf b/terraform/modules/k8s/crunchy-data/operator/versions.tf
new file mode 100644
index 0000000..b824839
--- /dev/null
+++ b/terraform/modules/k8s/crunchy-data/operator/versions.tf
@@ -0,0 +1,6 @@
+terraform {
+  required_providers {
+    helm       = { source = "hashicorp/helm" }
+    kubernetes = { source = "hashicorp/kubernetes" }
+  }
+}
diff --git a/terraform/modules/k8s/crunchy-data/postgres-cluster/manifest.tf b/terraform/modules/k8s/crunchy-data/postgres-cluster/manifest.tf
new file mode 100644
index 0000000..f6223a6
--- /dev/null
+++ b/terraform/modules/k8s/crunchy-data/postgres-cluster/manifest.tf
@@ -0,0 +1,60 @@
+resource "kubernetes_manifest" "postgres_cluster" {
+  manifest = {
+    apiVersion = "postgres-operator.crunchydata.com/v1beta1"
+    kind       = "PostgresCluster"
+
+    metadata = {
+      name      = var.name
+      namespace = var.namespace
+    }
+
+    spec = {
+      postgresVersion = var.postgres_version
+
+      instances = [
+        {
+          name = "instance1"
+          dataVolumeClaimSpec = {
+            storageClassName = var.storage_class_name
+            accessModes      = ["ReadWriteOnce"]
+            resources = {
+              requests = {
+                storage = var.instance_storage
+              }
+            }
+          }
+        }
+      ]
+
+      backups = {
+        pgbackrest = {
+          repos = [
+            {
+              name = "repo1"
+              volume = {
+                volumeClaimSpec = {
+                  storageClassName = var.storage_class_name
+                  accessModes      = ["ReadWriteOnce"]
+                  resources = {
+                    requests = {
+                      storage = var.backup_storage
+                    }
+                  }
+                }
+              }
+            }
+          ]
+        }
+      }
+
+      users = [
+        {
+          name = var.gitlab_db_user
+          databases = [
+            var.gitlab_db_name
+          ]
+        }
+      ]
+    }
+  }
+}
diff --git a/terraform/modules/k8s/crunchy-data/postgres-cluster/outputs.tf b/terraform/modules/k8s/crunchy-data/postgres-cluster/outputs.tf
new file mode 100644
index 0000000..949488c
--- /dev/null
+++ b/terraform/modules/k8s/crunchy-data/postgres-cluster/outputs.tf
@@ -0,0 +1,7 @@
+output "cluster_name" {
+  value = kubernetes_manifest.postgres_cluster.manifest["metadata"]["name"]
+}
+
+output "namespace" {
+  value = kubernetes_manifest.postgres_cluster.manifest["metadata"]["namespace"]
+}
diff --git a/terraform/modules/k8s/crunchy-data/postgres-cluster/variables.tf b/terraform/modules/k8s/crunchy-data/postgres-cluster/variables.tf
new file mode 100644
index 0000000..053e4be
--- /dev/null
+++ b/terraform/modules/k8s/crunchy-data/postgres-cluster/variables.tf
@@ -0,0 +1,46 @@
+variable "namespace" {
+  type        = string
+  description = "Namespace the PostgresCluster lives in"
+  default     = "postgres-operator"
+}
+
+variable "name" {
+  type        = string
+  description = "PostgresCluster name"
+  default     = "hippo"
+}
+
+variable "storage_class_name" {
+  type        = string
+  description = "StorageClass for the PVCs (your Ceph CSI RBD), e.g.: ceph-rbd"
+  default     = "ceph-rbd"
+}
+
+variable "postgres_version" {
+  type        = number
+  description = "PostgreSQL major version (use one your CPK release supports)"
+  default     = 16
+}
+
+variable "instance_storage" {
+  type        = string
+  description = "Volume size for data"
+  default     = "10Gi"
+}
+
+variable "backup_storage" {
+  type        = string
+  description = "Volume size for the pgBackRest repo"
+  default     = "10Gi"
+}
+
+variable "gitlab_db_user" {
+  type    = string
+  default = "gitlab"
+}
+
+variable "gitlab_db_name" {
+  type    = string
+  default = "gitlabhq_production"
+}
+
diff --git a/terraform/modules/k8s/crunchy-data/postgres-cluster/versions.tf b/terraform/modules/k8s/crunchy-data/postgres-cluster/versions.tf
new file mode 100644
index 0000000..1817373
--- /dev/null
+++ b/terraform/modules/k8s/crunchy-data/postgres-cluster/versions.tf
@@ -0,0 +1,7 @@
+terraform {
+  required_providers {
+    kubernetes = {
+      source = "hashicorp/kubernetes"
+    }
+  }
+}
diff --git a/terraform/modules/k8s/metallb/helm/main.tf b/terraform/modules/k8s/metallb/helm/main.tf
new file mode 100644
index 0000000..0da352b
--- /dev/null
+++ b/terraform/modules/k8s/metallb/helm/main.tf
@@ -0,0 +1,7 @@
+resource "helm_release" "metallb" {
+  name             = "metallb"
+  repository       = "https://metallb.github.io/metallb"
+  chart            = "metallb"
+  namespace        = "metallb-system"
+  create_namespace = true
+}
diff --git a/terraform/modules/k8s/metallb/helm/versions.tf b/terraform/modules/k8s/metallb/helm/versions.tf
new file mode 100644
index 0000000..665c434
--- /dev/null
+++ b/terraform/modules/k8s/metallb/helm/versions.tf
@@ -0,0 +1,7 @@
+terraform {
+  required_providers {
+    helm = {
+      source = "hashicorp/helm"
+    }
+  }
+}
diff --git a/terraform/modules/k8s/metallb/resources/manifest.tf b/terraform/modules/k8s/metallb/resources/manifest.tf
new file mode 100644
index 0000000..4d2c5ed
--- /dev/null
+++ b/terraform/modules/k8s/metallb/resources/manifest.tf
@@ -0,0 +1,31 @@
+# The pool of IPs that MetalLB hands out as "external IPs" to Services of type LoadBalancer
+resource "kubernetes_manifest" "metallb_ip_pool" {
+  manifest = {
+    apiVersion = "metallb.io/v1beta1"
+    kind       = "IPAddressPool"
+    metadata = {
+      name      = var.pool_name
+      namespace = var.namespace
+    }
+    spec = {
+      addresses = var.addresses
+    }
+  }
+}
+
+# This tells MetalLB: announce the addresses from this pool in L2 mode.
+resource "kubernetes_manifest" "metallb_l2" {
+  manifest = {
+    apiVersion = "metallb.io/v1beta1"
+    kind       = "L2Advertisement"
+    metadata = {
+      name      = var.l2_name
+      namespace = var.namespace
+    }
+    spec = {
+      ipAddressPools = [var.pool_name]
+    }
+  }
+
+  depends_on = [kubernetes_manifest.metallb_ip_pool]
+}
diff --git a/terraform/modules/k8s/metallb/resources/outputs.tf b/terraform/modules/k8s/metallb/resources/outputs.tf
new file mode 100644
index 0000000..d3c410f
--- /dev/null
+++ b/terraform/modules/k8s/metallb/resources/outputs.tf
@@ -0,0 +1,3 @@
+output "pool_name" {
+  value = var.pool_name
+}
diff --git a/terraform/modules/k8s/metallb/resources/variables.tf b/terraform/modules/k8s/metallb/resources/variables.tf
new file mode 100644
index 0000000..999de81
--- /dev/null
+++ b/terraform/modules/k8s/metallb/resources/variables.tf
@@ -0,0 +1,22 @@
+variable "addresses" {
+  type        = list(string)
+  description = "MetalLB address pool ranges"
+}
+
+variable "namespace" {
+  type        = string
+  description = "Namespace where MetalLB is installed"
+  default     = "metallb-system"
+}
+
+variable "pool_name" {
+  type        = string
+  description = "IPAddressPool name"
+  default     = "default-pool"
+}
+
+variable "l2_name" {
+  type        = string
+  description = "L2Advertisement name"
+  default     = "default-l2"
+}
diff --git a/terraform/modules/k8s/metallb/resources/versions.tf b/terraform/modules/k8s/metallb/resources/versions.tf
new file mode 100644
index 0000000..1817373
--- /dev/null
+++ b/terraform/modules/k8s/metallb/resources/versions.tf
@@ -0,0 +1,7 @@
+terraform {
+  required_providers {
+    kubernetes = {
+      source = "hashicorp/kubernetes"
+    }
+  }
+}
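Note: a quick way to exercise the pool once the MetalLB modules are applied — the deployment/Service names below are made up for the example, and the range matches the pool configured in terraform/stacks/k8s/main.tf:

```bash
# Pool and advertisement should exist:
kubectl -n metallb-system get ipaddresspools.metallb.io,l2advertisements.metallb.io

# Any LoadBalancer Service should now get an IP from 192.168.0.230-250:
kubectl create deployment echo --image=nginx
kubectl expose deployment echo --port=80 --type=LoadBalancer
kubectl get svc echo -w   # EXTERNAL-IP should come from the MetalLB pool
```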
diff --git a/terraform/modules/k8s/nginx_ingress/helm/locals.tf b/terraform/modules/k8s/nginx_ingress/helm/locals.tf
new file mode 100644
index 0000000..334a021
--- /dev/null
+++ b/terraform/modules/k8s/nginx_ingress/helm/locals.tf
@@ -0,0 +1,18 @@
+locals {
+  ingress_nginx_values = merge(
+    {
+      controller = {
+        service = {
+          type = "LoadBalancer"
+        }
+      }
+    },
+    var.ingress_lb_ip == null ? {} : {
+      controller = {
+        service = {
+          loadBalancerIP = var.ingress_lb_ip
+        }
+      }
+    }
+  )
+}
diff --git a/terraform/modules/k8s/nginx_ingress/helm/main.tf b/terraform/modules/k8s/nginx_ingress/helm/main.tf
new file mode 100644
index 0000000..87cf48a
--- /dev/null
+++ b/terraform/modules/k8s/nginx_ingress/helm/main.tf
@@ -0,0 +1,9 @@
+resource "helm_release" "ingress_nginx" {
+  name             = "ingress-nginx"
+  repository       = "https://kubernetes.github.io/ingress-nginx"
+  chart            = "ingress-nginx"
+  namespace        = "ingress-nginx"
+  create_namespace = true
+
+  values = [yamlencode(local.ingress_nginx_values)]
+}
diff --git a/terraform/modules/k8s/nginx_ingress/helm/variables.tf b/terraform/modules/k8s/nginx_ingress/helm/variables.tf
new file mode 100644
index 0000000..a3cfa77
--- /dev/null
+++ b/terraform/modules/k8s/nginx_ingress/helm/variables.tf
@@ -0,0 +1,10 @@
+variable "ingress_lb_ip" {
+  type        = string
+  description = "Static LB IP for ingress-nginx controller Service (must be from MetalLB pool). Leave null for dynamic."
+  default     = null
+}
+
+variable "pool_name" {
+  type        = string
+  description = "MetalLB address pool name for ingress-nginx Service annotation"
+}
diff --git a/terraform/modules/k8s/openebs/helm.tf b/terraform/modules/k8s/openebs/helm.tf
new file mode 100644
index 0000000..02c910d
--- /dev/null
+++ b/terraform/modules/k8s/openebs/helm.tf
@@ -0,0 +1,8 @@
+resource "helm_release" "openebs" {
+  name             = var.release_name
+  repository       = "https://openebs.github.io/openebs"
+  chart            = "openebs"
+  version          = var.chart_version
+  namespace        = var.namespace
+  create_namespace = true
+}
diff --git a/terraform/modules/k8s/openebs/storage_class.tf b/terraform/modules/k8s/openebs/storage_class.tf
new file mode 100644
index 0000000..6d004d6
--- /dev/null
+++ b/terraform/modules/k8s/openebs/storage_class.tf
@@ -0,0 +1,25 @@
+resource "kubernetes_storage_class_v1" "openebs_hostpath" {
+  metadata {
+    name = var.storageclass_name
+    annotations = {
+      "storageclass.kubernetes.io/is-default-class" = "true"
+      "openebs.io/cas-type"                         = "local"
+    }
+  }
+
+  storage_provisioner    = "openebs.io/local"
+  reclaim_policy         = "Delete"
+  volume_binding_mode    = "WaitForFirstConsumer"
+  allow_volume_expansion = false
+
+  parameters = {
+    "cas.openebs.io/config" = <<-EOT
+      - name: StorageType
+        value: "hostpath"
+      - name: BasePath
+        value: "${var.base_path}"
+    EOT
+  }
+
+  depends_on = [helm_release.openebs]
+}
diff --git a/terraform/modules/k8s/openebs/variables.tf b/terraform/modules/k8s/openebs/variables.tf
new file mode 100644
index 0000000..358e267
--- /dev/null
+++ b/terraform/modules/k8s/openebs/variables.tf
@@ -0,0 +1,26 @@
+variable "namespace" {
+  type    = string
+  default = "openebs"
+}
+
+variable "release_name" {
+  type    = string
+  default = "openebs"
+}
+
+variable "chart_version" {
+  type        = string
+  default     = null
+  description = "openebs helm chart version (null = latest available)."
+}
+
+variable "storageclass_name" {
+  type    = string
+  default = "openebs-local-hostpath"
+}
+
+variable "base_path" {
+  type        = string
+  default     = "/var/openebs/local/"
+  description = "Path on the nodes for hostpath LocalPV (customizable)."
+}
diff --git a/terraform/modules/k8s/openebs/versions.tf b/terraform/modules/k8s/openebs/versions.tf
new file mode 100644
index 0000000..b824839
--- /dev/null
+++ b/terraform/modules/k8s/openebs/versions.tf
@@ -0,0 +1,6 @@
+terraform {
+  required_providers {
+    helm       = { source = "hashicorp/helm" }
+    kubernetes = { source = "hashicorp/kubernetes" }
+  }
+}
diff --git a/terraform/modules/k8s/valkey/helm.tf b/terraform/modules/k8s/valkey/helm.tf
new file mode 100644
index 0000000..8842d29
--- /dev/null
+++ b/terraform/modules/k8s/valkey/helm.tf
@@ -0,0 +1,39 @@
+resource "helm_release" "valkey" {
+  name      = var.release_name
+  namespace = var.namespace
+
+  repository       = var.repository
+  chart            = "valkey"
+  version          = var.chart_version
+  create_namespace = false
+
+  # extra user-supplied values (var.values) are appended so they can override the defaults below
+  values = concat([
+    yamlencode({
+      auth = {
+        enabled             = true
+        usersExistingSecret = kubernetes_secret_v1.valkey_users.metadata[0].name
+
+        # IMPORTANT: 'default' must be defined here (or in aclConfig),
+        # otherwise the chart complains / the configuration is insecure
+        aclUsers = {
+          default = {
+            permissions = "~* &* +@all"
+            # no password here -- it is taken from usersExistingSecret
+          }
+        }
+      }
+
+      # (optional) persistence on Ceph RBD:
+      # dataStorage = {
+      #   enabled       = true
+      #   requestedSize = "5Gi"
+      #   className     = "ceph-rbd"
+      # }
+    })
+  ], var.values)
+
+  depends_on = [
+    kubernetes_namespace_v1.this,
+    kubernetes_secret_v1.valkey_users,
+  ]
+}
diff --git a/terraform/modules/k8s/valkey/namespace.tf b/terraform/modules/k8s/valkey/namespace.tf
new file mode 100644
index 0000000..66e1ec1
--- /dev/null
+++ b/terraform/modules/k8s/valkey/namespace.tf
@@ -0,0 +1,7 @@
+resource "kubernetes_namespace_v1" "this" {
+  count = var.create_namespace ? 1 : 0
+
+  metadata {
+    name = var.namespace
+  }
+}
diff --git a/terraform/modules/k8s/valkey/secret.tf b/terraform/modules/k8s/valkey/secret.tf
new file mode 100644
index 0000000..9667a82
--- /dev/null
+++ b/terraform/modules/k8s/valkey/secret.tf
@@ -0,0 +1,13 @@
+resource "kubernetes_secret_v1" "valkey_users" {
+  metadata {
+    name      = "valkey-users"
+    namespace = var.namespace # "valkey"
+  }
+
+  type = "Opaque"
+
+  data = {
+    # IMPORTANT: key = username; that is what the chart expects by default.
+    # Note: the kubernetes provider base64-encodes `data` values itself,
+    # so pass the password as plain text (base64encode here would double-encode it).
+    default = var.valkey_password
+  }
+}
diff --git a/terraform/modules/k8s/valkey/variables.tf b/terraform/modules/k8s/valkey/variables.tf
new file mode 100644
index 0000000..0094dbf
--- /dev/null
+++ b/terraform/modules/k8s/valkey/variables.tf
@@ -0,0 +1,35 @@
+variable "namespace" {
+  type    = string
+  default = "valkey"
+}
+
+variable "create_namespace" {
+  type    = bool
+  default = true
+}
+
+variable "release_name" {
+  type    = string
+  default = "valkey"
+}
+
+variable "chart_version" {
+  type    = string
+  default = "0.9.2"
+}
+
+variable "repository" {
+  type    = string
+  default = "https://valkey.io/valkey-helm/"
+}
+
+# To customize chart values, just pass yamlencode({...}) entries here
+variable "values" {
+  type    = list(string)
+  default = []
+}
+
+variable "valkey_password" {
+  type      = string
+  sensitive = true
+}
diff --git a/terraform/modules/k8s/valkey/versions.tf b/terraform/modules/k8s/valkey/versions.tf
new file mode 100644
index 0000000..27dd139
--- /dev/null
+++ b/terraform/modules/k8s/valkey/versions.tf
@@ -0,0 +1,14 @@
+terraform {
+  required_version = ">= 1.5.0"
+
+  required_providers {
+    helm = {
+      source  = "hashicorp/helm"
+      version = ">= 3.0.0"
+    }
+    kubernetes = {
+      source  = "hashicorp/kubernetes"
+      version = ">= 2.0.0"
+    }
+  }
+}
diff --git a/terraform/modules/powerdns/record/main.tf b/terraform/modules/powerdns/record/main.tf
new file mode 100644
index 0000000..96fa3cd
--- /dev/null
+++ b/terraform/modules/powerdns/record/main.tf
@@ -0,0 +1,7 @@
+resource "powerdns_record" "this" {
+  zone    = var.zone_name
+  name    = var.name
+  type    = var.type
+  ttl     = var.ttl
+  records = var.records
+}
diff --git a/terraform/modules/powerdns/record/variables.tf b/terraform/modules/powerdns/record/variables.tf
new file mode 100644
index 0000000..896760c
--- /dev/null
+++ b/terraform/modules/powerdns/record/variables.tf
@@ -0,0 +1,19 @@
+variable "zone_name" {
+  type = string
+}
+
+variable "name" {
+  type = string
+}
+
+variable "type" {
+  type = string
+}
+
+variable "ttl" {
+  type = number
+}
+
+variable "records" {
+  type = list(string)
+}
diff --git a/terraform/modules/powerdns/record/versions.tf b/terraform/modules/powerdns/record/versions.tf
new file mode 100644
index 0000000..01eccbd
--- /dev/null
+++ b/terraform/modules/powerdns/record/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+  required_version = ">= 1.3.0"
+
+  required_providers {
+    powerdns = {
+      source = "pan-net/powerdns"
+      # version = "1.5.0" # optional: pin for reproducibility
+    }
+  }
+}
diff --git a/terraform/modules/powerdns/zone/main.tf b/terraform/modules/powerdns/zone/main.tf
new file mode 100644
index 0000000..ddbb5c0
--- /dev/null
+++ b/terraform/modules/powerdns/zone/main.tf
@@ -0,0 +1,6 @@
+resource "powerdns_zone" "zone" {
+  name         = var.zone_name
+  kind         = var.zone_kind
+  soa_edit_api = var.soa_edit_api
+  nameservers  = var.zone_nameservers
+}
diff --git a/terraform/modules/powerdns/zone/outputs.tf b/terraform/modules/powerdns/zone/outputs.tf
new file mode 100644
index 0000000..025d526
--- /dev/null
+++ b/terraform/modules/powerdns/zone/outputs.tf
@@ -0,0 +1,3 @@
+output "name" {
+  value = powerdns_zone.zone.name
+}
diff --git a/terraform/modules/powerdns/zone/variables.tf b/terraform/modules/powerdns/zone/variables.tf
new file mode 100644
index 0000000..3b99d96
--- /dev/null
+++ b/terraform/modules/powerdns/zone/variables.tf
@@ -0,0 +1,15 @@
+variable "zone_name" {
+  type = string
+}
+
+variable "zone_kind" {
+  type = string
+}
+
+variable "soa_edit_api" {
+  type = string
+}
+
+variable "zone_nameservers" {
+  type = list(string)
+}
diff --git a/terraform/modules/powerdns/zone/versions.tf b/terraform/modules/powerdns/zone/versions.tf
new file mode 100644
index 0000000..01eccbd
--- /dev/null
+++ b/terraform/modules/powerdns/zone/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+  required_version = ">= 1.3.0"
+
+  required_providers {
+    powerdns = {
+      source = "pan-net/powerdns"
+      # version = "1.5.0" # optional: pin for reproducibility
+    }
+  }
+}
diff --git a/terraform/modules/proxmox/lxc/main.tf b/terraform/modules/proxmox/lxc/main.tf
new file mode 100644
index 0000000..41d612f
--- /dev/null
+++ b/terraform/modules/proxmox/lxc/main.tf
@@ -0,0 +1,72 @@
+resource "proxmox_virtual_environment_container" "this" {
+  node_name    = var.target_node
+  vm_id        = var.vm_id
+  unprivileged = var.unprivileged
+
+  started       = var.started
+  start_on_boot = var.start_on_boot
+
+  cpu {
+    cores = var.cores
+    units = var.cpu_units
+  }
+
+  memory {
+    dedicated = var.memory
+    swap      = var.swap
+  }
+
+  disk {
+    datastore_id = var.rootfs_storage
+    size         = var.rootfs_size_gib
+  }
+
+  features {
+    nesting = var.nesting
+  }
+
+  network_interface {
+    name     = var.netif_name
+    bridge   = var.bridge
+    enabled  = true
+    firewall = var.firewall
+  }
+
+  operating_system {
+    template_file_id = var.template_file_id
+    type             = var.os_type
+  }
+
+  initialization {
+    hostname = var.hostname
+
+    ip_config {
+      ipv4 {
+        address = var.ipv4_address
+        gateway = var.ipv4_gateway
+      }
+    }
+
+    # user_account is optional:
+    # - rendered only if a password is set (non-null) or ssh keys are provided
+    dynamic "user_account" {
+      for_each = (var.password != null || length(var.ssh_public_keys) > 0) ? [1] : []
+      content {
+        # the provider accepts keys as list(string)
+        keys = var.ssh_public_keys
+
+        # password is only set when non-null
+        password = var.password
+      }
+    }
+
+    # DNS is optional
+    dynamic "dns" {
+      for_each = (var.dns_domain != null || length(var.dns_servers) > 0) ? [1] : []
+      content {
+        domain  = var.dns_domain
+        servers = var.dns_servers
+      }
+    }
+  }
+}
diff --git a/terraform/modules/proxmox/lxc/variables.tf b/terraform/modules/proxmox/lxc/variables.tf
new file mode 100644
index 0000000..76f454f
--- /dev/null
+++ b/terraform/modules/proxmox/lxc/variables.tf
@@ -0,0 +1,126 @@
+variable "vm_id" {
+  type = number
+}
+
+variable "hostname" {
+  type = string
+}
+
+variable "target_node" {
+  type = string
+}
+
+variable "template_file_id" {
+  type = string
+}
+
+variable "os_type" {
+  type    = string
+  default = "debian"
+}
+
+variable "unprivileged" {
+  type    = bool
+  default = true
+}
+
+variable "nesting" {
+  type    = bool
+  default = true
+}
+
+variable "cores" {
+  type    = number
+  default = 1
+}
+
+# Proxmox cpuunits
+variable "cpu_units" {
+  type    = number
+  default = 1024
+}
+
+variable "memory" {
+  type    = number
+  default = 512
+}
+
+variable "swap" {
+  type    = number
+  default = 512
+}
+
+variable "rootfs_storage" {
+  type    = string
+  default = "local-lvm"
+}
+
+variable "rootfs_size_gib" {
+  type    = number
+  default = 8
+}
+
+variable "bridge" {
+  type    = string
+  default = "vmbr0"
+}
+
+variable "netif_name" {
+  type    = string
+  default = "eth0"
+}
+
+variable "firewall" {
+  type    = bool
+  default = true
+}
+
+# DHCP: "dhcp"
+# Static: "192.168.1.50/24"
+variable "ipv4_address" {
+  type    = string
+  default = "dhcp"
+
+  validation {
+    condition     = var.ipv4_address == "dhcp" || can(cidrnetmask(var.ipv4_address))
+    error_message = "ipv4_address must be \"dhcp\" or a valid CIDR like 192.168.1.50/24."
+  }
+}
+
+# gateway is only valid when the address is not dhcp
+variable "ipv4_gateway" {
+  type    = string
+  default = null
+}
+
+# Password is optional (you can manage access via ssh keys instead)
+variable "password" {
+  type      = string
+  default   = null
+  sensitive = true
+}
+
+variable "ssh_public_keys" {
+  type    = list(string)
+  default = []
+}
+
+variable "dns_domain" {
+  type    = string
+  default = null
+}
+
+variable "dns_servers" {
+  type    = list(string)
+  default = []
+}
+
+variable "started" {
+  type    = bool
+  default = false
+}
+
+variable "start_on_boot" {
+  type    = bool
+  default = false
+}
diff --git a/terraform/modules/proxmox/lxc/versions.tf b/terraform/modules/proxmox/lxc/versions.tf
new file mode 100644
index 0000000..bb496bf
--- /dev/null
+++ b/terraform/modules/proxmox/lxc/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+  required_version = ">= 1.6"
+
+  required_providers {
+    proxmox = {
+      source  = "bpg/proxmox"
+      version = "0.86.0"
+    }
+  }
+}
diff --git a/terraform/modules/proxmox/vm/main.tf b/terraform/modules/proxmox/vm/main.tf
new file mode 100644
index 0000000..48a3b85
--- /dev/null
+++ b/terraform/modules/proxmox/vm/main.tf
@@ -0,0 +1,63 @@
+resource "proxmox_virtual_environment_vm" "this" {
+  name      = var.name
+  node_name = var.target_node
+
+  clone {
+    vm_id = var.template_id
+    full  = true
+  }
+
+  cpu {
+    cores = var.cpu
+    type  = var.cpu_type
+  }
+
+  memory {
+    dedicated = var.memory
+  }
+
+  # Predictable SCSI numbering
+  scsi_hardware = "virtio-scsi-single"
+  boot_order    = ["scsi0"]
+
+  # OS disk (scsi0)
+  disk {
+    datastore_id = var.storage
+    size         = var.disk_size
+    interface    = "scsi0"
+  }
+
+  # OSD disks (scsi1, scsi2, ...)
+  dynamic "disk" {
+    for_each = (var.osd_storage != null && length(var.osd_disks) > 0) ? { for idx, size in var.osd_disks : idx => size } : {}
+
+    content {
+      datastore_id = var.osd_storage
+      size         = disk.value
+      interface    = "scsi${disk.key + 1}"
+
+      file_format = "raw"
+      cache       = "none"
+      iothread    = true
+      discard     = "on"
+    }
+  }
+
+  network_device {
+    bridge      = var.bridge
+    model       = "virtio"
+    mac_address = var.mac_address
+  }
+
+  agent {
+    enabled = true
+  }
+
+  initialization {
+    user_data_file_id = var.user_data_file_id
+
+    ip_config {
+      ipv4 { address = "dhcp" }
+    }
+  }
+}
diff --git a/terraform/modules/proxmox/vm/variables.tf b/terraform/modules/proxmox/vm/variables.tf
new file mode 100644
index 0000000..ac37ae6
--- /dev/null
+++ b/terraform/modules/proxmox/vm/variables.tf
@@ -0,0 +1,55 @@
+variable "name" {
+  type = string
+}
+
+variable "target_node" {
+  type = string
+}
+
+variable "template_id" {
+  type = number
+}
+
+variable "cpu" {
+  type = number
+}
+
+variable "cpu_type" {
+  type = string
+}
+
+variable "memory" {
+  type = number
+}
+
+variable "disk_size" {
+  type = number
+}
+
+variable "storage" {
+  type = string
+}
+
+variable "bridge" {
+  type = string
+}
+
+variable "osd_storage" {
+  type    = string
+  default = null
+}
+
+variable "osd_disks" {
+  type    = list(number)
+  default = []
+}
+
+variable "user_data_file_id" {
+  type    = string
+  default = null
+}
+
+variable "mac_address" {
+  description = "Static MAC for VM NIC (for DHCP reservation)."
+ type = string +} diff --git a/terraform/modules/proxmox/vm/versions.tf b/terraform/modules/proxmox/vm/versions.tf new file mode 100644 index 0000000..85ef525 --- /dev/null +++ b/terraform/modules/proxmox/vm/versions.tf @@ -0,0 +1,8 @@ +terraform { + required_providers { + proxmox = { + source = "bpg/proxmox" + version = ">= 0.86.0" + } + } +} diff --git a/terraform/readme.md b/terraform/readme.md new file mode 100644 index 0000000..be891cc --- /dev/null +++ b/terraform/readme.md @@ -0,0 +1,5 @@ +```bash +terraform init +terraform plan -var-file="terraform.tfvars" +terraform apply -var-file="terraform.tfvars" +``` \ No newline at end of file diff --git a/terraform/stacks/k8s/configs/config b/terraform/stacks/k8s/configs/config new file mode 100644 index 0000000..b76944d --- /dev/null +++ b/terraform/stacks/k8s/configs/config @@ -0,0 +1,18 @@ +apiVersion: v1 +clusters: + - cluster: + insecure-skip-tls-verify: true + server: https://localhost:10563 + name: kubernetes +contexts: + - context: + cluster: kubernetes + user: kubernetes-admin + name: kubernetes-admin@kubernetes +current-context: kubernetes-admin@kubernetes +kind: Config +users: + - name: kubernetes-admin + user: + client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURLVENDQWhHZ0F3SUJBZ0lJYVI1WXRlRHdabjR3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TmpBeE1EUXhOak0xTXpSYUZ3MHlOekF4TURReE5qUXdNelJhTUR3eApIekFkQmdOVkJBb1RGbXQxWW1WaFpHMDZZMngxYzNSbGNpMWhaRzFwYm5NeEdUQVhCZ05WQkFNVEVHdDFZbVZ5CmJtVjBaWE10WVdSdGFXNHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFESEN4TmgKS3JBQXdPd2ZjK3U3NW1jaUU4RVRwaGM4blNkRWxtVXNJWFdINE5YWkxCK2dzWGtOdzc2NXBxcWkvVjU4cTI0egpVNHJrOE9xRHFoVmFYT2psZmNFSTh1blZaM3BsVEhGRS9uU00rZWkwOUpEVGpzUlNBR2JTWU9iMnFHWWtpTC9sCnlKOCtNNTR0Q0FMZWRtWWk1SGJOdHpoOFFsWHdvK2ozMHRiU1QvSmtGbXNKaTFubk50Q29KYlRSR3ZNVmFEaWkKZS91cDJzcU8rc3ZHS2RxL2E5NStROE9ST1prL3JuWHVqTzZxcjNUMWNnTmJPQlVLdDNGQ1pXK3gyamRwRzRPRgpqUVc4cUs2eHNKMFgrZmh0MHNMZC9NU1pKajdNL1VBalhYb3N6Zm9qS29IMUd4dHZxU2RCTXFLUjQ2T1ZmVjFhCldhSENvLzIzUnJJdUJPTmxBZ01CQUFHalZqQlVNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUsKQmdnckJnRUZCUWNEQWpBTUJnTlZIUk1CQWY4RUFqQUFNQjhHQTFVZEl3UVlNQmFBRkE1SkFIdkhVZEhURFNPRwpmdmdYR1k1VHkzU3BNQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUJBUUJBTHRKMXJHaUZMaU83NmtZZDBDaVNvM0FICmYxbHdWRjJLREExMENTS2FqdmR2QmZlajI5cm9vYm9mMGM5UHVCVWtNYzR2ZitoeEY1a0lhK21BM3FDRmRFK2cKbW1VUVlFdFJXWWZRTmREYStWbTFVSVBJOGpUOXgvSWRYanpvY0UzL1FQZ0JBVEFTMVRmYVBJRktLZU9qMy9sNApDS0UwMks2RklzUklTVVhsMVdnS093SGxrOEwyMThsUTg0WVFZNG4yd1FSNzM3eTdUTnRLZ3BjeU5VN1ZLdFhnCnQ2Z1p4NkxqbnRVZGRzTlkyazg5Q3dmM0lUSENqN040SDE5Mll3VFBZajd0NkI5Q1Y4SXVaZEtKaWpFNkFYbHMKU2J0WjRYWStiUTdGaWIwM25CbTRSSXdMeEdVV3JMbkFnYzJBRnFGK29xSmc5SFFzdEgxVS8rOGhwWkkzCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + client-key-data: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBeHdzVFlTcXdBTURzSDNQcnUrWm5JaFBCRTZZWFBKMG5SSlpsTENGMWgrRFYyU3dmCm9MRjVEY08rdWFhcW92MWVmS3R1TTFPSzVQRHFnNm9WV2x6bzVYM0JDUExwMVdkNlpVeHhSUDUwalBub3RQU1EKMDQ3RVVnQm0wbURtOXFobUpJaS81Y2lmUGpPZUxRZ0MzblptSXVSMnpiYzRmRUpWOEtQbzk5TFcway95WkJacgpDWXRaNXpiUXFDVzAwUnJ6RldnNG9udjdxZHJLanZyTHhpbmF2MnZlZmtQRGtUbVpQNjUxN296dXFxOTA5WElECld6Z1ZDcmR4UW1WdnNkbzNhUnVEaFkwRnZLaXVzYkNkRi9uNGJkTEMzZnpFbVNZK3pQMUFJMTE2TE0zNkl5cUIKOVJzYmI2a25RVEtpa2VPamxYMWRXbG1od3FQOXQwYXlMZ1RqWlFJREFRQUJBb0lCQUdIU2hxNjlmUXlSeERwZApEV0VtaGs5UUtCY1JET0NFVi9VMGVQcXNZd2ZwcncveHlJV0FVMzg1VWJZS1BGaW9iMVNVS0MvZmdpYXNaSVZiCkJPMkpOZ2o3dWZBOCtrRWdmeDNqNk5OQXYyVTl1V21kdk1QTXFaMm5odUVrTUw3TzJveGdEUGZRbHJyS1FaWGUKRnhMZ1c2Z1FZbmNOOGh2WHVrYnZONkw4b3dsdTFOc01HVFJPdG10NEQ0WVptSnlGKzNsakZQcGF3TFlZL3M5awp5TGVaRXBDd2VCdEpDS1ZyODhaMXVVaVh2Mzg0cGEzMVA2VjFnRUt5SFQya3lGUXFvdWNLUDE0Y0FrazNyb0JGCkJ0cjc1WHBjUHYvZGExY2gvU3VQZDdscnV4UUtpZ1dWOWtNZG1TczVON0c2Rm5ZYS9jbnpxUWovZFNmV3lMUkgKRHZmTUN3MENnWUVBMDc4VXZjUVU2aUZBMm1ZLzNFQW5Zbmg1UzhJTE5OSXVQY1pMTkhqSVp5WGlyRFJ4VjRKNApXMWlZdWhUK0lVVFkwYWptVmhLUStMTkhJK0hzTkZOL2svdmM0cTVaa0czWUlMNk5pbWd3Y3FCNjVnbUMrNlo2ClJJQ3Y3YnBkUm9mYTdCMit3TjcxeEx1S282d2RyblArYmNKbzhaY09LQmYvRDlXa0RmNlZUM3NDZ1lFQThLUlkKNDZRWDMxYlRxSFhTWGhQSHV6QWdvMEFHd2JPRlAxT2pPeG4xVFBWQnNHa2N5dnhwOGtYNUlYS2ZNdExZWnpUbQpqdmpjV1dlNHFZTlZiUTF2VVpxT05aSUVrYjZNbGF6NW0xaVZMa3FucktubkJaaHRNNGtyMzhTUEpPY0dZazlHClVlaDBFZmhOZ3Y2Z1VtTTFBSUJTR1NVcjc1OHUvOFdrMzNCL3NwOENnWUJoQUsxNHpjWWpCLzdVem5pODVxcmsKUW5xV3lSc25KSTVJZ0huZFhPTzUxVEpGWDNUNCtPMDRNNXNyekFncnA0V0liczZ1YWF6K01lc0tOaXBtUWtZMAp2ZklQNm4xZlcrTGlCVW1FT1h6UVZsSlc1YzZhaUVhRThVc25KZlFySm51VkpYOUlqaHVhOTZ0b2xhVzNVSzRqCkRDZlZYVFVBQ3hZdTQ5bFhDK1RNMXdLQmdRRE43cGJ6R0RZbHRwUWpFZEVaR1N4UGtId2R1R2tQMHFVdzhFNDgKQVpiZWFQUHlGOEhBSkFvMmZwTVlnSktrVjdOQmZ3L2ZRakN2Z2dlUmFRYnQ4QlZYYkVCT3I4cWhQc1BvUXNMSQpvaUhvSDVNbU82K3NKaWt0ZFRIS3FOY202VjJaTytZZHFpUEtUUWRvRnFiMFdsbTlPQk1KMmJtanNrSHlPQjFECjZXNGVXUUtCZ1FERWY4bzdNZUxLditZdXliTW85MnZXaDhiODBsVDhyVGxYa1hDakU3TkRtU1FUbWZEOVFRNFIKeWJ4SHlmR2dwZFIzN1EraWwxWGxoSllmZERFOTNEZW5ZeXdQaUNyWnJNaVVWcWRRQW1JMGc2WjRCSi91RDNZNwpPc3JSUUhvL0VBSnc5aUdHeXVzUmpyNEpPMUFrWDZwbGo5VTU4ZWtIRStSMGh0RW5RUXRzaXc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= diff --git a/terraform/stacks/k8s/main.tf b/terraform/stacks/k8s/main.tf new file mode 100644 index 0000000..d6a0e76 --- /dev/null +++ b/terraform/stacks/k8s/main.tf @@ -0,0 +1,122 @@ +module "metallb_helm" { + source = "../../modules/k8s/metallb/helm" + + providers = { + helm = helm + } +} + +module "metallb_resources" { + source = "../../modules/k8s/metallb/resources" + + providers = { + kubernetes = kubernetes + } + + addresses = ["192.168.0.230-192.168.0.250"] + + depends_on = [module.metallb_helm] +} + +module "nginx_ingress" { + source = "../../modules/k8s/nginx_ingress/helm" + + pool_name = module.metallb_resources.pool_name + + depends_on = [module.metallb_resources] +} + +# # ceph +# module "ceph_csi_rbd" { +# source = "../../modules/k8s/ceph/k8s-ceph-csi-rbd" + +# providers = { +# helm = helm +# kubernetes = kubernetes +# } + +# namespace = var.ceph_csi_namespace +# chart_version = var.ceph_csi_chart_version + +# ceph_cluster_id = var.ceph_cluster_id +# ceph_monitors = var.ceph_monitors +# } + +# module "ceph_rbd_storage" { +# source = "../../modules/k8s/ceph/k8s-ceph-rbd-storage" + +# providers = { +# kubernetes = kubernetes +# } + +# namespace = var.ceph_csi_namespace +# ceph_cluster_id = var.ceph_cluster_id +# ceph_rbd_pool = var.ceph_rbd_pool + +# ceph_user_id = var.ceph_user_id 
+#   ceph_user_key   = var.ceph_user_key
+
+#   # so the Secret/StorageClass are created after the CSI install
+#   depends_on = [module.ceph_csi_rbd]
+# }
+
+module "openebs" {
+  source            = "../../modules/k8s/openebs"
+  storageclass_name = "openebs-hostpath-custom"
+  base_path         = "/var/openebs/local/"
+}
+
+module "crunchy_operator" {
+  source = "../../modules/k8s/crunchy-data/operator"
+
+  providers = {
+    kubernetes = kubernetes
+    helm       = helm
+  }
+
+  namespace        = var.crunchy_data_namespace
+  chart_version    = var.pgo_chart_version
+  release_name     = "pgo"
+  single_namespace = true
+  replicas         = 1
+  debug            = false
+}
+
+module "crunchy_postgres_cluster" {
+  source = "../../modules/k8s/crunchy-data/postgres-cluster"
+
+  providers = {
+    kubernetes = kubernetes
+  }
+
+  namespace          = module.crunchy_operator.namespace
+  name               = var.cluster_name
+  storage_class_name = var.storage_class_name
+
+  postgres_version = 16
+  instance_storage = "20Gi"
+  backup_storage   = "20Gi"
+
+  # important: the CRDs must exist first, i.e. after the operator install
+  depends_on = [module.crunchy_operator]
+}
+
+# valkey
+module "valkey" {
+  source = "../../modules/k8s/valkey"
+
+  providers = {
+    kubernetes = kubernetes
+    helm       = helm
+  }
+
+  namespace        = var.valkey_namespace
+  create_namespace = true
+
+  release_name  = var.release_name
+  chart_version = var.chart_version
+
+  values = var.values
+
+  valkey_password = "password"
+}
diff --git a/terraform/stacks/k8s/providers.tf b/terraform/stacks/k8s/providers.tf
new file mode 100644
index 0000000..ed35b35
--- /dev/null
+++ b/terraform/stacks/k8s/providers.tf
@@ -0,0 +1,9 @@
+provider "kubernetes" {
+  config_path = var.kubeconfig_path
+}
+
+provider "helm" {
+  kubernetes = {
+    config_path = var.kubeconfig_path
+  }
+}
diff --git a/terraform/stacks/k8s/variables.tf b/terraform/stacks/k8s/variables.tf
new file mode 100644
index 0000000..29442a7
--- /dev/null
+++ b/terraform/stacks/k8s/variables.tf
@@ -0,0 +1,84 @@
+variable "kubeconfig_path" {
+  type        = string
+  description = "Path to kubeconfig"
+}
+
+# ceph
+variable "ceph_cluster_id" {
+  type        = string
+  description = "Ceph FSID (ceph fsid)"
+}
+
+variable "ceph_monitors" {
+  type        = list(string)
+  description = "Ceph MON endpoints, e.g. [\"192.168.0.100:6789\", \"192.168.0.101:6789\"]"
+}
+
+variable "ceph_rbd_pool" {
+  type    = string
+  default = "k8s-rbd"
+}
+
+variable "ceph_user_id" {
+  type    = string
+  default = "k8s-rbd-csi" # without the 'client.' prefix
+
+variable "ceph_user_key" {
+  type        = string
+  sensitive   = true
+  description = "Key from: ceph auth get client.k8s-rbd-csi"
+}
+
+variable "ceph_csi_namespace" {
+  type    = string
+  default = "ceph-csi"
+}
+
+variable "ceph_csi_chart_version" {
+  type    = string
+  default = "3.11.0"
+}
+
+# crunchy-data
+variable "storage_class_name" {
+  type        = string
+  description = "Your Ceph RBD StorageClass"
+  default     = "ceph-rbd"
+}
+
+variable "crunchy_data_namespace" {
+  type    = string
+  default = "postgres-operator"
+}
+
+variable "pgo_chart_version" {
+  type    = string
+  default = "6.0.0"
+}
+
+variable "cluster_name" {
+  type    = string
+  default = "hippo"
+}
+
+# valkey
+variable "valkey_namespace" {
+  type    = string
+  default = "valkey"
+}
+
+variable "release_name" {
+  type    = string
+  default = "valkey"
+}
+
+variable "chart_version" {
+  type    = string
+  default = "0.9.2"
+}
+
+variable "values" {
+  type    = list(string)
+  default = []
+}
diff --git a/terraform/stacks/k8s/versions.tf b/terraform/stacks/k8s/versions.tf
new file mode 100644
index 0000000..27dd139
--- /dev/null
+++ b/terraform/stacks/k8s/versions.tf
@@ -0,0 +1,14 @@
+terraform {
+  required_version = ">= 1.5.0"
+
+  required_providers {
+    helm = {
+      source  = "hashicorp/helm"
+      version = ">= 3.0.0"
+    }
+    kubernetes = {
+      source  = "hashicorp/kubernetes"
+      version = ">= 2.0.0"
+    }
+  }
+}
diff --git a/terraform/stacks/powerdns/main.tf b/terraform/stacks/powerdns/main.tf
new file mode 100644
index 0000000..c74e43d
--- /dev/null
+++ b/terraform/stacks/powerdns/main.tf
@@ -0,0 +1,41 @@
+# ---------------------------
+# Zones (many)
+# ---------------------------
+module "zones" {
+  for_each = var.zones
+  source   = "../../modules/powerdns/zone"
+
+  zone_name        = each.key
+  zone_kind        = each.value.zone_kind
+  soa_edit_api     = each.value.soa_edit_api
+  zone_nameservers = each.value.zone_nameservers
+}
+
+# ---------------------------
+# Records (flatten -> many)
+# ---------------------------
+locals {
+  records_flat = merge([
+    for zone_name, z in var.zones : {
+      for rec_key, rec in z.records :
+      "${zone_name}::${rec_key}" => {
+        zone_name = zone_name
+        name      = rec.name
+        type      = rec.type
+        ttl       = rec.ttl
+        records   = rec.records
+      }
+    }
+  ]...)
+}
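+# For example (hypothetical zone; names and IPs are illustrative), an input of
+#   zones = {
+#     "lab.local." = {
+#       zone_kind        = "Master"
+#       soa_edit_api     = "DEFAULT"
+#       zone_nameservers = ["ns1.lab.local."]
+#       records = {
+#         ns1 = { name = "ns1.lab.local.", type = "A", ttl = 300, records = ["192.168.0.10"] }
+#       }
+#     }
+#   }
+# flattens to
+#   records_flat = {
+#     "lab.local.::ns1" = {
+#       zone_name = "lab.local."
+#       name      = "ns1.lab.local."
+#       type      = "A"
+#       ttl       = 300
+#       records   = ["192.168.0.10"]
+#     }
+#   }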
+
+module "records" {
+  for_each = local.records_flat
+  source   = "../../modules/powerdns/record"
+
+  zone_name = module.zones[each.value.zone_name].name
+  name      = each.value.name
+  type      = each.value.type
+  ttl       = each.value.ttl
+  records   = each.value.records
+}
diff --git a/terraform/stacks/powerdns/providers.tf b/terraform/stacks/powerdns/providers.tf
new file mode 100644
index 0000000..633d509
--- /dev/null
+++ b/terraform/stacks/powerdns/providers.tf
@@ -0,0 +1,4 @@
+provider "powerdns" {
+  server_url = var.pdns_server_url
+  api_key    = var.pdns_api_key
+}
diff --git a/terraform/stacks/powerdns/variables.tf b/terraform/stacks/powerdns/variables.tf
new file mode 100644
index 0000000..0c98ab1
--- /dev/null
+++ b/terraform/stacks/powerdns/variables.tf
@@ -0,0 +1,23 @@
+variable "pdns_server_url" {
+  type = string
+}
+
+variable "pdns_api_key" {
+  type      = string
+  sensitive = true
+}
+
+variable "zones" {
+  type = map(object({
+    zone_kind        = string
+    soa_edit_api     = string
+    zone_nameservers = list(string)
+
+    records = map(object({
+      name    = string
+      type    = string
+      ttl     = number
+      records = list(string)
+    }))
+  }))
+}
diff --git a/terraform/stacks/powerdns/versions.tf b/terraform/stacks/powerdns/versions.tf
new file mode 100644
index 0000000..01eccbd
--- /dev/null
+++ b/terraform/stacks/powerdns/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+  required_version = ">= 1.3.0"
+
+  required_providers {
+    powerdns = {
+      source = "pan-net/powerdns"
+      # version = "1.5.0" # optional: pin the provider version
+    }
+  }
+}
diff --git a/terraform/stacks/proxmox/lxc/main.tf b/terraform/stacks/proxmox/lxc/main.tf
new file mode 100644
index 0000000..d6e5ee2
--- /dev/null
+++ b/terraform/stacks/proxmox/lxc/main.tf
@@ -0,0 +1,37 @@
+module "lxc_packer_main" {
+  source = "../../../modules/proxmox/lxc"
+
+  vm_id       = var.lxc_packer_main_vm_id
+  hostname    = var.lxc_packer_main_hostname
+  target_node = var.target_node
+
+  template_file_id = var.lxc_template_file_id
+  os_type          = var.lxc_os_type
+
+  unprivileged = var.lxc_unprivileged
+  nesting      = var.lxc_nesting
+
+  cores     = var.lxc_cores
+  cpu_units = var.lxc_cpu_units
+  memory    = var.lxc_memory
+  swap      = var.lxc_swap
+
+  rootfs_storage  = var.lxc_rootfs_storage
+  rootfs_size_gib = var.lxc_rootfs_size_gib
+
+  bridge     = var.bridge
+  netif_name = var.lxc_netif_name
+  firewall   = var.lxc_firewall
+
+  ipv4_address = var.lxc_ipv4_address
+  ipv4_gateway = var.lxc_ipv4_gateway
+
+  dns_domain  = var.lxc_dns_domain
+  dns_servers = var.lxc_dns_servers
+
+  started       = var.lxc_started
+  start_on_boot = var.lxc_start_on_boot
+
+  password        = var.lxc_root_password
+  ssh_public_keys = var.lxc_ssh_public_keys
+}
diff --git a/terraform/stacks/proxmox/lxc/providers.tf b/terraform/stacks/proxmox/lxc/providers.tf
new file mode 100755
index 0000000..a41ad5f
--- /dev/null
+++ b/terraform/stacks/proxmox/lxc/providers.tf
@@ -0,0 +1,10 @@
+provider "proxmox" {
+  endpoint  = var.pm_api_url
+  api_token = var.pm_api_token
+  insecure  = true
+
+  ssh {
+    username = var.pm_user
+    password = var.pm_password
+  }
+}
diff --git a/terraform/stacks/proxmox/lxc/variables.tf b/terraform/stacks/proxmox/lxc/variables.tf
new file mode 100644
index 0000000..40981a3
--- /dev/null
+++ b/terraform/stacks/proxmox/lxc/variables.tf
@@ -0,0 +1,137 @@
+# --- Proxmox provider creds ---
+variable "pm_api_url" {
+  type = string
+}
+
+variable "pm_api_token" {
+  type      = string
+  sensitive = true
+}
+
+variable "pm_user" {
+  type = string
+}
+
+variable "pm_password" {
+  type      = string
+  sensitive = true
+}
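+
+# Example terraform.tfvars entries (placeholder values only; the real file is
+# gitignored):
+#   pm_api_url   = "https://proxmox:8006/api2/json"
+#   pm_api_token = "root@pam!terraform=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+#   pm_user      = "root"
+#   pm_password  = "<node ssh password>"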
"target_node" { + type = string +} + +variable "bridge" { + type = string + default = "vmbr0" +} + +# --- LXC конкретный контейнер --- +variable "lxc_packer_main_vm_id" { + type = number +} + +variable "lxc_packer_main_hostname" { + type = string +} + +variable "lxc_template_file_id" { + type = string +} + +variable "lxc_os_type" { + type = string + default = "debian" +} + +variable "lxc_unprivileged" { + type = bool + default = true +} + +variable "lxc_nesting" { + type = bool + default = true +} + +variable "lxc_cores" { + type = number + default = 1 +} + +variable "lxc_cpu_units" { + type = number + default = 1024 +} + +variable "lxc_memory" { + type = number + default = 512 +} + +variable "lxc_swap" { + type = number + default = 512 +} + +variable "lxc_rootfs_storage" { + type = string + default = "local-lvm" +} + +variable "lxc_rootfs_size_gib" { + type = number + default = 8 +} + +variable "lxc_netif_name" { + type = string + default = "eth0" +} + +variable "lxc_firewall" { + type = bool + default = true +} + +variable "lxc_ipv4_address" { + type = string + default = "dhcp" +} + +variable "lxc_ipv4_gateway" { + type = string + default = null +} + +variable "lxc_dns_domain" { + type = string + default = null +} + +variable "lxc_dns_servers" { + type = list(string) + default = [] +} + +variable "lxc_started" { + type = bool + default = true +} + +variable "lxc_start_on_boot" { + type = bool + default = true +} + +variable "lxc_root_password" { + type = string + sensitive = true + default = null +} + +variable "lxc_ssh_public_keys" { + type = list(string) + default = [] +} diff --git a/terraform/stacks/proxmox/lxc/versions.tf b/terraform/stacks/proxmox/lxc/versions.tf new file mode 100644 index 0000000..bb496bf --- /dev/null +++ b/terraform/stacks/proxmox/lxc/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.6" + + required_providers { + proxmox = { + source = "bpg/proxmox" + version = "0.86.0" + } + } +} diff --git a/terraform/stacks/proxmox/vm/cloud-init/user-data.yaml.tpl b/terraform/stacks/proxmox/vm/cloud-init/user-data.yaml.tpl new file mode 100644 index 0000000..2c55300 --- /dev/null +++ b/terraform/stacks/proxmox/vm/cloud-init/user-data.yaml.tpl @@ -0,0 +1,36 @@ +#cloud-config +hostname: ${hostname} +manage_etc_hosts: true + +package_update: true +package_upgrade: true + +packages: + - parted + +# user +users: + - name: "adminuser" + groups: sudo + sudo: ALL=(ALL) NOPASSWD:ALL + lock_passwd: false + passwd: "$6$qL4GPP3AhSodbF9U$Lu4.VSpCSlAVPNIZyPNme0AH8HhbVYE6SAm3P3Er7KSLIYydj799tZBz/n6NRzzRYhyQh9a4h8m8WCbjw2nXg1" + shell: /bin/bash + ssh_authorized_keys: + - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBcTy4Zcj3MHkW7XvnZhakl64vZXnjzDJymYlo+Ax8FM dev-kyiv01-vm-default-main-01-adminuser" + +ssh_pwauth: false + +runcmd: + - | + set -euxo pipefail + + # растянуть extended + LVM partition до конца диска + growpart /dev/sda 2 || true + growpart /dev/sda 5 || true + parted -s /dev/sda "resizepart 2 100%" "resizepart 5 100%" || true + partprobe /dev/sda || true + + # растянуть PV -> LV(root) -> FS + pvresize /dev/sda5 + lvextend -l +100%FREE -r /dev/vg0/root diff --git a/terraform/stacks/proxmox/vm/locals.tf b/terraform/stacks/proxmox/vm/locals.tf new file mode 100644 index 0000000..e034b26 --- /dev/null +++ b/terraform/stacks/proxmox/vm/locals.tf @@ -0,0 +1,72 @@ +locals { + vms = { + dev_kyiv01_vm_dns_main_01 = { + name = "dev-kyiv01-vm-dns-main-01" + cpu = 2 + cpu_type = "x86-64-v2" + memory = 3072 + disk_size = 20 + mac = "02:7A:4C:11:90:64" + } + + 
+    dev_kyiv01_vm_ntp_main_01 = {
+      name      = "dev-kyiv01-vm-ntp-main-01"
+      cpu       = 1
+      cpu_type  = "x86-64-v2"
+      memory    = 2048
+      disk_size = 8
+      mac       = "02:7A:4C:11:90:65"
+    }
+
+    dev_kyiv01_vm_ceph_main_01 = {
+      name        = "dev-kyiv01-vm-ceph-main-01"
+      cpu         = 2
+      cpu_type    = "x86-64-v2"
+      memory      = 4096
+      disk_size   = 30
+      mac         = "02:7A:4C:11:90:66"
+      osd_storage = "ceph-osd"
+      osd_disks   = [150, 150]
+    }
+
+    dev_kyiv01_vm_ceph_main_02 = {
+      name        = "dev-kyiv01-vm-ceph-main-02"
+      cpu         = 2
+      cpu_type    = "x86-64-v2"
+      memory      = 4096
+      disk_size   = 30
+      mac         = "02:7A:4C:11:90:67"
+      osd_storage = "ceph-osd"
+      osd_disks   = [150, 150]
+    }
+
+    dev_kyiv01_vm_ceph_main_03 = {
+      name        = "dev-kyiv01-vm-ceph-main-03"
+      cpu         = 2
+      cpu_type    = "x86-64-v2"
+      memory      = 4096
+      disk_size   = 30
+      mac         = "02:7A:4C:11:90:68"
+      osd_storage = "ceph-osd"
+      osd_disks   = [150, 150]
+    }
+
+    dev_kyiv01_vm_k8s_master_01 = {
+      name      = "dev-kyiv01-vm-k8s-master-01"
+      cpu       = 2
+      cpu_type  = "x86-64-v2"
+      memory    = 4096
+      disk_size = 40
+      mac       = "02:7A:4C:11:90:69"
+    }
+
+    dev_kyiv01_vm_k8s_worker_01 = {
+      name      = "dev-kyiv01-vm-k8s-worker-01"
+      cpu       = 4
+      cpu_type  = "x86-64-v2"
+      memory    = 8192
+      disk_size = 60
+      mac       = "02:7A:4C:11:90:6A"
+    }
+  }
+}
diff --git a/terraform/stacks/proxmox/vm/main.tf b/terraform/stacks/proxmox/vm/main.tf
new file mode 100644
index 0000000..501750f
--- /dev/null
+++ b/terraform/stacks/proxmox/vm/main.tf
@@ -0,0 +1,41 @@
+# 1) For each VM, create a cloud-init user-data snippet
+resource "proxmox_virtual_environment_file" "user_data" {
+  for_each = local.vms
+
+  content_type = "snippets"
+  datastore_id = var.snippets_storage
+  node_name    = var.target_node
+
+  source_raw {
+    data = templatefile("${path.module}/cloud-init/user-data.yaml.tpl", {
+      hostname = each.value.name
+    })
+
+    file_name = "user-data-${each.value.name}.yaml"
+  }
+}
+
+# 2) Create the VMs and attach their user-data files
+module "vm" {
+  source   = "../../../modules/proxmox/vm"
+  for_each = local.vms
+
+  name        = each.value.name
+  target_node = var.target_node
+  template_id = var.template_id
+
+  cpu      = each.value.cpu
+  cpu_type = try(each.value.cpu_type, "qemu64")
+  memory   = each.value.memory
+
+  disk_size = each.value.disk_size
+  storage   = var.storage
+  bridge    = var.bridge
+
+  osd_storage = try(each.value.osd_storage, null)
+  osd_disks   = try(each.value.osd_disks, [])
+
+  user_data_file_id = proxmox_virtual_environment_file.user_data[each.key].id
+
+  mac_address = each.value.mac
+}
diff --git a/terraform/stacks/proxmox/vm/providers.tf b/terraform/stacks/proxmox/vm/providers.tf
new file mode 100644
index 0000000..ca453dd
--- /dev/null
+++ b/terraform/stacks/proxmox/vm/providers.tf
@@ -0,0 +1,17 @@
+provider "proxmox" {
+  endpoint  = var.pm_api_url
+  api_token = var.pm_api_token
+  insecure  = true
+
+  # snippet uploads go over SSH; the node block pins the address/port to use
+  ssh {
+    agent       = false
+    username    = "root"
+    private_key = file("/workspaces/infrastructure/.ssh/dev-kyiv01-proxmox-main-01")
+
+    node {
+      name    = "proxmox-main-kyiv-01"
+      address = "176.36.225.227"
+      port    = 25105
+    }
+  }
+}
diff --git a/terraform/stacks/proxmox/vm/variables.tf b/terraform/stacks/proxmox/vm/variables.tf
new file mode 100644
index 0000000..72bca51
--- /dev/null
+++ b/terraform/stacks/proxmox/vm/variables.tf
@@ -0,0 +1,50 @@
+variable "pm_api_url" {
+  type        = string
+  description = "Proxmox API endpoint, e.g. https://proxmox:8006/api2/json"
+}
+
+variable "pm_api_token" {
+  type        = string
+  description = "Proxmox API token: root@pam!terraform=..."
+ sensitive = true +} + +variable "pm_user" { + type = string + description = "SSH username for Proxmox node" + default = "root" +} + +variable "pm_password" { + type = string + description = "SSH password for Proxmox node" + sensitive = true +} + +variable "target_node" { + type = string + description = "Target Proxmox node name" +} + +variable "template_id" { + type = number + description = "Template VM ID to clone from" +} + +variable "storage" { + type = string + description = "Default datastore for OS disk" + default = "local-lvm" +} + +variable "bridge" { + type = string + description = "Default VM bridge" + default = "vmbr0" +} + +variable "snippets_storage" { + type = string + description = "Datastore where 'snippets' content is enabled (usually 'local')" + default = "local" +} diff --git a/terraform/stacks/proxmox/vm/versions.tf b/terraform/stacks/proxmox/vm/versions.tf new file mode 100644 index 0000000..bb496bf --- /dev/null +++ b/terraform/stacks/proxmox/vm/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.6" + + required_providers { + proxmox = { + source = "bpg/proxmox" + version = "0.86.0" + } + } +}