Hrankin, Aleksandr (contracted)
2026-02-19 11:34:13 +00:00
commit f243f440c3
191 changed files with 6183 additions and 0 deletions

View File

@@ -0,0 +1,19 @@
resource "helm_release" "ceph_csi_rbd" {
name = "ceph-csi-rbd"
namespace = kubernetes_namespace_v1.this.metadata[0].name
repository = "https://ceph.github.io/csi-charts"
chart = "ceph-csi-rbd"
version = var.chart_version
create_namespace = false
values = [yamlencode({
csiConfig = [{
clusterID = var.ceph_cluster_id
monitors = var.ceph_monitors
}]
provisioner = {
replicaCount = 1
}
})]
}

View File

@@ -0,0 +1,3 @@
resource "kubernetes_namespace_v1" "this" {
metadata { name = var.namespace }
}

View File

@@ -0,0 +1,15 @@
variable "namespace" {
type = string
}
variable "chart_version" {
type = string
}
variable "ceph_cluster_id" {
type = string
}
variable "ceph_monitors" {
type = list(string)
}

View File

@@ -0,0 +1,6 @@
terraform {
required_providers {
helm = { source = "hashicorp/helm" }
kubernetes = { source = "hashicorp/kubernetes" }
}
}
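
A minimal root-module call for this module (the source path and all values below are illustrative assumptions, not part of the commit):

module "ceph_csi_rbd" {
  source          = "./modules/ceph-csi-rbd"
  namespace       = "ceph-csi-rbd"
  chart_version   = "3.11.0" # example pin; check the csi-charts repo for current releases
  ceph_cluster_id = "<ceph fsid>"
  ceph_monitors   = ["10.0.0.11:6789", "10.0.0.12:6789"]
}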

View File

@@ -0,0 +1,13 @@
resource "kubernetes_secret_v1" "csi_rbd_secret" {
metadata {
name = "csi-rbd-secret"
namespace = var.namespace
}
data = {
userID = var.ceph_user_id
userKey = var.ceph_user_key
}
type = "Opaque"
}

View File

@@ -0,0 +1,27 @@
resource "kubernetes_storage_class_v1" "ceph_rbd" {
metadata {
name = "ceph-rbd"
# to make this the default StorageClass:
# annotations = {
# "storageclass.kubernetes.io/is-default-class" = "true"
# }
}
storage_provisioner = "rbd.csi.ceph.com"
reclaim_policy = "Delete"
volume_binding_mode = "Immediate"
allow_volume_expansion = true
parameters = {
clusterID = var.ceph_cluster_id
pool = var.ceph_rbd_pool
# NOTE: these parameter keys are literal strings and must match exactly
"csi.storage.k8s.io/provisioner-secret-name" = kubernetes_secret_v1.csi_rbd_secret.metadata[0].name
"csi.storage.k8s.io/provisioner-secret-namespace" = kubernetes_secret_v1.csi_rbd_secret.metadata[0].namespace
"csi.storage.k8s.io/node-stage-secret-name" = kubernetes_secret_v1.csi_rbd_secret.metadata[0].name
"csi.storage.k8s.io/node-stage-secret-namespace" = kubernetes_secret_v1.csi_rbd_secret.metadata[0].namespace
# ceph-csi also requires the controller-expand secret when allow_volume_expansion = true
"csi.storage.k8s.io/controller-expand-secret-name" = kubernetes_secret_v1.csi_rbd_secret.metadata[0].name
"csi.storage.k8s.io/controller-expand-secret-namespace" = kubernetes_secret_v1.csi_rbd_secret.metadata[0].namespace
imageFeatures = "layering"
}
}

View File

@@ -0,0 +1,20 @@
variable "namespace" {
type = string
}
variable "ceph_cluster_id" {
type = string
}
variable "ceph_rbd_pool" {
type = string
}
variable "ceph_user_id" {
type = string
}
variable "ceph_user_key" {
type = string
sensitive = true
}

View File

@@ -0,0 +1,5 @@
terraform {
required_providers {
kubernetes = { source = "hashicorp/kubernetes" }
}
}
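
A sketch of how this secret/StorageClass module might be wired up (the path and values are assumptions; the key would come from ceph auth get-key, never from the repo):

module "ceph_rbd_storage" {
  source          = "./modules/ceph-rbd-storageclass"
  namespace       = "ceph-csi-rbd"
  ceph_cluster_id = "<ceph fsid>"
  ceph_rbd_pool   = "kubernetes"
  ceph_user_id    = "kubernetes"
  ceph_user_key   = var.ceph_user_key # supply via TF_VAR_ceph_user_key
}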

View File

@@ -0,0 +1,26 @@
resource "helm_release" "pgo" {
name = var.release_name
namespace = kubernetes_namespace_v1.this.metadata[0].name
# Crunchy publishes the pgo Helm chart to an OCI registry:
# helm install pgo oci://registry.developers.crunchydata.com/crunchydata/pgo
repository = "oci://registry.developers.crunchydata.com/crunchydata"
chart = "pgo"
version = var.chart_version
create_namespace = false
values = [
yamlencode({
# safe defaults, nothing fancy
debug = var.debug
replicas = var.replicas
# set single_namespace = true to restrict the operator to this namespace only
singleNamespace = var.single_namespace
installCRDs = true
})
]
}

View File

@@ -0,0 +1,5 @@
resource "kubernetes_namespace_v1" "this" {
metadata {
name = var.namespace
}
}

View File

@@ -0,0 +1,7 @@
output "namespace" {
value = kubernetes_namespace_v1.this.metadata[0].name
}
output "release_name" {
value = helm_release.pgo.name
}

View File

@@ -0,0 +1,33 @@
variable "namespace" {
type = string
description = "Namespace, куда ставим Crunchy operator"
default = "postgres-operator"
}
variable "release_name" {
type = string
description = "Helm release name"
default = "pgo"
}
variable "chart_version" {
type = string
description = "Версия чарта pgo (пинить обязательно для воспроизводимости)"
default = "6.0.0"
}
variable "debug" {
type = bool
default = false
}
variable "replicas" {
type = number
default = 1
}
variable "single_namespace" {
type = bool
description = "Если true — оператор управляет кластерами только в этом namespace"
default = true
}

View File

@@ -0,0 +1,6 @@
terraform {
required_providers {
helm = { source = "hashicorp/helm" }
kubernetes = { source = "hashicorp/kubernetes" }
}
}
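
Hypothetical root-module usage (the source path is assumed; the defaults already cover namespace and release name):

module "pgo" {
  source        = "./modules/pgo"
  chart_version = "6.0.0" # matches the module default; pinned explicitly for clarity
}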

View File

@@ -0,0 +1,60 @@
resource "kubernetes_manifest" "postgres_cluster" {
manifest = {
apiVersion = "postgres-operator.crunchydata.com/v1beta1"
kind = "PostgresCluster"
metadata = {
name = var.name
namespace = var.namespace
}
spec = {
postgresVersion = var.postgres_version
instances = [
{
name = "instance1"
dataVolumeClaimSpec = {
storageClassName = var.storage_class_name
accessModes = ["ReadWriteOnce"]
resources = {
requests = {
storage = var.instance_storage
}
}
}
}
]
backups = {
pgbackrest = {
repos = [
{
name = "repo1"
volume = {
volumeClaimSpec = {
storageClassName = var.storage_class_name
accessModes = ["ReadWriteOnce"]
resources = {
requests = {
storage = var.backup_storage
}
}
}
}
}
]
}
}
users = [
{
name = var.gitlab_db_user
databases = [
var.gitlab_db_name
]
}
]
}
}
}

View File

@@ -0,0 +1,7 @@
output "cluster_name" {
value = kubernetes_manifest.postgres_cluster.manifest["metadata"]["name"]
}
output "namespace" {
value = kubernetes_manifest.postgres_cluster.manifest["metadata"]["namespace"]
}

View File

@@ -0,0 +1,46 @@
variable "namespace" {
type = string
description = "Namespace, где будет PostgresCluster"
default = "postgres-operator"
}
variable "name" {
type = string
description = "Имя PostgresCluster"
default = "hippo"
}
variable "storage_class_name" {
type = string
description = "StorageClass для PVC (твой Ceph CSI RBD), например: ceph-rbd"
default = "ceph-rbd"
}
variable "postgres_version" {
type = number
description = "Major версия PostgreSQL (ставь ту, которую поддерживает твой CPK)"
default = 16
}
variable "instance_storage" {
type = string
description = "Размер диска под data"
default = "10Gi"
}
variable "backup_storage" {
type = string
description = "Размер диска под pgBackRest repo"
default = "10Gi"
}
variable "gitlab_db_user" {
type = string
default = "gitlab"
}
variable "gitlab_db_name" {
type = string
default = "gitlabhq_production"
}

View File

@@ -0,0 +1,7 @@
terraform {
required_providers {
kubernetes = {
source = "hashicorp/kubernetes"
}
}
}
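
A usage sketch, assuming the operator module above is named module.pgo (cluster name and sizes are illustrative):

module "gitlab_db" {
  source             = "./modules/postgres-cluster"
  name               = "gitlab-db"
  namespace          = module.pgo.namespace
  storage_class_name = "ceph-rbd"
  postgres_version   = 16
  instance_storage   = "20Gi"
}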

View File

@@ -0,0 +1,7 @@
resource "helm_release" "metallb" {
name = "metallb"
repository = "https://metallb.github.io/metallb"
chart = "metallb"
namespace = "metallb-system"
create_namespace = true
}

View File

@@ -0,0 +1,7 @@
terraform {
required_providers {
helm = {
source = "hashicorp/helm"
}
}
}

View File

@@ -0,0 +1,31 @@
# The pool of IP addresses from which MetalLB assigns external IPs to Services of type LoadBalancer.
resource "kubernetes_manifest" "metallb_ip_pool" {
manifest = {
apiVersion = "metallb.io/v1beta1"
kind = "IPAddressPool"
metadata = {
name = var.pool_name
namespace = var.namespace
}
spec = {
addresses = var.addresses
}
}
}
# Tells MetalLB to announce addresses from this pool in L2 mode.
resource "kubernetes_manifest" "metallb_l2" {
manifest = {
apiVersion = "metallb.io/v1beta1"
kind = "L2Advertisement"
metadata = {
name = var.l2_name
namespace = var.namespace
}
spec = {
ipAddressPools = [var.pool_name]
}
}
depends_on = [kubernetes_manifest.metallb_ip_pool]
}

View File

@@ -0,0 +1,3 @@
output "pool_name" {
value = var.pool_name
}

View File

@@ -0,0 +1,22 @@
variable "addresses" {
type = list(string)
description = "MetalLB address pool ranges"
}
variable "namespace" {
type = string
description = "Namespace where MetalLB is installed"
default = "metallb-system"
}
variable "pool_name" {
type = string
description = "IPAddressPool name"
default = "default-pool"
}
variable "l2_name" {
type = string
description = "L2Advertisement name"
default = "default-l2"
}

View File

@@ -0,0 +1,7 @@
terraform {
required_providers {
kubernetes = {
source = "hashicorp/kubernetes"
}
}
}
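
A root-module sketch tying the two MetalLB modules together (paths and the address range are assumptions). The IPAddressPool/L2Advertisement resources need the MetalLB CRDs, so the config module must apply after the chart:

module "metallb" {
  source = "./modules/metallb"
}

module "metallb_config" {
  source     = "./modules/metallb-config"
  addresses  = ["192.168.1.240-192.168.1.250"]
  depends_on = [module.metallb] # CRDs must exist before the manifests apply
}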

View File

@@ -0,0 +1,18 @@
locals {
# merge() is shallow: nesting controller maps in both arguments would
# drop service.type whenever the loadBalancerIP override applies,
# so build the service map in a single merge instead
ingress_nginx_values = {
controller = {
service = merge(
{
type = "LoadBalancer"
# pin the Service to the MetalLB pool (classic annotation; newer
# MetalLB releases also accept metallb.io/address-pool)
annotations = {
"metallb.universe.tf/address-pool" = var.pool_name
}
},
var.ingress_lb_ip == null ? {} : { loadBalancerIP = var.ingress_lb_ip }
)
}
}
}

View File

@@ -0,0 +1,9 @@
resource "helm_release" "ingress_nginx" {
name = "ingress-nginx"
repository = "https://kubernetes.github.io/ingress-nginx"
chart = "ingress-nginx"
namespace = "ingress-nginx"
create_namespace = true
values = [yamlencode(local.ingress_nginx_values)]
}

View File

@@ -0,0 +1,10 @@
variable "ingress_lb_ip" {
type = string
description = "Static LB IP for ingress-nginx controller Service (must be from MetalLB pool). Leave null for dynamic."
default = null
}
variable "pool_name" {
type = string
description = "MetalLB address pool name for ingress-nginx Service annotation"
}
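
A hedged usage sketch (the path and IP are assumptions; the IP must fall inside the MetalLB pool):

module "ingress_nginx" {
  source        = "./modules/ingress-nginx"
  pool_name     = module.metallb_config.pool_name
  ingress_lb_ip = "192.168.1.240" # or omit for a dynamically assigned address
}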

View File

@@ -0,0 +1,8 @@
resource "helm_release" "openebs" {
name = var.release_name
repository = "https://openebs.github.io/openebs"
chart = "openebs"
version = var.chart_version
namespace = var.namespace
create_namespace = true
}

View File

@@ -0,0 +1,25 @@
resource "kubernetes_storage_class_v1" "openebs_hostpath" {
metadata {
name = var.storageclass_name
annotations = {
"storageclass.kubernetes.io/is-default-class" = "true"
"openebs.io/cas-type" = "local"
}
}
storage_provisioner = "openebs.io/local"
reclaim_policy = "Delete"
volume_binding_mode = "WaitForFirstConsumer"
allow_volume_expansion = false
parameters = {
"cas.openebs.io/config" = <<-EOT
- name: StorageType
value: "hostpath"
- name: BasePath
value: "${var.base_path}"
EOT
}
depends_on = [helm_release.openebs]
}

View File

@@ -0,0 +1,26 @@
variable "namespace" {
type = string
default = "openebs"
}
variable "release_name" {
type = string
default = "openebs"
}
variable "chart_version" {
type = string
default = null
description = "Версия helm chart openebs (null = последняя доступная)."
}
variable "storageclass_name" {
type = string
default = "openebs-local-hostpath"
}
variable "base_path" {
type = string
default = "/var/openebs/local/"
description = "Путь на нодах для hostpath LocalPV (можно кастомизировать)."
}

View File

@@ -0,0 +1,6 @@
terraform {
required_providers {
helm = { source = "hashicorp/helm" }
kubernetes = { source = "hashicorp/kubernetes" }
}
}
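
Minimal usage, assuming the module lives at this path; all defaults apply, so only a version pin is worth passing:

module "openebs" {
  source        = "./modules/openebs"
  chart_version = "4.1.0" # example pin; null would track the latest chart
}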

View File

@@ -0,0 +1,39 @@
resource "helm_release" "valkey" {
name = var.release_name
namespace = var.namespace
repository = var.repository
chart = "valkey"
version = var.chart_version
create_namespace = false
values = concat([
yamlencode({
auth = {
enabled = true
usersExistingSecret = kubernetes_secret_v1.valkey_users.metadata[0].name
# IMPORTANT: the 'default' user must be defined here (or in aclConfig),
# otherwise the chart complains / the configuration ends up insecure
aclUsers = {
default = {
permissions = "~* &* +@all"
# no password needed here; it is read from usersExistingSecret
}
}
}
# (optional) persistence on Ceph RBD:
# dataStorage = {
#   enabled = true
#   requestedSize = "5Gi"
#   className = "ceph-rbd"
# }
}),
# extra user-supplied values documents (see var.values)
], var.values)
depends_on = [
kubernetes_namespace_v1.this,
kubernetes_secret_v1.valkey_users,
]
}

View File

@@ -0,0 +1,7 @@
resource "kubernetes_namespace_v1" "this" {
count = var.create_namespace ? 1 : 0
metadata {
name = var.namespace
}
}

View File

@@ -0,0 +1,13 @@
resource "kubernetes_secret_v1" "valkey_users" {
metadata {
name = "valkey-users"
namespace = var.namespace # "valkey"
}
type = "Opaque"
data = {
# IMPORTANT: key = username; the chart expects this layout by default.
# 'data' takes plaintext and the provider base64-encodes it itself,
# so base64encode() here would double-encode the password
default = var.valkey_password
}
}

View File

@@ -0,0 +1,35 @@
variable "namespace" {
type = string
default = "valkey"
}
variable "create_namespace" {
type = bool
default = true
}
variable "release_name" {
type = string
default = "valkey"
}
variable "chart_version" {
type = string
default = "0.9.2"
}
variable "repository" {
type = string
default = "https://valkey.io/valkey-helm/"
}
# Extra chart values documents (yamlencode({...}) strings), merged after the module defaults
variable "values" {
type = list(string)
default = []
}
variable "valkey_password" {
type = string
sensitive = true
}

View File

@@ -0,0 +1,14 @@
terraform {
required_version = ">= 1.5.0"
required_providers {
helm = {
source = "hashicorp/helm"
version = ">= 3.0.0"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = ">= 2.0.0"
}
}
}
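
Usage sketch (the path is assumed; the password should come from a variable or secret store, never the repo):

module "valkey" {
  source          = "./modules/valkey"
  valkey_password = var.valkey_password # e.g. via TF_VAR_valkey_password
}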

View File

@@ -0,0 +1,7 @@
resource "powerdns_record" "this" {
zone = var.zone_name
name = var.name
type = var.type
ttl = var.ttl
records = var.records
}

View File

@@ -0,0 +1,19 @@
variable "zone_name" {
type = string
}
variable "name" {
type = string
}
variable "type" {
type = string
}
variable "ttl" {
type = number
}
variable "records" {
type = list(string)
}

View File

@@ -0,0 +1,10 @@
terraform {
required_version = ">= 1.3.0"
required_providers {
powerdns = {
source = "pan-net/powerdns"
# version = "1.5.0" # опционально: можно зафиксировать
}
}
}
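
A hypothetical record pointing at the ingress IP (the zone and addresses are assumptions; the pan-net/powerdns provider expects canonical names with a trailing dot):

module "gitlab_dns" {
  source    = "./modules/powerdns-record"
  zone_name = "lab.example."
  name      = "gitlab.lab.example."
  type      = "A"
  ttl       = 300
  records   = ["192.168.1.240"]
}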

View File

@@ -0,0 +1,6 @@
resource "powerdns_zone" "zone" {
name = var.zone_name
kind = var.zone_kind
soa_edit_api = var.soa_edit_api
nameservers = var.zone_nameservers
}

View File

@@ -0,0 +1,3 @@
output "name" {
value = powerdns_zone.zone.name
}

View File

@@ -0,0 +1,15 @@
variable "zone_name" {
type = string
}
variable "zone_kind" {
type = string
}
variable "soa_edit_api" {
type = string
}
variable "zone_nameservers" {
type = list(string)
}

View File

@@ -0,0 +1,10 @@
terraform {
required_version = ">= 1.3.0"
required_providers {
powerdns = {
source = "pan-net/powerdns"
# version = "1.5.0" # опционально: можно зафиксировать
}
}
}
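
A matching zone sketch (all values illustrative):

module "lab_zone" {
  source           = "./modules/powerdns-zone"
  zone_name        = "lab.example."
  zone_kind        = "Native"
  soa_edit_api     = "DEFAULT"
  zone_nameservers = ["ns1.lab.example."]
}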

View File

@@ -0,0 +1,72 @@
resource "proxmox_virtual_environment_container" "this" {
node_name = var.target_node
vm_id = var.vm_id
unprivileged = var.unprivileged
started = var.started
start_on_boot = var.start_on_boot
cpu {
cores = var.cores
units = var.cpu_units
}
memory {
dedicated = var.memory
swap = var.swap
}
disk {
datastore_id = var.rootfs_storage
size = var.rootfs_size_gib
}
features {
nesting = var.nesting
}
network_interface {
name = var.netif_name
bridge = var.bridge
enabled = true
firewall = var.firewall
}
operating_system {
template_file_id = var.template_file_id
type = var.os_type
}
initialization {
hostname = var.hostname
ip_config {
ipv4 {
address = var.ipv4_address
gateway = var.ipv4_gateway
}
}
# user_account is optional: created only when a password (non-null)
# or at least one SSH key is provided
dynamic "user_account" {
for_each = (var.password != null || length(var.ssh_public_keys) > 0) ? [1] : []
content {
# the provider accepts keys as list(string)
keys = var.ssh_public_keys
# password is only set when non-null
password = var.password
}
}
# DNS is optional
dynamic "dns" {
for_each = (var.dns_domain != null || length(var.dns_servers) > 0) ? [1] : []
content {
domain = var.dns_domain
servers = var.dns_servers
}
}
}
}

View File

@@ -0,0 +1,126 @@
variable "vm_id" {
type = number
}
variable "hostname" {
type = string
}
variable "target_node" {
type = string
}
variable "template_file_id" {
type = string
}
variable "os_type" {
type = string
default = "debian"
}
variable "unprivileged" {
type = bool
default = true
}
variable "nesting" {
type = bool
default = true
}
variable "cores" {
type = number
default = 1
}
# Proxmox cpuunits
variable "cpu_units" {
type = number
default = 1024
}
variable "memory" {
type = number
default = 512
}
variable "swap" {
type = number
default = 512
}
variable "rootfs_storage" {
type = string
default = "local-lvm"
}
variable "rootfs_size_gib" {
type = number
default = 8
}
variable "bridge" {
type = string
default = "vmbr0"
}
variable "netif_name" {
type = string
default = "eth0"
}
variable "firewall" {
type = bool
default = true
}
# DHCP: "dhcp"
# Static: "192.168.1.50/24"
variable "ipv4_address" {
type = string
default = "dhcp"
validation {
condition = var.ipv4_address == "dhcp" || can(cidrnetmask(var.ipv4_address))
error_message = "ipv4_address must be \"dhcp\" or a valid CIDR like 192.168.1.50/24."
}
}
# gateway is only valid when ipv4_address is not "dhcp"
variable "ipv4_gateway" {
type = string
default = null
}
# Password is optional (access can be managed via SSH keys instead)
variable "password" {
type = string
default = null
sensitive = true
}
variable "ssh_public_keys" {
type = list(string)
default = []
}
variable "dns_domain" {
type = string
default = null
}
variable "dns_servers" {
type = list(string)
default = []
}
variable "started" {
type = bool
default = false
}
variable "start_on_boot" {
type = bool
default = false
}

View File

@@ -0,0 +1,10 @@
terraform {
required_version = ">= 1.6"
required_providers {
proxmox = {
source = "bpg/proxmox"
version = "0.86.0"
}
}
}
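
One way this container module might be called (node, template, and addressing are assumptions):

module "pdns_ct" {
  source           = "./modules/proxmox-lxc"
  vm_id            = 210
  hostname         = "pdns"
  target_node      = "pve1"
  template_file_id = "local:vztmpl/debian-12-standard_12.7-1_amd64.tar.zst"
  ipv4_address     = "192.168.1.53/24"
  ipv4_gateway     = "192.168.1.1"
  ssh_public_keys  = [trimspace(file("~/.ssh/id_ed25519.pub"))]
  started          = true
  start_on_boot    = true
}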

View File

@@ -0,0 +1,63 @@
resource "proxmox_virtual_environment_vm" "this" {
name = var.name
node_name = var.target_node
clone {
vm_id = var.template_id
full = true
}
cpu {
cores = var.cpu
type = var.cpu_type
}
memory {
dedicated = var.memory
}
# Predictable SCSI numbering
scsi_hardware = "virtio-scsi-single"
boot_order = ["scsi0"]
# OS disk (scsi0)
disk {
datastore_id = var.storage
size = var.disk_size
interface = "scsi0"
}
# OSD disks (scsi1, scsi2, ...)
dynamic "disk" {
for_each = (var.osd_storage != null && length(var.osd_disks) > 0) ? { for idx, size in var.osd_disks : idx => size } : {}
content {
datastore_id = var.osd_storage
size = disk.value
interface = "scsi${disk.key + 1}"
file_format = "raw"
cache = "none"
iothread = true
discard = "on"
}
}
network_device {
bridge = var.bridge
model = "virtio"
mac_address = var.mac_address
}
agent {
enabled = true
}
initialization {
user_data_file_id = var.user_data_file_id
ip_config {
ipv4 { address = "dhcp" }
}
}
}

View File

@@ -0,0 +1,55 @@
variable "name" {
type = string
}
variable "target_node" {
type = string
}
variable "template_id" {
type = number
}
variable "cpu" {
type = number
}
variable "cpu_type" {
type = string
}
variable "memory" {
type = number
}
variable "disk_size" {
type = number
}
variable "storage" {
type = string
}
variable "bridge" {
type = string
}
variable "osd_storage" {
type = string
default = null
}
variable "osd_disks" {
type = list(number)
default = []
}
variable "user_data_file_id" {
type = string
default = null
}
variable "mac_address" {
description = "Static MAC for VM NIC (for DHCP reservation)."
type = string
}

View File

@@ -0,0 +1,8 @@
terraform {
required_providers {
proxmox = {
source = "bpg/proxmox"
version = ">= 0.86.0"
}
}
}
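
A sketch of a Ceph node built from this VM module (IDs, sizes, and the MAC are illustrative; the two osd_disks entries become scsi1/scsi2):

module "ceph_node_1" {
  source      = "./modules/proxmox-vm"
  name        = "ceph-1"
  target_node = "pve1"
  template_id = 9000
  cpu         = 4
  cpu_type    = "host"
  memory      = 8192
  disk_size   = 32
  storage     = "local-lvm"
  bridge      = "vmbr0"
  mac_address = "BC:24:11:00:00:01"
  osd_storage = "local-lvm"
  osd_disks   = [100, 100]
}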