Hrankin, Aleksandr (contracted)
2026-02-19 11:34:13 +00:00
commit f243f440c3
191 changed files with 6183 additions and 0 deletions

View File

@@ -0,0 +1,19 @@
resource "helm_release" "ceph_csi_rbd" {
name = "ceph-csi-rbd"
namespace = kubernetes_namespace_v1.this.metadata[0].name
repository = "https://ceph.github.io/csi-charts"
chart = "ceph-csi-rbd"
version = var.chart_version
create_namespace = false
values = [yamlencode({
csiConfig = [{
clusterID = var.ceph_cluster_id
monitors = var.ceph_monitors
}]
provisioner = {
replicaCount = 1
}
})]
}

View File

@@ -0,0 +1,3 @@
resource "kubernetes_namespace_v1" "this" {
metadata { name = var.namespace }
}

View File

@@ -0,0 +1,15 @@
variable "namespace" {
type = string
}
variable "chart_version" {
type = string
}
variable "ceph_cluster_id" {
type = string
}
variable "ceph_monitors" {
type = list(string)
}

View File

@@ -0,0 +1,6 @@
terraform {
required_providers {
helm = { source = "hashicorp/helm" }
kubernetes = { source = "hashicorp/kubernetes" }
}
}

View File

@@ -0,0 +1,13 @@
resource "kubernetes_secret_v1" "csi_rbd_secret" {
metadata {
name = "csi-rbd-secret"
namespace = var.namespace
}
data = {
userID = var.ceph_user_id
userKey = var.ceph_user_key
}
type = "Opaque"
}

View File

@@ -0,0 +1,27 @@
resource "kubernetes_storage_class_v1" "ceph_rbd" {
metadata {
name = "ceph-rbd"
# To make this the default StorageClass:
# annotations = {
#   "storageclass.kubernetes.io/is-default-class" = "true"
# }
}
storage_provisioner = "rbd.csi.ceph.com"
reclaim_policy = "Delete"
volume_binding_mode = "Immediate"
allow_volume_expansion = true
parameters = {
clusterID = var.ceph_cluster_id
pool = var.ceph_rbd_pool
# IMPORTANT: these parameter keys are literal strings; copy them exactly as in the CSI docs, with no stray spaces
"csi.storage.k8s.io/provisioner-secret-name" = kubernetes_secret_v1.csi_rbd_secret.metadata[0].name
"csi.storage.k8s.io/provisioner-secret-namespace" = kubernetes_secret_v1.csi_rbd_secret.metadata[0].namespace
"csi.storage.k8s.io/node-stage-secret-name" = kubernetes_secret_v1.csi_rbd_secret.metadata[0].name
"csi.storage.k8s.io/node-stage-secret-namespace" = kubernetes_secret_v1.csi_rbd_secret.metadata[0].namespace
imageFeatures = "layering"
}
}
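
A quick way to check the class end to end is a throwaway PVC bound to it. This is a minimal sketch, not part of the commit: the claim name and size are arbitrary, and it assumes the secret and pool above match the target Ceph cluster.

resource "kubernetes_persistent_volume_claim_v1" "ceph_rbd_smoke_test" {
  metadata {
    # hypothetical name; delete the claim after verifying provisioning works
    name      = "ceph-rbd-smoke-test"
    namespace = var.namespace
  }
  spec {
    access_modes       = ["ReadWriteOnce"]
    storage_class_name = kubernetes_storage_class_v1.ceph_rbd.metadata[0].name
    resources {
      requests = {
        storage = "1Gi"
      }
    }
  }
}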

View File

@@ -0,0 +1,20 @@
variable "namespace" {
type = string
}
variable "ceph_cluster_id" {
type = string
}
variable "ceph_rbd_pool" {
type = string
}
variable "ceph_user_id" {
type = string
}
variable "ceph_user_key" {
type = string
sensitive = true
}

View File

@@ -0,0 +1,5 @@
terraform {
required_providers {
kubernetes = { source = "hashicorp/kubernetes" }
}
}

View File

@@ -0,0 +1,26 @@
resource "helm_release" "pgo" {
name = var.release_name
namespace = kubernetes_namespace_v1.this.metadata[0].name
# Crunchy publishes the Helm chart to an OCI registry:
# helm install pgo oci://registry.developers.crunchydata.com/crunchydata/pgo
repository = "oci://registry.developers.crunchydata.com/crunchydata"
chart = "pgo"
version = var.chart_version
create_namespace = false
values = [
yamlencode({
# safe defaults, nothing fancy
debug = var.debug
replicas = var.replicas
# To restrict the operator to this namespace only:
# singleNamespace = true
singleNamespace = var.single_namespace
installCRDs = true
})
]
}

View File

@@ -0,0 +1,5 @@
resource "kubernetes_namespace_v1" "this" {
metadata {
name = var.namespace
}
}

View File

@@ -0,0 +1,7 @@
output "namespace" {
value = kubernetes_namespace_v1.this.metadata[0].name
}
output "release_name" {
value = helm_release.pgo.name
}

View File

@@ -0,0 +1,33 @@
variable "namespace" {
type = string
description = "Namespace, куда ставим Crunchy operator"
default = "postgres-operator"
}
variable "release_name" {
type = string
description = "Helm release name"
default = "pgo"
}
variable "chart_version" {
type = string
description = "Версия чарта pgo (пинить обязательно для воспроизводимости)"
default = "6.0.0"
}
variable "debug" {
type = bool
default = false
}
variable "replicas" {
type = number
default = 1
}
variable "single_namespace" {
type = bool
description = "Если true — оператор управляет кластерами только в этом namespace"
default = true
}

View File

@@ -0,0 +1,6 @@
terraform {
required_providers {
helm = { source = "hashicorp/helm" }
kubernetes = { source = "hashicorp/kubernetes" }
}
}

View File

@@ -0,0 +1,60 @@
resource "kubernetes_manifest" "postgres_cluster" {
manifest = {
apiVersion = "postgres-operator.crunchydata.com/v1beta1"
kind = "PostgresCluster"
metadata = {
name = var.name
namespace = var.namespace
}
spec = {
postgresVersion = var.postgres_version
instances = [
{
name = "instance1"
dataVolumeClaimSpec = {
storageClassName = var.storage_class_name
accessModes = ["ReadWriteOnce"]
resources = {
requests = {
storage = var.instance_storage
}
}
}
}
]
backups = {
pgbackrest = {
repos = [
{
name = "repo1"
volume = {
volumeClaimSpec = {
storageClassName = var.storage_class_name
accessModes = ["ReadWriteOnce"]
resources = {
requests = {
storage = var.backup_storage
}
}
}
}
}
]
}
}
users = [
{
name = var.gitlab_db_user
databases = [
var.gitlab_db_name
]
}
]
}
}
}
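
The Crunchy operator creates a connection Secret per declared user, conventionally named <cluster>-pguser-<user>, in the cluster's namespace. Below is a hedged sketch of surfacing GitLab's connection details from it; the data-source and output names are illustrative, and the Secret only exists once the operator has reconciled the cluster, so a first apply may need a retry.

data "kubernetes_secret_v1" "gitlab_pguser" {
  metadata {
    # naming convention used by the Crunchy operator for per-user secrets
    name      = "${var.name}-pguser-${var.gitlab_db_user}"
    namespace = var.namespace
  }
  depends_on = [kubernetes_manifest.postgres_cluster]
}

output "gitlab_db_uri" {
  # keys such as host, port, dbname, user, password, uri are populated by the operator
  value     = data.kubernetes_secret_v1.gitlab_pguser.data["uri"]
  sensitive = true
}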

View File

@@ -0,0 +1,7 @@
output "cluster_name" {
value = kubernetes_manifest.postgres_cluster.manifest["metadata"]["name"]
}
output "namespace" {
value = kubernetes_manifest.postgres_cluster.manifest["metadata"]["namespace"]
}

View File

@@ -0,0 +1,46 @@
variable "namespace" {
type = string
description = "Namespace, где будет PostgresCluster"
default = "postgres-operator"
}
variable "name" {
type = string
description = "Имя PostgresCluster"
default = "hippo"
}
variable "storage_class_name" {
type = string
description = "StorageClass для PVC (твой Ceph CSI RBD), например: ceph-rbd"
default = "ceph-rbd"
}
variable "postgres_version" {
type = number
description = "Major версия PostgreSQL (ставь ту, которую поддерживает твой CPK)"
default = 16
}
variable "instance_storage" {
type = string
description = "Размер диска под data"
default = "10Gi"
}
variable "backup_storage" {
type = string
description = "Размер диска под pgBackRest repo"
default = "10Gi"
}
variable "gitlab_db_user" {
type = string
default = "gitlab"
}
variable "gitlab_db_name" {
type = string
default = "gitlabhq_production"
}

View File

@@ -0,0 +1,7 @@
terraform {
required_providers {
kubernetes = {
source = "hashicorp/kubernetes"
}
}
}

View File

@@ -0,0 +1,7 @@
resource "helm_release" "metallb" {
name = "metallb"
repository = "https://metallb.github.io/metallb"
chart = "metallb"
namespace = "metallb-system"
create_namespace = true
}

View File

@@ -0,0 +1,7 @@
terraform {
required_providers {
helm = {
source = "hashicorp/helm"
}
}
}

View File

@@ -0,0 +1,31 @@
# The pool of IP addresses MetalLB draws "external IPs" from when assigning them to Services of type LoadBalancer
resource "kubernetes_manifest" "metallb_ip_pool" {
manifest = {
apiVersion = "metallb.io/v1beta1"
kind = "IPAddressPool"
metadata = {
name = var.pool_name
namespace = var.namespace
}
spec = {
addresses = var.addresses
}
}
}
# Tells MetalLB to announce addresses from this pool in L2 mode.
resource "kubernetes_manifest" "metallb_l2" {
manifest = {
apiVersion = "metallb.io/v1beta1"
kind = "L2Advertisement"
metadata = {
name = var.l2_name
namespace = var.namespace
}
spec = {
ipAddressPools = [var.pool_name]
}
}
depends_on = [kubernetes_manifest.metallb_ip_pool]
}

View File

@@ -0,0 +1,3 @@
output "pool_name" {
value = var.pool_name
}

View File

@@ -0,0 +1,22 @@
variable "addresses" {
type = list(string)
description = "MetalLB address pool ranges"
}
variable "namespace" {
type = string
description = "Namespace where MetalLB is installed"
default = "metallb-system"
}
variable "pool_name" {
type = string
description = "IPAddressPool name"
default = "default-pool"
}
variable "l2_name" {
type = string
description = "L2Advertisement name"
default = "default-l2"
}
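
A hedged sketch of calling this module from a root configuration; the module names, source path, and address range below are placeholders. IPAddressPool accepts both CIDRs and dash-separated ranges, and the MetalLB CRDs must already exist, hence the depends_on.

module "metallb_config" {
  source = "./modules/metallb-config" # placeholder path; adjust to the repo layout

  # ranges and CIDRs are both valid, e.g. "10.0.10.0/28"
  addresses = ["192.168.1.240-192.168.1.250"]

  # the CRDs are installed by the MetalLB chart, so that module must apply first
  depends_on = [module.metallb]
}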

View File

@@ -0,0 +1,7 @@
terraform {
required_providers {
kubernetes = {
source = "hashicorp/kubernetes"
}
}
}

View File

@@ -0,0 +1,18 @@
locals {
  # merge() is shallow, so compose the map at the service level;
  # merging two full controller maps would drop service.type whenever a static IP is set.
  ingress_nginx_values = {
    controller = {
      service = merge(
        { type = "LoadBalancer" },
        var.ingress_lb_ip == null ? {} : { loadBalancerIP = var.ingress_lb_ip }
      )
    }
  }
}

View File

@@ -0,0 +1,9 @@
resource "helm_release" "ingress_nginx" {
name = "ingress-nginx"
repository = "https://kubernetes.github.io/ingress-nginx"
chart = "ingress-nginx"
namespace = "ingress-nginx"
create_namespace = true
values = [yamlencode(local.ingress_nginx_values)]
}

View File

@@ -0,0 +1,10 @@
variable "ingress_lb_ip" {
type = string
description = "Static LB IP for ingress-nginx controller Service (must be from MetalLB pool). Leave null for dynamic."
default = null
}
variable "pool_name" {
type = string
description = "MetalLB address pool name for ingress-nginx Service annotation"
}
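
var.pool_name is declared above but not yet referenced by the release. A hedged sketch of one way to wire it in: annotate the controller Service so MetalLB allocates from that specific pool, then append this extra values document to helm_release.ingress_nginx. The annotation key below is MetalLB's long-standing one; recent releases also accept a metallb.io/ prefixed equivalent.

locals {
  # extra values document; add yamlencode(local.ingress_nginx_pool_values)
  # to the release's `values` list to activate it
  ingress_nginx_pool_values = {
    controller = {
      service = {
        annotations = {
          "metallb.universe.tf/address-pool" = var.pool_name
        }
      }
    }
  }
}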

View File

@@ -0,0 +1,8 @@
resource "helm_release" "openebs" {
name = var.release_name
repository = "https://openebs.github.io/openebs"
chart = "openebs"
version = var.chart_version
namespace = var.namespace
create_namespace = true
}

View File

@@ -0,0 +1,25 @@
resource "kubernetes_storage_class_v1" "openebs_hostpath" {
metadata {
name = var.storageclass_name
annotations = {
"storageclass.kubernetes.io/is-default-class" = "true"
"openebs.io/cas-type" = "local"
}
}
storage_provisioner = "openebs.io/local"
reclaim_policy = "Delete"
volume_binding_mode = "WaitForFirstConsumer"
allow_volume_expansion = false
parameters = {
"cas.openebs.io/config" = <<-EOT
- name: StorageType
value: "hostpath"
- name: BasePath
value: "${var.base_path}"
EOT
}
depends_on = [helm_release.openebs]
}

View File

@@ -0,0 +1,26 @@
variable "namespace" {
type = string
default = "openebs"
}
variable "release_name" {
type = string
default = "openebs"
}
variable "chart_version" {
type = string
default = null
description = "Версия helm chart openebs (null = последняя доступная)."
}
variable "storageclass_name" {
type = string
default = "openebs-local-hostpath"
}
variable "base_path" {
type = string
default = "/var/openebs/local/"
description = "Путь на нодах для hostpath LocalPV (можно кастомизировать)."
}

View File

@@ -0,0 +1,6 @@
terraform {
required_providers {
helm = { source = "hashicorp/helm" }
kubernetes = { source = "hashicorp/kubernetes" }
}
}

View File

@@ -0,0 +1,39 @@
resource "helm_release" "valkey" {
name = var.release_name
namespace = var.namespace
repository = var.repository
chart = "valkey"
version = var.chart_version
create_namespace = false
values = [
yamlencode({
auth = {
enabled = true
usersExistingSecret = kubernetes_secret_v1.valkey_users.metadata[0].name
# IMPORTANT: 'default' must be defined here (or in aclConfig),
# otherwise the chart complains / the configuration ends up insecure
aclUsers = {
default = {
permissions = "~* &* +@all"
# no password here: it is taken from usersExistingSecret
}
}
}
# (optional) persistence on Ceph RBD:
# dataStorage = {
# enabled = true
# requestedSize = "5Gi"
# className = "ceph-rbd"
# }
})
]
depends_on = [
kubernetes_namespace_v1.this,
kubernetes_secret_v1.valkey_users,
]
}

View File

@@ -0,0 +1,7 @@
resource "kubernetes_namespace_v1" "this" {
count = var.create_namespace ? 1 : 0
metadata {
name = var.namespace
}
}

View File

@@ -0,0 +1,13 @@
resource "kubernetes_secret_v1" "valkey_users" {
metadata {
name = "valkey-users"
namespace = var.namespace # "valkey"
}
type = "Opaque"
data = {
# IMPORTANT: the key is the username; this is what the chart expects by default.
# The kubernetes provider base64-encodes `data` values itself, so pass the raw
# password here; base64encode() would double-encode it.
default = var.valkey_password
}
}

View File

@@ -0,0 +1,35 @@
variable "namespace" {
type = string
default = "valkey"
}
variable "create_namespace" {
type = bool
default = true
}
variable "release_name" {
type = string
default = "valkey"
}
variable "chart_version" {
type = string
default = "0.9.2"
}
variable "repository" {
type = string
default = "https://valkey.io/valkey-helm/"
}
# To customize chart values, pass yamlencode({...}) strings here (see the sketch below for wiring them into the release)
variable "values" {
type = list(string)
default = []
}
variable "valkey_password" {
type = string
sensitive = true
}
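
var.values is declared but not yet consumed by helm_release.valkey. A minimal sketch of how it could be folded in, assuming later list entries should override the module's defaults; helm_release.valkey would then set `values = local.valkey_release_values`.

locals {
  # extra documents from var.values appended after the module's built-in values
  valkey_release_values = concat(
    [yamlencode({
      auth = {
        enabled = true
      }
    })],
    var.values
  )
}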

View File

@@ -0,0 +1,14 @@
terraform {
required_version = ">= 1.5.0"
required_providers {
helm = {
source = "hashicorp/helm"
version = ">= 3.0.0"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = ">= 2.0.0"
}
}
}