init
This commit is contained in:
19
terraform/modules/k8s/ceph/k8s-ceph-csi-rbd/helm.tf
Normal file
19
terraform/modules/k8s/ceph/k8s-ceph-csi-rbd/helm.tf
Normal file
@@ -0,0 +1,19 @@
|
||||
# Installs the ceph-csi-rbd chart into the namespace managed by this module.
resource "helm_release" "ceph_csi_rbd" {
  name       = "ceph-csi-rbd"
  namespace  = kubernetes_namespace_v1.this.metadata[0].name
  repository = "https://ceph.github.io/csi-charts"
  chart      = "ceph-csi-rbd"
  version    = var.chart_version

  # The namespace is created by kubernetes_namespace_v1.this, not by Helm.
  create_namespace = false

  values = [
    yamlencode({
      # One entry per Ceph cluster the CSI driver should talk to.
      csiConfig = [
        {
          clusterID = var.ceph_cluster_id
          monitors  = var.ceph_monitors
        }
      ]
      provisioner = {
        replicaCount = 1
      }
    })
  ]
}
|
||||
3
terraform/modules/k8s/ceph/k8s-ceph-csi-rbd/namespace.tf
Normal file
3
terraform/modules/k8s/ceph/k8s-ceph-csi-rbd/namespace.tf
Normal file
@@ -0,0 +1,3 @@
|
||||
# Dedicated namespace for the ceph-csi-rbd release.
resource "kubernetes_namespace_v1" "this" {
  metadata {
    name = var.namespace
  }
}
|
||||
15
terraform/modules/k8s/ceph/k8s-ceph-csi-rbd/variables.tf
Normal file
15
terraform/modules/k8s/ceph/k8s-ceph-csi-rbd/variables.tf
Normal file
@@ -0,0 +1,15 @@
|
||||
variable "namespace" {
  type        = string
  description = "Namespace in which the ceph-csi-rbd chart is installed."
}

variable "chart_version" {
  type        = string
  description = "Version of the ceph-csi-rbd Helm chart (pin for reproducibility)."
}

variable "ceph_cluster_id" {
  type        = string
  description = "Ceph cluster ID used as clusterID in the CSI configuration."
}

variable "ceph_monitors" {
  type        = list(string)
  description = "Ceph monitor addresses for the CSI driver."
}
|
||||
6
terraform/modules/k8s/ceph/k8s-ceph-csi-rbd/versions.tf
Normal file
6
terraform/modules/k8s/ceph/k8s-ceph-csi-rbd/versions.tf
Normal file
@@ -0,0 +1,6 @@
|
||||
# Providers required by this module; version constraints are inherited
# from the root module.
terraform {
  required_providers {
    helm = { source = "hashicorp/helm" }
    kubernetes = { source = "hashicorp/kubernetes" }
  }
}
|
||||
13
terraform/modules/k8s/ceph/k8s-ceph-rbd-storage/secret.tf
Normal file
13
terraform/modules/k8s/ceph/k8s-ceph-rbd-storage/secret.tf
Normal file
@@ -0,0 +1,13 @@
|
||||
# Credentials used by the RBD CSI driver for provisioning and node staging.
resource "kubernetes_secret_v1" "csi_rbd_secret" {
  metadata {
    name      = "csi-rbd-secret"
    namespace = var.namespace
  }

  type = "Opaque"

  # The kubernetes provider base64-encodes `data` values itself,
  # so plaintext is passed here.
  data = {
    userID  = var.ceph_user_id
    userKey = var.ceph_user_key
  }
}
|
||||
@@ -0,0 +1,27 @@
|
||||
# StorageClass backed by the Ceph RBD CSI driver; secrets reference the
# csi-rbd-secret created in secret.tf.
resource "kubernetes_storage_class_v1" "ceph_rbd" {
  metadata {
    name = "ceph-rbd"
    # To make this the default StorageClass, uncomment:
    # annotations = {
    #   "storageclass.kubernetes.io/is-default-class" = "true"
    # }
  }

  storage_provisioner    = "rbd.csi.ceph.com"
  reclaim_policy         = "Delete"
  volume_binding_mode    = "Immediate"
  allow_volume_expansion = true

  parameters = {
    clusterID = var.ceph_cluster_id
    pool      = var.ceph_rbd_pool

    # IMPORTANT: these are literal string keys — no spaces, no extra "/"
    "csi.storage.k8s.io/provisioner-secret-name"      = kubernetes_secret_v1.csi_rbd_secret.metadata[0].name
    "csi.storage.k8s.io/provisioner-secret-namespace" = kubernetes_secret_v1.csi_rbd_secret.metadata[0].namespace
    "csi.storage.k8s.io/node-stage-secret-name"       = kubernetes_secret_v1.csi_rbd_secret.metadata[0].name
    "csi.storage.k8s.io/node-stage-secret-namespace"  = kubernetes_secret_v1.csi_rbd_secret.metadata[0].namespace

    imageFeatures = "layering"
  }
}
|
||||
20
terraform/modules/k8s/ceph/k8s-ceph-rbd-storage/variables.tf
Normal file
20
terraform/modules/k8s/ceph/k8s-ceph-rbd-storage/variables.tf
Normal file
@@ -0,0 +1,20 @@
|
||||
variable "namespace" {
  type        = string
  description = "Namespace holding the CSI secret (should match the ceph-csi-rbd install namespace)."
}

variable "ceph_cluster_id" {
  type        = string
  description = "Ceph cluster ID referenced by the StorageClass clusterID parameter."
}

variable "ceph_rbd_pool" {
  type        = string
  description = "RBD pool in which volumes are created."
}

variable "ceph_user_id" {
  type        = string
  description = "Ceph user ID used by the CSI driver."
}

variable "ceph_user_key" {
  type        = string
  description = "Key of the Ceph user; marked sensitive to keep it out of plan output."
  sensitive   = true
}
|
||||
@@ -0,0 +1,5 @@
|
||||
# Only the kubernetes provider is needed here; version constraints are
# inherited from the root module.
terraform {
  required_providers {
    kubernetes = { source = "hashicorp/kubernetes" }
  }
}
|
||||
26
terraform/modules/k8s/crunchy-data/operator/helm.tf
Normal file
26
terraform/modules/k8s/crunchy-data/operator/helm.tf
Normal file
@@ -0,0 +1,26 @@
|
||||
# Installs the Crunchy Postgres Operator (PGO) Helm chart.
resource "helm_release" "pgo" {
  name      = var.release_name
  namespace = kubernetes_namespace_v1.this.metadata[0].name

  # Crunchy publishes the Helm chart in an OCI registry:
  # helm install pgo oci://registry.developers.crunchydata.com/crunchydata/pgo
  repository = "oci://registry.developers.crunchydata.com/crunchydata"
  chart      = "pgo"
  version    = var.chart_version

  create_namespace = false

  values = [
    yamlencode({
      # Safe defaults, no extra magic
      debug    = var.debug
      replicas = var.replicas

      # Restrict the operator to this namespace only when requested.
      singleNamespace = var.single_namespace

      # NOTE(review): confirm the pinned chart version actually honors an
      # `installCRDs` value — unknown values are silently ignored by Helm.
      installCRDs = true
    })
  ]
}
|
||||
5
terraform/modules/k8s/crunchy-data/operator/namespace.tf
Normal file
5
terraform/modules/k8s/crunchy-data/operator/namespace.tf
Normal file
@@ -0,0 +1,5 @@
|
||||
# Namespace that will host the operator release.
resource "kubernetes_namespace_v1" "this" {
  metadata { name = var.namespace }
}
|
||||
7
terraform/modules/k8s/crunchy-data/operator/outputs.tf
Normal file
7
terraform/modules/k8s/crunchy-data/operator/outputs.tf
Normal file
@@ -0,0 +1,7 @@
|
||||
output "namespace" {
  description = "Namespace the operator was installed into."
  value       = kubernetes_namespace_v1.this.metadata[0].name
}

output "release_name" {
  description = "Name of the pgo Helm release."
  value       = helm_release.pgo.name
}
|
||||
33
terraform/modules/k8s/crunchy-data/operator/variables.tf
Normal file
33
terraform/modules/k8s/crunchy-data/operator/variables.tf
Normal file
@@ -0,0 +1,33 @@
|
||||
variable "namespace" {
  type        = string
  description = "Namespace where the Crunchy operator is installed."
  default     = "postgres-operator"
}

variable "release_name" {
  type        = string
  description = "Helm release name."
  default     = "pgo"
}

variable "chart_version" {
  type        = string
  description = "Version of the pgo chart (always pin for reproducibility)."
  default     = "6.0.0"
}

variable "debug" {
  type        = bool
  description = "Value passed to the chart's debug setting."
  default     = false
}

variable "replicas" {
  type        = number
  description = "Value passed to the chart's replicas setting."
  default     = 1
}

variable "single_namespace" {
  type        = bool
  description = "If true, the operator manages clusters only in its own namespace."
  default     = true
}
|
||||
6
terraform/modules/k8s/crunchy-data/operator/versions.tf
Normal file
6
terraform/modules/k8s/crunchy-data/operator/versions.tf
Normal file
@@ -0,0 +1,6 @@
|
||||
# Providers required by this module; version constraints are inherited
# from the root module.
terraform {
  required_providers {
    helm = { source = "hashicorp/helm" }
    kubernetes = { source = "hashicorp/kubernetes" }
  }
}
|
||||
@@ -0,0 +1,60 @@
|
||||
# PostgresCluster custom resource managed by the Crunchy Postgres Operator.
# The operator (and its CRDs) must be installed before this can apply.
resource "kubernetes_manifest" "postgres_cluster" {
  manifest = {
    apiVersion = "postgres-operator.crunchydata.com/v1beta1"
    kind       = "PostgresCluster"

    metadata = {
      name      = var.name
      namespace = var.namespace
    }

    spec = {
      postgresVersion = var.postgres_version

      # Single instance set; data lives on a PVC from var.storage_class_name.
      instances = [
        {
          name = "instance1"
          dataVolumeClaimSpec = {
            storageClassName = var.storage_class_name
            accessModes      = ["ReadWriteOnce"]
            resources = {
              requests = {
                storage = var.instance_storage
              }
            }
          }
        }
      ]

      # pgBackRest backup repository on its own PVC.
      backups = {
        pgbackrest = {
          repos = [
            {
              name = "repo1"
              volume = {
                volumeClaimSpec = {
                  storageClassName = var.storage_class_name
                  accessModes      = ["ReadWriteOnce"]
                  resources = {
                    requests = {
                      storage = var.backup_storage
                    }
                  }
                }
              }
            }
          ]
        }
      }

      # Application user and its database.
      users = [
        {
          name = var.gitlab_db_user
          databases = [
            var.gitlab_db_name
          ]
        }
      ]
    }
  }
}
|
||||
@@ -0,0 +1,7 @@
|
||||
output "cluster_name" {
  description = "Name of the PostgresCluster resource."
  value       = kubernetes_manifest.postgres_cluster.manifest["metadata"]["name"]
}

output "namespace" {
  description = "Namespace the PostgresCluster lives in."
  value       = kubernetes_manifest.postgres_cluster.manifest["metadata"]["namespace"]
}
|
||||
@@ -0,0 +1,46 @@
|
||||
variable "namespace" {
  type        = string
  description = "Namespace where the PostgresCluster will be created."
  default     = "postgres-operator"
}

variable "name" {
  type        = string
  description = "Name of the PostgresCluster."
  default     = "hippo"
}

variable "storage_class_name" {
  type        = string
  description = "StorageClass for the PVCs (e.g. the Ceph CSI RBD class: ceph-rbd)."
  default     = "ceph-rbd"
}

variable "postgres_version" {
  type        = number
  description = "PostgreSQL major version (must be supported by the installed operator)."
  default     = 16
}

variable "instance_storage" {
  type        = string
  description = "Size of the data volume."
  default     = "10Gi"
}

variable "backup_storage" {
  type        = string
  description = "Size of the pgBackRest repository volume."
  default     = "10Gi"
}

variable "gitlab_db_user" {
  type        = string
  description = "Database user created for GitLab."
  default     = "gitlab"
}

variable "gitlab_db_name" {
  type        = string
  description = "Database created for GitLab."
  default     = "gitlabhq_production"
}
|
||||
|
||||
@@ -0,0 +1,7 @@
|
||||
# Only the kubernetes provider is needed here; version constraints are
# inherited from the root module.
terraform {
  required_providers {
    kubernetes = {
      source = "hashicorp/kubernetes"
    }
  }
}
|
||||
7
terraform/modules/k8s/metallb/helm/main.tf
Normal file
7
terraform/modules/k8s/metallb/helm/main.tf
Normal file
@@ -0,0 +1,7 @@
|
||||
# Installs MetalLB via its official Helm chart.
# NOTE(review): the chart version is not pinned, so installs are not
# reproducible — consider adding a `version` argument.
resource "helm_release" "metallb" {
  name             = "metallb"
  namespace        = "metallb-system"
  repository       = "https://metallb.github.io/metallb"
  chart            = "metallb"
  create_namespace = true
}
|
||||
7
terraform/modules/k8s/metallb/helm/versions.tf
Normal file
7
terraform/modules/k8s/metallb/helm/versions.tf
Normal file
@@ -0,0 +1,7 @@
|
||||
# Only the helm provider is needed for this module.
terraform {
  required_providers {
    helm = {
      source = "hashicorp/helm"
    }
  }
}
|
||||
31
terraform/modules/k8s/metallb/resources/manifest.tf
Normal file
31
terraform/modules/k8s/metallb/resources/manifest.tf
Normal file
@@ -0,0 +1,31 @@
|
||||
# Pool of IP addresses from which MetalLB picks "external IPs"
# to assign to Services of type LoadBalancer.
resource "kubernetes_manifest" "metallb_ip_pool" {
  manifest = {
    apiVersion = "metallb.io/v1beta1"
    kind       = "IPAddressPool"
    metadata = {
      name      = var.pool_name
      namespace = var.namespace
    }
    spec = {
      addresses = var.addresses
    }
  }
}
|
||||
|
||||
# Tells MetalLB to announce the addresses from this pool in L2 mode.
resource "kubernetes_manifest" "metallb_l2" {
  manifest = {
    apiVersion = "metallb.io/v1beta1"
    kind       = "L2Advertisement"
    metadata = {
      name      = var.l2_name
      namespace = var.namespace
    }
    spec = {
      # References the pool by name, hence the explicit dependency below.
      ipAddressPools = [var.pool_name]
    }
  }

  depends_on = [kubernetes_manifest.metallb_ip_pool]
}
|
||||
3
terraform/modules/k8s/metallb/resources/outputs.tf
Normal file
3
terraform/modules/k8s/metallb/resources/outputs.tf
Normal file
@@ -0,0 +1,3 @@
|
||||
output "pool_name" {
  description = "Name of the IPAddressPool, for referencing from other modules."
  value       = var.pool_name
}
|
||||
22
terraform/modules/k8s/metallb/resources/variables.tf
Normal file
22
terraform/modules/k8s/metallb/resources/variables.tf
Normal file
@@ -0,0 +1,22 @@
|
||||
# Inputs for the MetalLB address pool and its L2 advertisement.

variable "addresses" {
  type        = list(string)
  description = "MetalLB address pool ranges"
}

variable "namespace" {
  type        = string
  description = "Namespace where MetalLB is installed"
  default     = "metallb-system"
}

variable "pool_name" {
  type        = string
  description = "IPAddressPool name"
  default     = "default-pool"
}

variable "l2_name" {
  type        = string
  description = "L2Advertisement name"
  default     = "default-l2"
}
|
||||
7
terraform/modules/k8s/metallb/resources/versions.tf
Normal file
7
terraform/modules/k8s/metallb/resources/versions.tf
Normal file
@@ -0,0 +1,7 @@
|
||||
# Only the kubernetes provider is needed for the MetalLB CRs.
terraform {
  required_providers {
    kubernetes = {
      source = "hashicorp/kubernetes"
    }
  }
}
|
||||
18
terraform/modules/k8s/nginx_ingress/helm/locals.tf
Normal file
18
terraform/modules/k8s/nginx_ingress/helm/locals.tf
Normal file
@@ -0,0 +1,18 @@
|
||||
locals {
  # Helm values for ingress-nginx.
  #
  # Bug fix: terraform's merge() is SHALLOW. The previous code merged two
  # maps that both contained a nested `controller` key, so when
  # var.ingress_lb_ip was set the second map's `controller` replaced the
  # first one entirely, silently dropping `service.type = "LoadBalancer"`.
  # Build the nested structure once and merge only at the innermost
  # (service) level instead.
  ingress_nginx_values = {
    controller = {
      service = merge(
        {
          type = "LoadBalancer"
        },
        # Pin the LB IP only when one was provided
        # (must come from the MetalLB pool).
        var.ingress_lb_ip == null ? {} : {
          loadBalancerIP = var.ingress_lb_ip
        }
      )
    }
  }
}
|
||||
9
terraform/modules/k8s/nginx_ingress/helm/main.tf
Normal file
9
terraform/modules/k8s/nginx_ingress/helm/main.tf
Normal file
@@ -0,0 +1,9 @@
|
||||
# Installs ingress-nginx; Service-related values come from locals.tf.
resource "helm_release" "ingress_nginx" {
  name             = "ingress-nginx"
  namespace        = "ingress-nginx"
  repository       = "https://kubernetes.github.io/ingress-nginx"
  chart            = "ingress-nginx"
  create_namespace = true

  values = [yamlencode(local.ingress_nginx_values)]
}
|
||||
10
terraform/modules/k8s/nginx_ingress/helm/variables.tf
Normal file
10
terraform/modules/k8s/nginx_ingress/helm/variables.tf
Normal file
@@ -0,0 +1,10 @@
|
||||
variable "ingress_lb_ip" {
  type        = string
  description = "Static LB IP for ingress-nginx controller Service (must be from MetalLB pool). Leave null for dynamic."
  default     = null
}

# NOTE(review): pool_name is accepted but not referenced by locals.tf or
# main.tf in this module — confirm whether it should be wired into a
# MetalLB address-pool annotation on the Service, or removed.
variable "pool_name" {
  type        = string
  description = "MetalLB address pool name for ingress-nginx Service annotation"
}
|
||||
8
terraform/modules/k8s/openebs/helm.tf
Normal file
8
terraform/modules/k8s/openebs/helm.tf
Normal file
@@ -0,0 +1,8 @@
|
||||
# Installs the OpenEBS chart.
resource "helm_release" "openebs" {
  name             = var.release_name
  namespace        = var.namespace
  repository       = "https://openebs.github.io/openebs"
  chart            = "openebs"
  version          = var.chart_version
  create_namespace = true
}
|
||||
25
terraform/modules/k8s/openebs/storage_class.tf
Normal file
25
terraform/modules/k8s/openebs/storage_class.tf
Normal file
@@ -0,0 +1,25 @@
|
||||
# Hostpath LocalPV StorageClass, marked as the cluster default.
resource "kubernetes_storage_class_v1" "openebs_hostpath" {
  metadata {
    name = var.storageclass_name
    annotations = {
      "storageclass.kubernetes.io/is-default-class" = "true"
      "openebs.io/cas-type"                         = "local"
    }
  }

  storage_provisioner = "openebs.io/local"
  reclaim_policy      = "Delete"
  # Local volumes are node-bound, so binding waits for pod scheduling.
  volume_binding_mode    = "WaitForFirstConsumer"
  allow_volume_expansion = false

  parameters = {
    # CAS config is a YAML document embedded as a string.
    "cas.openebs.io/config" = <<-EOT
      - name: StorageType
        value: "hostpath"
      - name: BasePath
        value: "${var.base_path}"
    EOT
  }

  depends_on = [helm_release.openebs]
}
|
||||
26
terraform/modules/k8s/openebs/variables.tf
Normal file
26
terraform/modules/k8s/openebs/variables.tf
Normal file
@@ -0,0 +1,26 @@
|
||||
variable "namespace" {
  type        = string
  description = "Namespace for the OpenEBS release."
  default     = "openebs"
}

variable "release_name" {
  type        = string
  description = "Helm release name."
  default     = "openebs"
}

variable "chart_version" {
  type        = string
  default     = null
  description = "Version of the openebs Helm chart (null = latest available)."
}

variable "storageclass_name" {
  type        = string
  description = "Name of the hostpath StorageClass to create."
  default     = "openebs-local-hostpath"
}

variable "base_path" {
  type        = string
  default     = "/var/openebs/local/"
  description = "Path on the nodes used for hostpath LocalPV volumes (customizable)."
}
|
||||
6
terraform/modules/k8s/openebs/versions.tf
Normal file
6
terraform/modules/k8s/openebs/versions.tf
Normal file
@@ -0,0 +1,6 @@
|
||||
# Providers required by this module; version constraints are inherited
# from the root module.
terraform {
  required_providers {
    helm = { source = "hashicorp/helm" }
    kubernetes = { source = "hashicorp/kubernetes" }
  }
}
|
||||
39
terraform/modules/k8s/valkey/helm.tf
Normal file
39
terraform/modules/k8s/valkey/helm.tf
Normal file
@@ -0,0 +1,39 @@
|
||||
# Installs Valkey with ACL-based auth; user passwords come from a
# pre-created Secret (see secret.tf).
resource "helm_release" "valkey" {
  name      = var.release_name
  namespace = var.namespace

  repository       = var.repository
  chart            = "valkey"
  version          = var.chart_version
  create_namespace = false

  values = [
    yamlencode({
      auth = {
        enabled             = true
        usersExistingSecret = kubernetes_secret_v1.valkey_users.metadata[0].name

        # IMPORTANT: the 'default' user must be defined here (or in aclConfig),
        # otherwise the chart complains / the configuration is insecure.
        aclUsers = {
          default = {
            permissions = "~* &* +@all"
            # No password here — it is taken from usersExistingSecret.
          }
        }
      }

      # (Optional) persistence on Ceph RBD:
      # dataStorage = {
      #   enabled       = true
      #   requestedSize = "5Gi"
      #   className     = "ceph-rbd"
      # }
    })
  ]

  depends_on = [
    kubernetes_namespace_v1.this,
    kubernetes_secret_v1.valkey_users,
  ]
}
|
||||
7
terraform/modules/k8s/valkey/namespace.tf
Normal file
7
terraform/modules/k8s/valkey/namespace.tf
Normal file
@@ -0,0 +1,7 @@
|
||||
# Optionally creates the Valkey namespace (gated by var.create_namespace).
resource "kubernetes_namespace_v1" "this" {
  count = var.create_namespace ? 1 : 0

  metadata { name = var.namespace }
}
|
||||
13
terraform/modules/k8s/valkey/secret.tf
Normal file
13
terraform/modules/k8s/valkey/secret.tf
Normal file
@@ -0,0 +1,13 @@
|
||||
# Per-user passwords consumed by the chart via auth.usersExistingSecret.
resource "kubernetes_secret_v1" "valkey_users" {
  metadata {
    name      = "valkey-users"
    namespace = var.namespace # "valkey"
  }

  type = "Opaque"

  data = {
    # IMPORTANT: key = username; this is what the chart expects by default.
    #
    # Bug fix: the kubernetes provider base64-encodes `data` values itself
    # (`binary_data` is the attribute for pre-encoded input). Wrapping the
    # password in base64encode() double-encoded it, so the effective Valkey
    # password became the base64 string rather than var.valkey_password.
    default = var.valkey_password
  }
}
|
||||
35
terraform/modules/k8s/valkey/variables.tf
Normal file
35
terraform/modules/k8s/valkey/variables.tf
Normal file
@@ -0,0 +1,35 @@
|
||||
variable "namespace" {
  type        = string
  description = "Namespace for the Valkey release."
  default     = "valkey"
}

variable "create_namespace" {
  type        = bool
  description = "Whether this module creates the namespace itself."
  default     = true
}

variable "release_name" {
  type        = string
  description = "Helm release name."
  default     = "valkey"
}

variable "chart_version" {
  type        = string
  description = "Pinned version of the valkey chart."
  default     = "0.9.2"
}

variable "repository" {
  type        = string
  description = "Helm repository hosting the valkey chart."
  default     = "https://valkey.io/valkey-helm/"
}

# To customize chart values, pass yamlencode({...}) entries here.
# NOTE(review): this variable is not referenced by helm.tf — confirm whether
# it should be concatenated into helm_release.valkey's `values`, or removed.
variable "values" {
  type    = list(string)
  default = []
}

variable "valkey_password" {
  type        = string
  description = "Password for the 'default' Valkey user."
  sensitive   = true
}
|
||||
14
terraform/modules/k8s/valkey/versions.tf
Normal file
14
terraform/modules/k8s/valkey/versions.tf
Normal file
@@ -0,0 +1,14 @@
|
||||
# Provider and core version requirements for the Valkey module.
terraform {
  required_version = ">= 1.5.0"

  required_providers {
    helm = {
      source  = "hashicorp/helm"
      version = ">= 3.0.0"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = ">= 2.0.0"
    }
  }
}
|
||||
7
terraform/modules/powerdns/record/main.tf
Normal file
7
terraform/modules/powerdns/record/main.tf
Normal file
@@ -0,0 +1,7 @@
|
||||
# A single DNS record set in the given PowerDNS zone.
resource "powerdns_record" "this" {
  zone    = var.zone_name
  name    = var.name
  type    = var.type
  ttl     = var.ttl
  records = var.records
}
|
||||
19
terraform/modules/powerdns/record/variables.tf
Normal file
19
terraform/modules/powerdns/record/variables.tf
Normal file
@@ -0,0 +1,19 @@
|
||||
variable "zone_name" {
  type        = string
  description = "Zone the record belongs to."
}

variable "name" {
  type        = string
  description = "Record name."
}

variable "type" {
  type        = string
  description = "Record type (A, AAAA, CNAME, TXT, ...)."
}

variable "ttl" {
  type        = number
  description = "Record TTL in seconds."
}

variable "records" {
  type        = list(string)
  description = "Record contents, one entry per value."
}
|
||||
10
terraform/modules/powerdns/record/versions.tf
Normal file
10
terraform/modules/powerdns/record/versions.tf
Normal file
@@ -0,0 +1,10 @@
|
||||
# Provider requirements for the PowerDNS record module.
terraform {
  required_version = ">= 1.3.0"

  required_providers {
    powerdns = {
      source = "pan-net/powerdns"
      # version = "1.5.0" # optional: may be pinned
    }
  }
}
|
||||
6
terraform/modules/powerdns/zone/main.tf
Normal file
6
terraform/modules/powerdns/zone/main.tf
Normal file
@@ -0,0 +1,6 @@
|
||||
# An authoritative PowerDNS zone.
resource "powerdns_zone" "zone" {
  name         = var.zone_name
  kind         = var.zone_kind
  soa_edit_api = var.soa_edit_api
  nameservers  = var.zone_nameservers
}
|
||||
3
terraform/modules/powerdns/zone/outputs.tf
Normal file
3
terraform/modules/powerdns/zone/outputs.tf
Normal file
@@ -0,0 +1,3 @@
|
||||
output "name" {
  description = "Name of the created zone."
  value       = powerdns_zone.zone.name
}
|
||||
15
terraform/modules/powerdns/zone/variables.tf
Normal file
15
terraform/modules/powerdns/zone/variables.tf
Normal file
@@ -0,0 +1,15 @@
|
||||
variable "zone_name" {
  type        = string
  description = "Zone name."
}

variable "zone_kind" {
  type        = string
  description = "Zone kind (e.g. Native, Master, Slave)."
}

variable "soa_edit_api" {
  type        = string
  description = "SOA-EDIT-API setting applied to the zone."
}

variable "zone_nameservers" {
  type        = list(string)
  description = "Nameservers for the zone."
}
|
||||
10
terraform/modules/powerdns/zone/versions.tf
Normal file
10
terraform/modules/powerdns/zone/versions.tf
Normal file
@@ -0,0 +1,10 @@
|
||||
# Provider requirements for the PowerDNS zone module.
terraform {
  required_version = ">= 1.3.0"

  required_providers {
    powerdns = {
      source = "pan-net/powerdns"
      # version = "1.5.0" # optional: may be pinned
    }
  }
}
|
||||
72
terraform/modules/proxmox/lxc/main.tf
Normal file
72
terraform/modules/proxmox/lxc/main.tf
Normal file
@@ -0,0 +1,72 @@
|
||||
# Proxmox LXC container with optional user account and DNS configuration.
resource "proxmox_virtual_environment_container" "this" {
  node_name    = var.target_node
  vm_id        = var.vm_id
  unprivileged = var.unprivileged

  started       = var.started
  start_on_boot = var.start_on_boot

  cpu {
    cores = var.cores
    units = var.cpu_units
  }

  memory {
    dedicated = var.memory
    swap      = var.swap
  }

  disk {
    datastore_id = var.rootfs_storage
    size         = var.rootfs_size_gib
  }

  features {
    nesting = var.nesting
  }

  network_interface {
    name     = var.netif_name
    bridge   = var.bridge
    enabled  = true
    firewall = var.firewall
  }

  operating_system {
    template_file_id = var.template_file_id
    type             = var.os_type
  }

  initialization {
    hostname = var.hostname

    ip_config {
      ipv4 {
        address = var.ipv4_address
        gateway = var.ipv4_gateway
      }
    }

    # user_account is optional:
    # - emitted only if a password is set (not null) or ssh keys are given
    dynamic "user_account" {
      for_each = (var.password != null || length(var.ssh_public_keys) > 0) ? [1] : []
      content {
        # the provider accepts keys as list(string)
        keys = var.ssh_public_keys

        # password is set only when not null
        password = var.password
      }
    }

    # DNS is optional
    dynamic "dns" {
      for_each = (var.dns_domain != null || length(var.dns_servers) > 0) ? [1] : []
      content {
        domain  = var.dns_domain
        servers = var.dns_servers
      }
    }
  }
}
|
||||
126
terraform/modules/proxmox/lxc/variables.tf
Normal file
126
terraform/modules/proxmox/lxc/variables.tf
Normal file
@@ -0,0 +1,126 @@
|
||||
variable "vm_id" {
  type = number
}

variable "hostname" {
  type = string
}

variable "target_node" {
  type = string
}

variable "template_file_id" {
  type = string
}

variable "os_type" {
  type    = string
  default = "debian"
}

variable "unprivileged" {
  type    = bool
  default = true
}

variable "nesting" {
  type    = bool
  default = true
}

variable "cores" {
  type    = number
  default = 1
}

# Proxmox cpuunits
variable "cpu_units" {
  type    = number
  default = 1024
}

variable "memory" {
  type    = number
  default = 512
}

variable "swap" {
  type    = number
  default = 512
}

variable "rootfs_storage" {
  type    = string
  default = "local-lvm"
}

variable "rootfs_size_gib" {
  type    = number
  default = 8
}

variable "bridge" {
  type    = string
  default = "vmbr0"
}

variable "netif_name" {
  type    = string
  default = "eth0"
}

variable "firewall" {
  type    = bool
  default = true
}

# DHCP: "dhcp"
# Static: "192.168.1.50/24"
variable "ipv4_address" {
  type    = string
  default = "dhcp"

  validation {
    condition     = var.ipv4_address == "dhcp" || can(cidrnetmask(var.ipv4_address))
    error_message = "ipv4_address must be \"dhcp\" or a valid CIDR like 192.168.1.50/24."
  }
}

# gateway is only meaningful when the address is not "dhcp"
variable "ipv4_gateway" {
  type    = string
  default = null
}

# Password is optional (access can be managed via ssh keys instead)
variable "password" {
  type      = string
  default   = null
  sensitive = true
}

variable "ssh_public_keys" {
  type    = list(string)
  default = []
}

variable "dns_domain" {
  type    = string
  default = null
}

variable "dns_servers" {
  type    = list(string)
  default = []
}

variable "started" {
  type    = bool
  default = false
}

variable "start_on_boot" {
  type    = bool
  default = false
}
|
||||
10
terraform/modules/proxmox/lxc/versions.tf
Normal file
10
terraform/modules/proxmox/lxc/versions.tf
Normal file
@@ -0,0 +1,10 @@
|
||||
# Pins the bpg/proxmox provider to an exact version for reproducible plans.
terraform {
  required_version = ">= 1.6"

  required_providers {
    proxmox = {
      source  = "bpg/proxmox"
      version = "0.86.0"
    }
  }
}
|
||||
63
terraform/modules/proxmox/vm/main.tf
Normal file
63
terraform/modules/proxmox/vm/main.tf
Normal file
@@ -0,0 +1,63 @@
|
||||
# Proxmox VM cloned from a template, with optional extra OSD data disks.
resource "proxmox_virtual_environment_vm" "this" {
  name      = var.name
  node_name = var.target_node

  clone {
    vm_id = var.template_id
    full  = true
  }

  cpu {
    cores = var.cpu
    type  = var.cpu_type
  }

  memory {
    dedicated = var.memory
  }

  # Predictable SCSI numbering
  scsi_hardware = "virtio-scsi-single"
  boot_order    = ["scsi0"]

  # OS disk (scsi0)
  disk {
    datastore_id = var.storage
    size         = var.disk_size
    interface    = "scsi0"
  }

  # OSD disks (scsi1, scsi2, ...) — one per entry in var.osd_disks,
  # emitted only when an OSD datastore is configured.
  dynamic "disk" {
    for_each = (var.osd_storage != null && length(var.osd_disks) > 0) ? { for idx, size in var.osd_disks : idx => size } : {}

    content {
      datastore_id = var.osd_storage
      size         = disk.value
      interface    = "scsi${disk.key + 1}"

      file_format = "raw"
      cache       = "none"
      iothread    = true
      discard     = "on"
    }
  }

  network_device {
    bridge      = var.bridge
    model       = "virtio"
    mac_address = var.mac_address
  }

  agent {
    enabled = true
  }

  initialization {
    user_data_file_id = var.user_data_file_id

    ip_config {
      ipv4 { address = "dhcp" }
    }
  }
}
|
||||
55
terraform/modules/proxmox/vm/variables.tf
Normal file
55
terraform/modules/proxmox/vm/variables.tf
Normal file
@@ -0,0 +1,55 @@
|
||||
variable "name" {
  type = string
}

variable "target_node" {
  type = string
}

# VM id of the template to clone from
variable "template_id" {
  type = number
}

variable "cpu" {
  type = number
}

variable "cpu_type" {
  type = string
}

# Dedicated memory — presumably MiB; TODO confirm units against the
# bpg/proxmox provider documentation
variable "memory" {
  type = number
}

variable "disk_size" {
  type = number
}

variable "storage" {
  type = string
}

variable "bridge" {
  type = string
}

# Datastore for the extra OSD disks; null disables them
variable "osd_storage" {
  type    = string
  default = null
}

# Sizes of the extra OSD disks, one disk per entry
variable "osd_disks" {
  type    = list(number)
  default = []
}

variable "user_data_file_id" {
  type    = string
  default = null
}

variable "mac_address" {
  description = "Static MAC for VM NIC (for DHCP reservation)."
  type        = string
}
|
||||
8
terraform/modules/proxmox/vm/versions.tf
Normal file
8
terraform/modules/proxmox/vm/versions.tf
Normal file
@@ -0,0 +1,8 @@
|
||||
# Minimum bpg/proxmox provider version for the VM module.
terraform {
  required_providers {
    proxmox = {
      source  = "bpg/proxmox"
      version = ">= 0.86.0"
    }
  }
}
|
||||
5
terraform/readme.md
Normal file
5
terraform/readme.md
Normal file
@@ -0,0 +1,5 @@
|
||||
Standard Terraform workflow (run from the relevant stack directory):

```bash
terraform init
terraform plan -var-file="terraform.tfvars"
terraform apply -var-file="terraform.tfvars"
```
|
||||
18
terraform/stacks/k8s/configs/config
Normal file
18
terraform/stacks/k8s/configs/config
Normal file
@@ -0,0 +1,18 @@
|
||||
# SECURITY(review): this kubeconfig embeds live admin client credentials and
# disables TLS verification — it should not be committed to the repository.
apiVersion: v1
|
||||
clusters:
|
||||
- cluster:
|
||||
insecure-skip-tls-verify: true
|
||||
server: https://localhost:10563
|
||||
name: kubernetes
|
||||
contexts:
|
||||
- context:
|
||||
cluster: kubernetes
|
||||
user: kubernetes-admin
|
||||
name: kubernetes-admin@kubernetes
|
||||
current-context: kubernetes-admin@kubernetes
|
||||
kind: Config
|
||||
users:
|
||||
- name: kubernetes-admin
|
||||
user:
|
||||
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURLVENDQWhHZ0F3SUJBZ0lJYVI1WXRlRHdabjR3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TmpBeE1EUXhOak0xTXpSYUZ3MHlOekF4TURReE5qUXdNelJhTUR3eApIekFkQmdOVkJBb1RGbXQxWW1WaFpHMDZZMngxYzNSbGNpMWhaRzFwYm5NeEdUQVhCZ05WQkFNVEVHdDFZbVZ5CmJtVjBaWE10WVdSdGFXNHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFESEN4TmgKS3JBQXdPd2ZjK3U3NW1jaUU4RVRwaGM4blNkRWxtVXNJWFdINE5YWkxCK2dzWGtOdzc2NXBxcWkvVjU4cTI0egpVNHJrOE9xRHFoVmFYT2psZmNFSTh1blZaM3BsVEhGRS9uU00rZWkwOUpEVGpzUlNBR2JTWU9iMnFHWWtpTC9sCnlKOCtNNTR0Q0FMZWRtWWk1SGJOdHpoOFFsWHdvK2ozMHRiU1QvSmtGbXNKaTFubk50Q29KYlRSR3ZNVmFEaWkKZS91cDJzcU8rc3ZHS2RxL2E5NStROE9ST1prL3JuWHVqTzZxcjNUMWNnTmJPQlVLdDNGQ1pXK3gyamRwRzRPRgpqUVc4cUs2eHNKMFgrZmh0MHNMZC9NU1pKajdNL1VBalhYb3N6Zm9qS29IMUd4dHZxU2RCTXFLUjQ2T1ZmVjFhCldhSENvLzIzUnJJdUJPTmxBZ01CQUFHalZqQlVNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUsKQmdnckJnRUZCUWNEQWpBTUJnTlZIUk1CQWY4RUFqQUFNQjhHQTFVZEl3UVlNQmFBRkE1SkFIdkhVZEhURFNPRwpmdmdYR1k1VHkzU3BNQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUJBUUJBTHRKMXJHaUZMaU83NmtZZDBDaVNvM0FICmYxbHdWRjJLREExMENTS2FqdmR2QmZlajI5cm9vYm9mMGM5UHVCVWtNYzR2ZitoeEY1a0lhK21BM3FDRmRFK2cKbW1VUVlFdFJXWWZRTmREYStWbTFVSVBJOGpUOXgvSWRYanpvY0UzL1FQZ0JBVEFTMVRmYVBJRktLZU9qMy9sNApDS0UwMks2RklzUklTVVhsMVdnS093SGxrOEwyMThsUTg0WVFZNG4yd1FSNzM3eTdUTnRLZ3BjeU5VN1ZLdFhnCnQ2Z1p4NkxqbnRVZGRzTlkyazg5Q3dmM0lUSENqN040SDE5Mll3VFBZajd0NkI5Q1Y4SXVaZEtKaWpFNkFYbHMKU2J0WjRYWStiUTdGaWIwM25CbTRSSXdMeEdVV3JMbkFnYzJBRnFGK29xSmc5SFFzdEgxVS8rOGhwWkkzCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
|
||||
client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBeHdzVFlTcXdBTURzSDNQcnUrWm5JaFBCRTZZWFBKMG5SSlpsTENGMWgrRFYyU3dmCm9MRjVEY08rdWFhcW92MWVmS3R1TTFPSzVQRHFnNm9WV2x6bzVYM0JDUExwMVdkNlpVeHhSUDUwalBub3RQU1EKMDQ3RVVnQm0wbURtOXFobUpJaS81Y2lmUGpPZUxRZ0MzblptSXVSMnpiYzRmRUpWOEtQbzk5TFcway95WkJacgpDWXRaNXpiUXFDVzAwUnJ6RldnNG9udjdxZHJLanZyTHhpbmF2MnZlZmtQRGtUbVpQNjUxN296dXFxOTA5WElECld6Z1ZDcmR4UW1WdnNkbzNhUnVEaFkwRnZLaXVzYkNkRi9uNGJkTEMzZnpFbVNZK3pQMUFJMTE2TE0zNkl5cUIKOVJzYmI2a25RVEtpa2VPamxYMWRXbG1od3FQOXQwYXlMZ1RqWlFJREFRQUJBb0lCQUdIU2hxNjlmUXlSeERwZApEV0VtaGs5UUtCY1JET0NFVi9VMGVQcXNZd2ZwcncveHlJV0FVMzg1VWJZS1BGaW9iMVNVS0MvZmdpYXNaSVZiCkJPMkpOZ2o3dWZBOCtrRWdmeDNqNk5OQXYyVTl1V21kdk1QTXFaMm5odUVrTUw3TzJveGdEUGZRbHJyS1FaWGUKRnhMZ1c2Z1FZbmNOOGh2WHVrYnZONkw4b3dsdTFOc01HVFJPdG10NEQ0WVptSnlGKzNsakZQcGF3TFlZL3M5awp5TGVaRXBDd2VCdEpDS1ZyODhaMXVVaVh2Mzg0cGEzMVA2VjFnRUt5SFQya3lGUXFvdWNLUDE0Y0FrazNyb0JGCkJ0cjc1WHBjUHYvZGExY2gvU3VQZDdscnV4UUtpZ1dWOWtNZG1TczVON0c2Rm5ZYS9jbnpxUWovZFNmV3lMUkgKRHZmTUN3MENnWUVBMDc4VXZjUVU2aUZBMm1ZLzNFQW5Zbmg1UzhJTE5OSXVQY1pMTkhqSVp5WGlyRFJ4VjRKNApXMWlZdWhUK0lVVFkwYWptVmhLUStMTkhJK0hzTkZOL2svdmM0cTVaa0czWUlMNk5pbWd3Y3FCNjVnbUMrNlo2ClJJQ3Y3YnBkUm9mYTdCMit3TjcxeEx1S282d2RyblArYmNKbzhaY09LQmYvRDlXa0RmNlZUM3NDZ1lFQThLUlkKNDZRWDMxYlRxSFhTWGhQSHV6QWdvMEFHd2JPRlAxT2pPeG4xVFBWQnNHa2N5dnhwOGtYNUlYS2ZNdExZWnpUbQpqdmpjV1dlNHFZTlZiUTF2VVpxT05aSUVrYjZNbGF6NW0xaVZMa3FucktubkJaaHRNNGtyMzhTUEpPY0dZazlHClVlaDBFZmhOZ3Y2Z1VtTTFBSUJTR1NVcjc1OHUvOFdrMzNCL3NwOENnWUJoQUsxNHpjWWpCLzdVem5pODVxcmsKUW5xV3lSc25KSTVJZ0huZFhPTzUxVEpGWDNUNCtPMDRNNXNyekFncnA0V0liczZ1YWF6K01lc0tOaXBtUWtZMAp2ZklQNm4xZlcrTGlCVW1FT1h6UVZsSlc1YzZhaUVhRThVc25KZlFySm51VkpYOUlqaHVhOTZ0b2xhVzNVSzRqCkRDZlZYVFVBQ3hZdTQ5bFhDK1RNMXdLQmdRRE43cGJ6R0RZbHRwUWpFZEVaR1N4UGtId2R1R2tQMHFVdzhFNDgKQVpiZWFQUHlGOEhBSkFvMmZwTVlnSktrVjdOQmZ3L2ZRakN2Z2dlUmFRYnQ4QlZYYkVCT3I4cWhQc1BvUXNMSQpvaUhvSDVNbU82K3NKaWt0ZFRIS3FOY202VjJaTytZZHFpUEtUUWRvRnFiMFdsbTlPQk1KMmJtanNrSHlPQjFECjZXNGVXUUtCZ1FERWY4bzdNZUxLditZdXl
iTW85MnZXaDhiODBsVDhyVGxYa1hDakU3TkRtU1FUbWZEOVFRNFIKeWJ4SHlmR2dwZFIzN1EraWwxWGxoSllmZERFOTNEZW5ZeXdQaUNyWnJNaVVWcWRRQW1JMGc2WjRCSi91RDNZNwpPc3JSUUhvL0VBSnc5aUdHeXVzUmpyNEpPMUFrWDZwbGo5VTU4ZWtIRStSMGh0RW5RUXRzaXc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
|
||||
122
terraform/stacks/k8s/main.tf
Normal file
122
terraform/stacks/k8s/main.tf
Normal file
@@ -0,0 +1,122 @@
|
||||
# Root wiring for the k8s stack: MetalLB (LoadBalancer IPs), ingress-nginx,
# OpenEBS local storage, Crunchy Postgres operator + cluster, and Valkey.

module "metallb_helm" {
  source = "../../modules/k8s/metallb/helm"

  providers = {
    helm = helm
  }
}

module "metallb_resources" {
  source = "../../modules/k8s/metallb/resources"

  providers = {
    kubernetes = kubernetes
  }

  # NOTE(review): hard-coded L2 address range; consider promoting to a variable
  # so the stack is reusable outside this network.
  addresses = ["192.168.0.230-192.168.0.250"]

  # The IPAddressPool/L2Advertisement CRs require the MetalLB CRDs that the
  # helm release above installs.
  depends_on = [module.metallb_helm]
}

module "nginx_ingress" {
  source = "../../modules/k8s/nginx_ingress/helm"

  # Pin the ingress controller's LoadBalancer IP to the MetalLB pool above.
  pool_name = module.metallb_resources.pool_name

  depends_on = [module.metallb_resources]
}

# # ceph
# module "ceph_csi_rbd" {
#   source = "../../modules/k8s/ceph/k8s-ceph-csi-rbd"

#   providers = {
#     helm       = helm
#     kubernetes = kubernetes
#   }

#   namespace     = var.ceph_csi_namespace
#   chart_version = var.ceph_csi_chart_version

#   ceph_cluster_id = var.ceph_cluster_id
#   ceph_monitors   = var.ceph_monitors
# }

# module "ceph_rbd_storage" {
#   source = "../../modules/k8s/ceph/k8s-ceph-rbd-storage"

#   providers = {
#     kubernetes = kubernetes
#   }

#   namespace       = var.ceph_csi_namespace
#   ceph_cluster_id = var.ceph_cluster_id
#   ceph_rbd_pool   = var.ceph_rbd_pool

#   ceph_user_id  = var.ceph_user_id
#   ceph_user_key = var.ceph_user_key

#   # so that the Secret/StorageClass are created after the CSI driver install
#   depends_on = [module.ceph_csi_rbd]
# }

module "openebs" {
  source            = "../../modules/k8s/openebs"
  storageclass_name = "openebs-hostpath-custom"
  base_path         = "/var/openebs/local/"
}

module "crunchy_operator" {
  source = "../../modules/k8s/crunchy-data/operator"

  providers = {
    kubernetes = kubernetes
    helm       = helm
  }

  namespace        = var.crunchy_data_namespace
  chart_version    = var.pgo_chart_version
  release_name     = "pgo"
  single_namespace = true
  replicas         = 1
  debug            = false
}

module "crunchy_postgres_cluster" {
  source = "../../modules/k8s/crunchy-data/postgres-cluster"

  providers = {
    kubernetes = kubernetes
  }

  namespace          = module.crunchy_operator.namespace
  name               = var.cluster_name
  storage_class_name = var.storage_class_name

  postgres_version = 16
  instance_storage = "20Gi"
  backup_storage   = "20Gi"

  # important: the PostgresCluster CRDs must exist first, i.e. only after the
  # operator chart has been installed
  depends_on = [module.crunchy_operator]
}

# valkey
module "valkey" {
  source = "../../modules/k8s/valkey"

  providers = {
    kubernetes = kubernetes
    helm       = helm
  }

  namespace        = var.valkey_namespace
  create_namespace = true

  release_name  = var.release_name
  chart_version = var.chart_version

  values = var.values

  # NOTE(review): plaintext credential committed to VCS — move this to a
  # sensitive variable or an external secret store.
  valkey_password = "password"
}
|
||||
9
terraform/stacks/k8s/providers.tf
Normal file
9
terraform/stacks/k8s/providers.tf
Normal file
@@ -0,0 +1,9 @@
|
||||
# Both providers authenticate via the same kubeconfig file supplied by the
# caller (var.kubeconfig_path).

provider "kubernetes" {
  config_path = var.kubeconfig_path
}

provider "helm" {
  # Attribute (=) syntax: valid for the helm provider >= 3.0, which matches
  # the ">= 3.0.0" constraint in versions.tf. Helm 2.x used a block instead.
  kubernetes = {
    config_path = var.kubeconfig_path
  }
}
|
||||
84
terraform/stacks/k8s/variables.tf
Normal file
84
terraform/stacks/k8s/variables.tf
Normal file
@@ -0,0 +1,84 @@
|
||||
# Inputs for the k8s stack. Defect fixed: most variables had no description
# and one description was not in English; all names, types, and defaults are
# unchanged so existing tfvars keep working.

variable "kubeconfig_path" {
  type        = string
  description = "Path to kubeconfig"
}

# ceph
variable "ceph_cluster_id" {
  type        = string
  description = "Ceph FSID (ceph fsid)"
}

variable "ceph_monitors" {
  type        = list(string)
  description = "Ceph MON endpoints, e.g. [\"192.168.0.100:6789\", \"192.168.0.101:6789\"]"
}

variable "ceph_rbd_pool" {
  type        = string
  description = "RBD pool used for Kubernetes PVs"
  default     = "k8s-rbd"
}

variable "ceph_user_id" {
  type        = string
  description = "CephX user id for the CSI driver, without the 'client.' prefix"
  default     = "k8s-rbd-csi"
}

variable "ceph_user_key" {
  type        = string
  sensitive   = true
  description = "Key from: ceph auth get client.k8s-rbd-csi"
}

variable "ceph_csi_namespace" {
  type        = string
  description = "Namespace for the ceph-csi-rbd chart and its Secret/StorageClass"
  default     = "ceph-csi"
}

variable "ceph_csi_chart_version" {
  type        = string
  description = "ceph-csi-rbd Helm chart version"
  default     = "3.11.0"
}

# crunchy-data
variable "storage_class_name" {
  type        = string
  description = "StorageClass used by the Postgres cluster (your Ceph RBD storageclass)"
  default     = "ceph-rbd"
}

variable "crunchy_data_namespace" {
  type        = string
  description = "Namespace for the Crunchy Postgres operator"
  default     = "postgres-operator"
}

variable "pgo_chart_version" {
  type        = string
  description = "Crunchy pgo Helm chart version"
  default     = "6.0.0"
}

variable "cluster_name" {
  type        = string
  description = "Name of the PostgresCluster resource"
  default     = "hippo"
}

# valkey
variable "valkey_namespace" {
  type        = string
  description = "Namespace for the Valkey release"
  default     = "valkey"
}

variable "release_name" {
  type        = string
  description = "Helm release name for Valkey"
  default     = "valkey"
}

variable "chart_version" {
  type        = string
  description = "Valkey Helm chart version"
  default     = "0.9.2"
}

variable "values" {
  type        = list(string)
  description = "Extra raw YAML values documents passed through to the Valkey chart"
  default     = []
}
|
||||
14
terraform/stacks/k8s/versions.tf
Normal file
14
terraform/stacks/k8s/versions.tf
Normal file
@@ -0,0 +1,14 @@
|
||||
# Provider requirements for the k8s stack. The helm >= 3.0.0 floor matters:
# providers.tf uses the v3 `kubernetes = { ... }` attribute syntax.
terraform {
  required_version = ">= 1.5.0"

  required_providers {
    helm = {
      source  = "hashicorp/helm"
      version = ">= 3.0.0"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = ">= 2.0.0"
    }
  }
}
|
||||
41
terraform/stacks/powerdns/main.tf
Normal file
41
terraform/stacks/powerdns/main.tf
Normal file
@@ -0,0 +1,41 @@
|
||||
# ---------------------------
# Zones (many)
# ---------------------------
module "zones" {
  for_each = var.zones
  source   = "../../modules/powerdns/zone"

  zone_name        = each.key
  zone_kind        = each.value.zone_kind
  soa_edit_api     = each.value.soa_edit_api
  zone_nameservers = each.value.zone_nameservers
}

# ---------------------------
# Records (flatten -> many)
# ---------------------------
locals {
  # Flatten the nested map zones -> records into one flat map keyed
  # "zone::record", suitable for a single for_each. merge([...]...) spreads
  # the per-zone maps; an empty var.zones yields an empty map.
  records_flat = merge([
    for zone_name, z in var.zones : {
      for rec_key, rec in z.records :
      "${zone_name}::${rec_key}" => {
        zone_name = zone_name
        name      = rec.name
        type      = rec.type
        ttl       = rec.ttl
        records   = rec.records
      }
    }
  ]...)
}

module "records" {
  for_each = local.records_flat
  source   = "../../modules/powerdns/record"

  # Referencing the zone module's output (not the raw key) makes each record
  # implicitly depend on its zone being created first.
  zone_name = module.zones[each.value.zone_name].name
  name      = each.value.name
  type      = each.value.type
  ttl       = each.value.ttl
  records   = each.value.records
}
|
||||
4
terraform/stacks/powerdns/providers.tf
Normal file
4
terraform/stacks/powerdns/providers.tf
Normal file
@@ -0,0 +1,4 @@
|
||||
# PowerDNS HTTP API connection; the API key is marked sensitive in variables.tf.
provider "powerdns" {
  server_url = var.pdns_server_url
  api_key    = var.pdns_api_key
}
|
||||
23
terraform/stacks/powerdns/variables.tf
Normal file
23
terraform/stacks/powerdns/variables.tf
Normal file
@@ -0,0 +1,23 @@
|
||||
# Inputs for the PowerDNS stack. Defect fixed: no variable carried a
# description; names, types, and (absence of) defaults are unchanged.

variable "pdns_server_url" {
  type        = string
  description = "Base URL of the PowerDNS API server, e.g. http://pdns.example.com:8081"
}

variable "pdns_api_key" {
  type        = string
  sensitive   = true
  description = "API key used to authenticate against the PowerDNS HTTP API"
}

variable "zones" {
  description = "DNS zones keyed by zone name; each entry defines the zone kind, SOA-EDIT-API policy, nameservers, and a map of records (flattened in main.tf)."
  type = map(object({
    zone_kind        = string
    soa_edit_api     = string
    zone_nameservers = list(string)

    records = map(object({
      name    = string
      type    = string
      ttl     = number
      records = list(string)
    }))
  }))
}
|
||||
10
terraform/stacks/powerdns/versions.tf
Normal file
10
terraform/stacks/powerdns/versions.tf
Normal file
@@ -0,0 +1,10 @@
|
||||
terraform {
  required_version = ">= 1.3.0"

  required_providers {
    powerdns = {
      source = "pan-net/powerdns"
      # version = "1.5.0" # optional: pin for reproducible init
      # NOTE(review): leaving the provider unpinned means `terraform init`
      # may pick up a newer, untested release — consider pinning.
    }
  }
}
|
||||
37
terraform/stacks/proxmox/lxc/main.tf
Normal file
37
terraform/stacks/proxmox/lxc/main.tf
Normal file
@@ -0,0 +1,37 @@
|
||||
# Single LXC container (the "packer main" build host), fully parameterised
# through stack variables and delegated to the shared proxmox/lxc module.
module "lxc_packer_main" {
  source = "../../../modules/proxmox/lxc"

  # identity / placement
  vm_id       = var.lxc_packer_main_vm_id
  hostname    = var.lxc_packer_main_hostname
  target_node = var.target_node

  # base image
  template_file_id = var.lxc_template_file_id
  os_type          = var.lxc_os_type

  # isolation
  unprivileged = var.lxc_unprivileged
  nesting      = var.lxc_nesting

  # compute
  cores     = var.lxc_cores
  cpu_units = var.lxc_cpu_units
  memory    = var.lxc_memory
  swap      = var.lxc_swap

  # root filesystem
  rootfs_storage  = var.lxc_rootfs_storage
  rootfs_size_gib = var.lxc_rootfs_size_gib

  # networking
  bridge     = var.bridge
  netif_name = var.lxc_netif_name
  firewall   = var.lxc_firewall

  ipv4_address = var.lxc_ipv4_address
  ipv4_gateway = var.lxc_ipv4_gateway

  dns_domain  = var.lxc_dns_domain
  dns_servers = var.lxc_dns_servers

  # lifecycle
  started       = var.lxc_started
  start_on_boot = var.lxc_start_on_boot

  # access
  password        = var.lxc_root_password
  ssh_public_keys = var.lxc_ssh_public_keys
}
|
||||
10
terraform/stacks/proxmox/lxc/providers.tf
Executable file
10
terraform/stacks/proxmox/lxc/providers.tf
Executable file
@@ -0,0 +1,10 @@
|
||||
provider "proxmox" {
  endpoint  = var.pm_api_url
  api_token = var.pm_api_token
  # NOTE(review): insecure = true disables TLS certificate verification for
  # the API endpoint — acceptable for a lab, but consider a trusted CA.
  insecure = true

  # SSH access to the node (used by the provider for operations the API
  # does not cover). NOTE(review): password auth; key-based would be safer.
  ssh {
    username = var.pm_user
    password = var.pm_password
  }
}
|
||||
137
terraform/stacks/proxmox/lxc/variables.tf
Normal file
137
terraform/stacks/proxmox/lxc/variables.tf
Normal file
@@ -0,0 +1,137 @@
|
||||
# Inputs for the proxmox/lxc stack. Defects fixed: a Russian section comment
# and missing descriptions; all names, types, and defaults are unchanged.

# --- Proxmox provider creds ---
variable "pm_api_url" {
  type        = string
  description = "Proxmox API endpoint, e.g. https://proxmox:8006/api2/json"
}

variable "pm_api_token" {
  type        = string
  sensitive   = true
  description = "Proxmox API token, e.g. root@pam!terraform=..."
}

variable "pm_user" {
  type        = string
  description = "SSH username for the Proxmox node"
}

variable "pm_password" {
  type        = string
  sensitive   = true
  description = "SSH password for the Proxmox node"
}

# --- Target infra ---
variable "target_node" {
  type        = string
  description = "Target Proxmox node name"
}

variable "bridge" {
  type        = string
  description = "Network bridge for the container interface"
  default     = "vmbr0"
}

# --- The specific LXC container ---
variable "lxc_packer_main_vm_id" {
  type        = number
  description = "Proxmox VM ID for the packer-main container"
}

variable "lxc_packer_main_hostname" {
  type        = string
  description = "Hostname of the packer-main container"
}

variable "lxc_template_file_id" {
  type        = string
  description = "Datastore file id of the LXC template to create from"
}

variable "lxc_os_type" {
  type        = string
  description = "Container OS type"
  default     = "debian"
}

variable "lxc_unprivileged" {
  type        = bool
  description = "Run the container unprivileged"
  default     = true
}

variable "lxc_nesting" {
  type        = bool
  description = "Enable the nesting feature (needed e.g. for Docker inside LXC)"
  default     = true
}

variable "lxc_cores" {
  type        = number
  description = "Number of CPU cores"
  default     = 1
}

variable "lxc_cpu_units" {
  type        = number
  description = "CPU weight (cpuunits) relative to other guests"
  default     = 1024
}

variable "lxc_memory" {
  type        = number
  description = "Memory in MiB"
  default     = 512
}

variable "lxc_swap" {
  type        = number
  description = "Swap in MiB"
  default     = 512
}

variable "lxc_rootfs_storage" {
  type        = string
  description = "Datastore for the root filesystem"
  default     = "local-lvm"
}

variable "lxc_rootfs_size_gib" {
  type        = number
  description = "Root filesystem size in GiB"
  default     = 8
}

variable "lxc_netif_name" {
  type        = string
  description = "Name of the network interface inside the container"
  default     = "eth0"
}

variable "lxc_firewall" {
  type        = bool
  description = "Enable the Proxmox firewall on the interface"
  default     = true
}

variable "lxc_ipv4_address" {
  type        = string
  description = "IPv4 address in CIDR form, or \"dhcp\""
  default     = "dhcp"
}

variable "lxc_ipv4_gateway" {
  type        = string
  description = "IPv4 gateway (null when using DHCP)"
  default     = null
}

variable "lxc_dns_domain" {
  type        = string
  description = "DNS search domain (null to inherit from the node)"
  default     = null
}

variable "lxc_dns_servers" {
  type        = list(string)
  description = "DNS servers (empty to inherit from the node)"
  default     = []
}

variable "lxc_started" {
  type        = bool
  description = "Start the container after creation"
  default     = true
}

variable "lxc_start_on_boot" {
  type        = bool
  description = "Start the container when the node boots"
  default     = true
}

variable "lxc_root_password" {
  type        = string
  sensitive   = true
  description = "Root password inside the container (null to leave unset)"
  default     = null
}

variable "lxc_ssh_public_keys" {
  type        = list(string)
  description = "SSH public keys injected for root"
  default     = []
}
|
||||
10
terraform/stacks/proxmox/lxc/versions.tf
Normal file
10
terraform/stacks/proxmox/lxc/versions.tf
Normal file
@@ -0,0 +1,10 @@
|
||||
# Provider requirements; bpg/proxmox is pinned exactly, matching the vm stack.
terraform {
  required_version = ">= 1.6"

  required_providers {
    proxmox = {
      source  = "bpg/proxmox"
      version = "0.86.0"
    }
  }
}
|
||||
36
terraform/stacks/proxmox/vm/cloud-init/user-data.yaml.tpl
Normal file
36
terraform/stacks/proxmox/vm/cloud-init/user-data.yaml.tpl
Normal file
@@ -0,0 +1,36 @@
|
||||
#cloud-config
# Cloud-init user-data template rendered per-VM by Terraform (templatefile);
# only ${hostname} is interpolated, everything else is static.
hostname: ${hostname}
manage_etc_hosts: true

package_update: true
package_upgrade: true

packages:
  - parted
# NOTE(review): runcmd below also uses growpart (cloud-guest-utils) —
# presumably present in the base template image; verify or add it here.

# user
users:
  - name: "adminuser"
    groups: sudo
    sudo: ALL=(ALL) NOPASSWD:ALL
    lock_passwd: false
    # NOTE(review): a shared password hash and SSH public key are committed
    # to VCS for every VM — consider templating them in from Terraform
    # variables instead of hard-coding.
    passwd: "$6$qL4GPP3AhSodbF9U$Lu4.VSpCSlAVPNIZyPNme0AH8HhbVYE6SAm3P3Er7KSLIYydj799tZBz/n6NRzzRYhyQh9a4h8m8WCbjw2nXg1"
    shell: /bin/bash
    ssh_authorized_keys:
      - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBcTy4Zcj3MHkW7XvnZhakl64vZXnjzDJymYlo+Ax8FM dev-kyiv01-vm-default-main-01-adminuser"

ssh_pwauth: false

runcmd:
  - |
    set -euxo pipefail

    # grow the extended + LVM partitions to the end of the disk
    # (|| true: steps are no-ops/failures when already at full size)
    growpart /dev/sda 2 || true
    growpart /dev/sda 5 || true
    parted -s /dev/sda "resizepart 2 100%" "resizepart 5 100%" || true
    partprobe /dev/sda || true

    # grow PV -> LV(root) -> filesystem; these are NOT || true, so a failure
    # here surfaces as a cloud-init error (set -e above)
    pvresize /dev/sda5
    lvextend -l +100%FREE -r /dev/vg0/root
|
||||
72
terraform/stacks/proxmox/vm/locals.tf
Normal file
72
terraform/stacks/proxmox/vm/locals.tf
Normal file
@@ -0,0 +1,72 @@
|
||||
# Inventory of VMs for this stack, consumed via for_each in main.tf.
# Required keys: name, cpu, memory, disk_size, mac (MACs are assigned
# statically so DHCP reservations stay stable). cpu_type, osd_storage and
# osd_disks are optional — main.tf reads them with try(); only the ceph
# nodes carry extra OSD data disks.
locals {
  vms = {
    dev_kyiv01_vm_dns_main_01 = {
      name      = "dev-kyiv01-vm-dns-main-01"
      cpu       = 2
      cpu_type  = "x86-64-v2"
      memory    = 3072
      disk_size = 20
      mac       = "02:7A:4C:11:90:64"
    }

    dev_kyiv01_vm_ntp_main_01 = {
      name      = "dev-kyiv01-vm-ntp-main-01"
      cpu       = 1
      cpu_type  = "x86-64-v2"
      memory    = 2048
      disk_size = 8
      mac       = "02:7A:4C:11:90:65"
    }

    dev_kyiv01_vm_ceph_main_01 = {
      name        = "dev-kyiv01-vm-ceph-main-01"
      cpu         = 2
      cpu_type    = "x86-64-v2"
      memory      = 4096
      disk_size   = 30
      mac         = "02:7A:4C:11:90:66"
      osd_storage = "ceph-osd"
      osd_disks   = [150, 150]
    }

    dev_kyiv01_vm_ceph_main_02 = {
      name        = "dev-kyiv01-vm-ceph-main-02"
      cpu         = 2
      cpu_type    = "x86-64-v2"
      memory      = 4096
      disk_size   = 30
      mac         = "02:7A:4C:11:90:67"
      osd_storage = "ceph-osd"
      osd_disks   = [150, 150]
    }

    dev_kyiv01_vm_ceph_main_03 = {
      name        = "dev-kyiv01-vm-ceph-main-03"
      cpu         = 2
      cpu_type    = "x86-64-v2"
      memory      = 4096
      disk_size   = 30
      mac         = "02:7A:4C:11:90:68"
      osd_storage = "ceph-osd"
      osd_disks   = [150, 150]
    }

    dev_kyiv01_vm_k8s_master_01 = {
      name      = "dev-kyiv01-vm-k8s-master-01"
      cpu       = 2
      cpu_type  = "x86-64-v2"
      memory    = 4096
      disk_size = 40
      mac       = "02:7A:4C:11:90:69"
    }

    dev_kyiv01_vm_k8s_worker_01 = {
      name      = "dev-kyiv01-vm-k8s-worker-01"
      cpu       = 4
      cpu_type  = "x86-64-v2"
      memory    = 8192
      disk_size = 60
      mac       = "02:7A:4C:11:90:6A"
    }
  }
}
|
||||
41
terraform/stacks/proxmox/vm/main.tf
Normal file
41
terraform/stacks/proxmox/vm/main.tf
Normal file
@@ -0,0 +1,41 @@
|
||||
# 1) For each VM, upload a cloud-init user-data snippet to the node
resource "proxmox_virtual_environment_file" "user_data" {
  for_each = local.vms

  content_type = "snippets"
  datastore_id = var.snippets_storage
  node_name    = var.target_node

  source_raw {
    # Only hostname is templated; see cloud-init/user-data.yaml.tpl.
    data = templatefile("${path.module}/cloud-init/user-data.yaml.tpl", {
      hostname = each.value.name
    })

    file_name = "user-data-${each.value.name}.yaml"
  }
}

# 2) Create the VMs and attach each one's user-data file
module "vm" {
  source   = "../../../modules/proxmox/vm"
  for_each = local.vms

  name        = each.value.name
  target_node = var.target_node
  template_id = var.template_id

  cpu = each.value.cpu
  # optional per-VM override in locals; qemu64 is the conservative fallback
  cpu_type = try(each.value.cpu_type, "qemu64")
  memory   = each.value.memory

  disk_size = each.value.disk_size
  storage   = var.storage
  bridge    = var.bridge

  # extra OSD data disks — only the ceph nodes define these in locals
  osd_storage = try(each.value.osd_storage, null)
  osd_disks   = try(each.value.osd_disks, [])

  user_data_file_id = proxmox_virtual_environment_file.user_data[each.key].id

  mac_address = each.value.mac
}
|
||||
17
terraform/stacks/proxmox/vm/providers.tf
Normal file
17
terraform/stacks/proxmox/vm/providers.tf
Normal file
@@ -0,0 +1,17 @@
|
||||
provider "proxmox" {
  endpoint  = var.pm_api_url
  api_token = var.pm_api_token
  # NOTE(review): insecure = true disables TLS certificate verification.
  insecure = true

  ssh {
    agent    = false
    username = "root"
    # NOTE(review): absolute workspace path and hard-coded node address/port
    # below are committed to VCS — consider variables (cf. the lxc stack,
    # which takes pm_user/pm_password as inputs instead).
    private_key = file("/workspaces/infrastructure/.ssh/dev-kyiv01-proxmox-main-01")

    node {
      name    = "proxmox-main-kyiv-01"
      address = "176.36.225.227"
      port    = 25105
    }
  }
}
|
||||
50
terraform/stacks/proxmox/vm/variables.tf
Normal file
50
terraform/stacks/proxmox/vm/variables.tf
Normal file
@@ -0,0 +1,50 @@
|
||||
# Inputs for the proxmox/vm stack.

variable "pm_api_url" {
  type        = string
  description = "Proxmox API endpoint, e.g. https://proxmox:8006/api2/json"
}

variable "pm_api_token" {
  type        = string
  description = "Proxmox API token: root@pam!terraform=..."
  sensitive   = true
}

# NOTE(review): pm_user/pm_password appear unused — providers.tf in this
# stack SSHes as hard-coded root with a private key file; verify and either
# wire these in or drop them.
variable "pm_user" {
  type        = string
  description = "SSH username for Proxmox node"
  default     = "root"
}

variable "pm_password" {
  type        = string
  description = "SSH password for Proxmox node"
  sensitive   = true
}

variable "target_node" {
  type        = string
  description = "Target Proxmox node name"
}

variable "template_id" {
  type        = number
  description = "Template VM ID to clone from"
}

variable "storage" {
  type        = string
  description = "Default datastore for OS disk"
  default     = "local-lvm"
}

variable "bridge" {
  type        = string
  description = "Default VM bridge"
  default     = "vmbr0"
}

variable "snippets_storage" {
  type        = string
  description = "Datastore where 'snippets' content is enabled (usually 'local')"
  default     = "local"
}
|
||||
10
terraform/stacks/proxmox/vm/versions.tf
Normal file
10
terraform/stacks/proxmox/vm/versions.tf
Normal file
@@ -0,0 +1,10 @@
|
||||
# Provider requirements; bpg/proxmox is pinned exactly, matching the lxc stack.
terraform {
  required_version = ">= 1.6"

  required_providers {
    proxmox = {
      source  = "bpg/proxmox"
      version = "0.86.0"
    }
  }
}
||||
Reference in New Issue
Block a user