commit f243f440c3
Author: Hrankin, Aleksandr (contracted)
Date:   2026-02-19 11:34:13 +00:00

191 changed files with 6183 additions and 0 deletions

View File

@@ -0,0 +1,18 @@
apiVersion: v1
clusters:
- cluster:
insecure-skip-tls-verify: true
server: https://localhost:10563
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: kubernetes-admin
name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
users:
- name: kubernetes-admin
user:
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURLVENDQWhHZ0F3SUJBZ0lJYVI1WXRlRHdabjR3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TmpBeE1EUXhOak0xTXpSYUZ3MHlOekF4TURReE5qUXdNelJhTUR3eApIekFkQmdOVkJBb1RGbXQxWW1WaFpHMDZZMngxYzNSbGNpMWhaRzFwYm5NeEdUQVhCZ05WQkFNVEVHdDFZbVZ5CmJtVjBaWE10WVdSdGFXNHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFESEN4TmgKS3JBQXdPd2ZjK3U3NW1jaUU4RVRwaGM4blNkRWxtVXNJWFdINE5YWkxCK2dzWGtOdzc2NXBxcWkvVjU4cTI0egpVNHJrOE9xRHFoVmFYT2psZmNFSTh1blZaM3BsVEhGRS9uU00rZWkwOUpEVGpzUlNBR2JTWU9iMnFHWWtpTC9sCnlKOCtNNTR0Q0FMZWRtWWk1SGJOdHpoOFFsWHdvK2ozMHRiU1QvSmtGbXNKaTFubk50Q29KYlRSR3ZNVmFEaWkKZS91cDJzcU8rc3ZHS2RxL2E5NStROE9ST1prL3JuWHVqTzZxcjNUMWNnTmJPQlVLdDNGQ1pXK3gyamRwRzRPRgpqUVc4cUs2eHNKMFgrZmh0MHNMZC9NU1pKajdNL1VBalhYb3N6Zm9qS29IMUd4dHZxU2RCTXFLUjQ2T1ZmVjFhCldhSENvLzIzUnJJdUJPTmxBZ01CQUFHalZqQlVNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUsKQmdnckJnRUZCUWNEQWpBTUJnTlZIUk1CQWY4RUFqQUFNQjhHQTFVZEl3UVlNQmFBRkE1SkFIdkhVZEhURFNPRwpmdmdYR1k1VHkzU3BNQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUJBUUJBTHRKMXJHaUZMaU83NmtZZDBDaVNvM0FICmYxbHdWRjJLREExMENTS2FqdmR2QmZlajI5cm9vYm9mMGM5UHVCVWtNYzR2ZitoeEY1a0lhK21BM3FDRmRFK2cKbW1VUVlFdFJXWWZRTmREYStWbTFVSVBJOGpUOXgvSWRYanpvY0UzL1FQZ0JBVEFTMVRmYVBJRktLZU9qMy9sNApDS0UwMks2RklzUklTVVhsMVdnS093SGxrOEwyMThsUTg0WVFZNG4yd1FSNzM3eTdUTnRLZ3BjeU5VN1ZLdFhnCnQ2Z1p4NkxqbnRVZGRzTlkyazg5Q3dmM0lUSENqN040SDE5Mll3VFBZajd0NkI5Q1Y4SXVaZEtKaWpFNkFYbHMKU2J0WjRYWStiUTdGaWIwM25CbTRSSXdMeEdVV3JMbkFnYzJBRnFGK29xSmc5SFFzdEgxVS8rOGhwWkkzCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBeHdzVFlTcXdBTURzSDNQcnUrWm5JaFBCRTZZWFBKMG5SSlpsTENGMWgrRFYyU3dmCm9MRjVEY08rdWFhcW92MWVmS3R1TTFPSzVQRHFnNm9WV2x6bzVYM0JDUExwMVdkNlpVeHhSUDUwalBub3RQU1EKMDQ3RVVnQm0wbURtOXFobUpJaS81Y2lmUGpPZUxRZ0MzblptSXVSMnpiYzRmRUpWOEtQbzk5TFcway95WkJacgpDWXRaNXpiUXFDVzAwUnJ6RldnNG9udjdxZHJLanZyTHhpbmF2MnZlZmtQRGtUbVpQNjUxN296dXFxOTA5WElECld6Z1ZDcmR4UW1WdnNkbzNhUnVEaFkwRnZLaXVzYkNkRi9uNGJkTEMzZnpFbVNZK3pQMUFJMTE2TE0zNkl5cUIKOVJzYmI2a25RVEtpa2VPamxYMWRXbG1od3FQOXQwYXlMZ1RqWlFJREFRQUJBb0lCQUdIU2hxNjlmUXlSeERwZApEV0VtaGs5UUtCY1JET0NFVi9VMGVQcXNZd2ZwcncveHlJV0FVMzg1VWJZS1BGaW9iMVNVS0MvZmdpYXNaSVZiCkJPMkpOZ2o3dWZBOCtrRWdmeDNqNk5OQXYyVTl1V21kdk1QTXFaMm5odUVrTUw3TzJveGdEUGZRbHJyS1FaWGUKRnhMZ1c2Z1FZbmNOOGh2WHVrYnZONkw4b3dsdTFOc01HVFJPdG10NEQ0WVptSnlGKzNsakZQcGF3TFlZL3M5awp5TGVaRXBDd2VCdEpDS1ZyODhaMXVVaVh2Mzg0cGEzMVA2VjFnRUt5SFQya3lGUXFvdWNLUDE0Y0FrazNyb0JGCkJ0cjc1WHBjUHYvZGExY2gvU3VQZDdscnV4UUtpZ1dWOWtNZG1TczVON0c2Rm5ZYS9jbnpxUWovZFNmV3lMUkgKRHZmTUN3MENnWUVBMDc4VXZjUVU2aUZBMm1ZLzNFQW5Zbmg1UzhJTE5OSXVQY1pMTkhqSVp5WGlyRFJ4VjRKNApXMWlZdWhUK0lVVFkwYWptVmhLUStMTkhJK0hzTkZOL2svdmM0cTVaa0czWUlMNk5pbWd3Y3FCNjVnbUMrNlo2ClJJQ3Y3YnBkUm9mYTdCMit3TjcxeEx1S282d2RyblArYmNKbzhaY09LQmYvRDlXa0RmNlZUM3NDZ1lFQThLUlkKNDZRWDMxYlRxSFhTWGhQSHV6QWdvMEFHd2JPRlAxT2pPeG4xVFBWQnNHa2N5dnhwOGtYNUlYS2ZNdExZWnpUbQpqdmpjV1dlNHFZTlZiUTF2VVpxT05aSUVrYjZNbGF6NW0xaVZMa3FucktubkJaaHRNNGtyMzhTUEpPY0dZazlHClVlaDBFZmhOZ3Y2Z1VtTTFBSUJTR1NVcjc1OHUvOFdrMzNCL3NwOENnWUJoQUsxNHpjWWpCLzdVem5pODVxcmsKUW5xV3lSc25KSTVJZ0huZFhPTzUxVEpGWDNUNCtPMDRNNXNyekFncnA0V0liczZ1YWF6K01lc0tOaXBtUWtZMAp2ZklQNm4xZlcrTGlCVW1FT1h6UVZsSlc1YzZhaUVhRThVc25KZlFySm51VkpYOUlqaHVhOTZ0b2xhVzNVSzRqCkRDZlZYVFVBQ3hZdTQ5bFhDK1RNMXdLQmdRRE43cGJ6R0RZbHRwUWpFZEVaR1N4UGtId2R1R2tQMHFVdzhFNDgKQVpiZWFQUHlGOEhBSkFvMmZwTVlnSktrVjdOQmZ3L2ZRakN2Z2dlUmFRYnQ4QlZYYkVCT3I4cWhQc1BvUXNMSQpvaUhvSDVNbU82K3NKaWt0ZFRIS3FOY202VjJaTytZZHFpUEtUUWRvRnFiMFdsbTlPQk1KMmJtanNrSHlPQjFECjZXNGVXUUtCZ1FERWY4bzdNZUxLditZdXliTW85MnZXaDhiODBsVDhyVGxYa1hDakU3TkRtU1FUbWZEOVFRNFIKeWJ4SHlmR2dwZFIzN1EraWwxWGxoSllmZERFOTNEZW5ZeXdQaUNyWnJNaVVWcWRRQW1JMGc2WjRCSi91RDNZNwpPc3JSUUhvL0VBSnc5aUdHeXVzUmpyNEpPMUFrWDZwbGo5VTU4ZWtIRStSMGh0RW5RUXRzaXc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=

View File

@@ -0,0 +1,122 @@
module "metallb_helm" {
source = "../../modules/k8s/metallb/helm"
providers = {
helm = helm
}
}
module "metallb_resources" {
source = "../../modules/k8s/metallb/resources"
providers = {
kubernetes = kubernetes
}
addresses = ["192.168.0.230-192.168.0.250"]
depends_on = [module.metallb_helm]
}
module "nginx_ingress" {
source = "../../modules/k8s/nginx_ingress/helm"
pool_name = module.metallb_resources.pool_name
depends_on = [module.metallb_resources]
}
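
The address pool wiring above is the key handoff: metallb_resources turns addresses into MetalLB custom resources and exports pool_name, which nginx_ingress presumably uses to request an address from that pool. A minimal sketch of what a resources module like this typically creates, assuming it uses kubernetes_manifest internally (resource names and the metallb-system namespace are illustrative, not the module's actual code):

# Illustrative sketch only; assumed internals of ../../modules/k8s/metallb/resources.
resource "kubernetes_manifest" "pool" {
  manifest = {
    apiVersion = "metallb.io/v1beta1"
    kind       = "IPAddressPool"
    metadata   = { name = "default-pool", namespace = "metallb-system" }
    spec       = { addresses = var.addresses }   # e.g. ["192.168.0.230-192.168.0.250"]
  }
}
resource "kubernetes_manifest" "l2" {
  manifest = {
    apiVersion = "metallb.io/v1beta1"
    kind       = "L2Advertisement"
    metadata   = { name = "default-l2", namespace = "metallb-system" }
    spec       = { ipAddressPools = ["default-pool"] }
  }
}
output "pool_name" {
  value = "default-pool"
}

If the module does use kubernetes_manifest, the depends_on on metallb_helm is what guarantees the MetalLB CRDs already exist when these resources are applied.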
# # ceph
# module "ceph_csi_rbd" {
# source = "../../modules/k8s/ceph/k8s-ceph-csi-rbd"
# providers = {
# helm = helm
# kubernetes = kubernetes
# }
# namespace = var.ceph_csi_namespace
# chart_version = var.ceph_csi_chart_version
# ceph_cluster_id = var.ceph_cluster_id
# ceph_monitors = var.ceph_monitors
# }
# module "ceph_rbd_storage" {
# source = "../../modules/k8s/ceph/k8s-ceph-rbd-storage"
# providers = {
# kubernetes = kubernetes
# }
# namespace = var.ceph_csi_namespace
# ceph_cluster_id = var.ceph_cluster_id
# ceph_rbd_pool = var.ceph_rbd_pool
# ceph_user_id = var.ceph_user_id
# ceph_user_key = var.ceph_user_key
# # so the Secret/StorageClass are created only after the CSI driver is installed
# depends_on = [module.ceph_csi_rbd]
# }
module "openebs" {
source = "../../modules/k8s/openebs"
storageclass_name = "openebs-hostpath-custom"
base_path = "/var/openebs/local/"
}
module "crunchy_operator" {
source = "../../modules/k8s/crunchy-data/operator"
providers = {
kubernetes = kubernetes
helm = helm
}
namespace = var.crunchy_data_namespace
chart_version = var.pgo_chart_version
release_name = "pgo"
single_namespace = true
replicas = 1
debug = false
}
module "crunchy_postgres_cluster" {
source = "../../modules/k8s/crunchy-data/postgres-cluster"
providers = {
kubernetes = kubernetes
}
namespace = module.crunchy_operator.namespace
name = var.cluster_name
storage_class_name = var.storage_class_name
postgres_version = 16
instance_storage = "20Gi"
backup_storage = "20Gi"
# important: the CRDs only exist after the operator is installed
depends_on = [module.crunchy_operator]
}
# valkey
module "valkey" {
source = "../../modules/k8s/valkey"
providers = {
kubernetes = kubernetes
helm = helm
}
namespace = var.valkey_namespace
create_namespace = true
release_name = var.release_name
chart_version = var.chart_version
values = var.values
valkey_password = "password" # placeholder credential; see the variable sketch below
}
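
valkey_password is a hardcoded placeholder above. A hedged sketch of moving it into a sensitive input variable instead (the variable name is illustrative and would live in variables.tf):

variable "valkey_password" {
  type      = string
  sensitive = true
}
# ...and in module "valkey":
#   valkey_password = var.valkey_password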

View File

@@ -0,0 +1,9 @@
provider "kubernetes" {
config_path = var.kubeconfig_path
}
provider "helm" {
kubernetes = {
config_path = var.kubeconfig_path
}
}

View File

@@ -0,0 +1,84 @@
variable "kubeconfig_path" {
type = string
description = "Path to kubeconfig"
}
# ceph
variable "ceph_cluster_id" {
type = string
description = "Ceph FSID (ceph fsid)"
}
variable "ceph_monitors" {
type = list(string)
description = "Ceph MON endpoints, e.g. [\"192.168.0.100:6789\", \"192.168.0.101:6789\"]"
}
variable "ceph_rbd_pool" {
type = string
default = "k8s-rbd"
}
variable "ceph_user_id" {
type = string
default = "k8s-rbd-csi" # без 'client.'
}
variable "ceph_user_key" {
type = string
sensitive = true
description = "Key from: ceph auth get client.k8s-rbd-csi"
}
variable "ceph_csi_namespace" {
type = string
default = "ceph-csi"
}
variable "ceph_csi_chart_version" {
type = string
default = "3.11.0"
}
# crunchy-data
variable "storage_class_name" {
type = string
description = "Твой Ceph RBD storageclass"
default = "ceph-rbd"
}
variable "crunchy_data_namespace" {
type = string
default = "postgres-operator"
}
variable "pgo_chart_version" {
type = string
default = "6.0.0"
}
variable "cluster_name" {
type = string
default = "hippo"
}
# valkey
variable "valkey_namespace" {
type = string
default = "valkey"
}
variable "release_name" {
type = string
default = "valkey"
}
variable "chart_version" {
type = string
default = "0.9.2"
}
variable "values" {
type = list(string)
default = []
}
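
Note that ceph_cluster_id, ceph_monitors and ceph_user_key have no defaults, so they remain required inputs even while the Ceph modules in main.tf are commented out. A minimal terraform.tfvars sketch for this stack (every value below is an illustrative placeholder):

kubeconfig_path = "./kubeconfig"
ceph_cluster_id = "00000000-0000-0000-0000-000000000000"       # ceph fsid
ceph_monitors   = ["192.168.0.100:6789", "192.168.0.101:6789"]
ceph_user_key   = "AQ..."                                       # key for client.k8s-rbd-csi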

View File

@@ -0,0 +1,14 @@
terraform {
required_version = ">= 1.5.0"
required_providers {
helm = {
source = "hashicorp/helm"
version = ">= 3.0.0"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = ">= 2.0.0"
}
}
}

View File

@@ -0,0 +1,41 @@
# ---------------------------
# Zones (many)
# ---------------------------
module "zones" {
for_each = var.zones
source = "../../modules/powerdns/zone"
zone_name = each.key
zone_kind = each.value.zone_kind
soa_edit_api = each.value.soa_edit_api
zone_nameservers = each.value.zone_nameservers
}
# ---------------------------
# Records (flatten -> many)
# ---------------------------
locals {
records_flat = merge([
for zone_name, z in var.zones : {
for rec_key, rec in z.records :
"${zone_name}::${rec_key}" => {
zone_name = zone_name
name = rec.name
type = rec.type
ttl = rec.ttl
records = rec.records
}
}
]...)
}
module "records" {
for_each = local.records_flat
source = "../../modules/powerdns/record"
zone_name = module.zones[each.value.zone_name].name
name = each.value.name
type = each.value.type
ttl = each.value.ttl
records = each.value.records
}
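
The locals block above flattens the nested zones/records map into one flat map keyed "<zone>::<record key>", so each record gets its own module instance. A minimal sketch of a matching zones value (all names and addresses are illustrative); the single record below would produce the key "example.internal.::ns1":

zones = {
  "example.internal." = {
    zone_kind        = "Native"
    soa_edit_api     = "INCEPTION-INCREMENT"
    zone_nameservers = ["ns1.example.internal."]
    records = {
      ns1 = {
        name    = "ns1.example.internal."
        type    = "A"
        ttl     = 300
        records = ["192.168.0.53"]
      }
    }
  }
}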

View File

@@ -0,0 +1,4 @@
provider "powerdns" {
server_url = var.pdns_server_url
api_key = var.pdns_api_key
}

View File

@@ -0,0 +1,23 @@
variable "pdns_server_url" {
type = string
}
variable "pdns_api_key" {
type = string
sensitive = true
}
variable "zones" {
type = map(object({
zone_kind = string
soa_edit_api = string
zone_nameservers = list(string)
records = map(object({
name = string
type = string
ttl = number
records = list(string)
}))
}))
}

View File

@@ -0,0 +1,10 @@
terraform {
required_version = ">= 1.3.0"
required_providers {
powerdns = {
source = "pan-net/powerdns"
# version = "1.5.0" # опционально: можно зафиксировать
}
}
}

View File

@@ -0,0 +1,37 @@
module "lxc_packer_main" {
source = "../../../modules/proxmox/lxc"
vm_id = var.lxc_packer_main_vm_id
hostname = var.lxc_packer_main_hostname
target_node = var.target_node
template_file_id = var.lxc_template_file_id
os_type = var.lxc_os_type
unprivileged = var.lxc_unprivileged
nesting = var.lxc_nesting
cores = var.lxc_cores
cpu_units = var.lxc_cpu_units
memory = var.lxc_memory
swap = var.lxc_swap
rootfs_storage = var.lxc_rootfs_storage
rootfs_size_gib = var.lxc_rootfs_size_gib
bridge = var.bridge
netif_name = var.lxc_netif_name
firewall = var.lxc_firewall
ipv4_address = var.lxc_ipv4_address
ipv4_gateway = var.lxc_ipv4_gateway
dns_domain = var.lxc_dns_domain
dns_servers = var.lxc_dns_servers
started = var.lxc_started
start_on_boot = var.lxc_start_on_boot
password = var.lxc_root_password
ssh_public_keys = var.lxc_ssh_public_keys
}
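
A hedged sketch of the per-container inputs this stack expects (all values below are illustrative; the template file_id follows the bpg/proxmox "<datastore>:vztmpl/<file>" convention):

lxc_packer_main_vm_id    = 200
lxc_packer_main_hostname = "packer-main"
target_node              = "proxmox-main-kyiv-01"
lxc_template_file_id     = "local:vztmpl/debian-12-standard_12.7-1_amd64.tar.zst"
lxc_ipv4_address         = "192.168.0.210/24"
lxc_ipv4_gateway         = "192.168.0.1"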

View File

@@ -0,0 +1,10 @@
provider "proxmox" {
endpoint = var.pm_api_url
api_token = var.pm_api_token
insecure = true
ssh {
username = var.pm_user
password = var.pm_password
}
}

View File

@@ -0,0 +1,137 @@
# --- Proxmox provider creds ---
variable "pm_api_url" {
type = string
}
variable "pm_api_token" {
type = string
sensitive = true
}
variable "pm_user" {
type = string
}
variable "pm_password" {
type = string
sensitive = true
}
# --- Target infra ---
variable "target_node" {
type = string
}
variable "bridge" {
type = string
default = "vmbr0"
}
# --- Specific LXC container (packer-main) ---
variable "lxc_packer_main_vm_id" {
type = number
}
variable "lxc_packer_main_hostname" {
type = string
}
variable "lxc_template_file_id" {
type = string
}
variable "lxc_os_type" {
type = string
default = "debian"
}
variable "lxc_unprivileged" {
type = bool
default = true
}
variable "lxc_nesting" {
type = bool
default = true
}
variable "lxc_cores" {
type = number
default = 1
}
variable "lxc_cpu_units" {
type = number
default = 1024
}
variable "lxc_memory" {
type = number
default = 512
}
variable "lxc_swap" {
type = number
default = 512
}
variable "lxc_rootfs_storage" {
type = string
default = "local-lvm"
}
variable "lxc_rootfs_size_gib" {
type = number
default = 8
}
variable "lxc_netif_name" {
type = string
default = "eth0"
}
variable "lxc_firewall" {
type = bool
default = true
}
variable "lxc_ipv4_address" {
type = string
default = "dhcp"
}
variable "lxc_ipv4_gateway" {
type = string
default = null
}
variable "lxc_dns_domain" {
type = string
default = null
}
variable "lxc_dns_servers" {
type = list(string)
default = []
}
variable "lxc_started" {
type = bool
default = true
}
variable "lxc_start_on_boot" {
type = bool
default = true
}
variable "lxc_root_password" {
type = string
sensitive = true
default = null
}
variable "lxc_ssh_public_keys" {
type = list(string)
default = []
}

View File

@@ -0,0 +1,10 @@
terraform {
required_version = ">= 1.6"
required_providers {
proxmox = {
source = "bpg/proxmox"
version = "0.86.0"
}
}
}

View File

@@ -0,0 +1,36 @@
#cloud-config
hostname: ${hostname}
manage_etc_hosts: true
package_update: true
package_upgrade: true
packages:
- parted
# user
users:
- name: "adminuser"
groups: sudo
sudo: ALL=(ALL) NOPASSWD:ALL
lock_passwd: false
passwd: "$6$qL4GPP3AhSodbF9U$Lu4.VSpCSlAVPNIZyPNme0AH8HhbVYE6SAm3P3Er7KSLIYydj799tZBz/n6NRzzRYhyQh9a4h8m8WCbjw2nXg1"
shell: /bin/bash
ssh_authorized_keys:
- "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBcTy4Zcj3MHkW7XvnZhakl64vZXnjzDJymYlo+Ax8FM dev-kyiv01-vm-default-main-01-adminuser"
ssh_pwauth: false
runcmd:
- |
set -euxo pipefail
# grow the extended + LVM partitions to the end of the disk
growpart /dev/sda 2 || true
growpart /dev/sda 5 || true
parted -s /dev/sda "resizepart 2 100%" "resizepart 5 100%" || true
partprobe /dev/sda || true
# grow PV -> LV(root) -> filesystem
pvresize /dev/sda5
lvextend -l +100%FREE -r /dev/vg0/root

View File

@@ -0,0 +1,72 @@
locals {
vms = {
dev_kyiv01_vm_dns_main_01 = {
name = "dev-kyiv01-vm-dns-main-01"
cpu = 2
cpu_type = "x86-64-v2"
memory = 3072
disk_size = 20
mac = "02:7A:4C:11:90:64"
}
dev_kyiv01_vm_ntp_main_01 = {
name = "dev-kyiv01-vm-ntp-main-01"
cpu = 1
cpu_type = "x86-64-v2"
memory = 2048
disk_size = 8
mac = "02:7A:4C:11:90:65"
}
dev_kyiv01_vm_ceph_main_01 = {
name = "dev-kyiv01-vm-ceph-main-01"
cpu = 2
cpu_type = "x86-64-v2"
memory = 4096
disk_size = 30
mac = "02:7A:4C:11:90:66"
osd_storage = "ceph-osd"
osd_disks = [150, 150]
}
dev_kyiv01_vm_ceph_main_02 = {
name = "dev-kyiv01-vm-ceph-main-02"
cpu = 2
cpu_type = "x86-64-v2"
memory = 4096
disk_size = 30
mac = "02:7A:4C:11:90:67"
osd_storage = "ceph-osd"
osd_disks = [150, 150]
}
dev_kyiv01_vm_ceph_main_03 = {
name = "dev-kyiv01-vm-ceph-main-03"
cpu = 2
cpu_type = "x86-64-v2"
memory = 4096
disk_size = 30
mac = "02:7A:4C:11:90:68"
osd_storage = "ceph-osd"
osd_disks = [150, 150]
}
dev_kyiv01_vm_k8s_master_01 = {
name = "dev-kyiv01-vm-k8s-master-01"
cpu = 2
cpu_type = "x86-64-v2"
memory = 4096
disk_size = 40
mac = "02:7A:4C:11:90:69"
}
dev_kyiv01_vm_k8s_worker_01 = {
name = "dev-kyiv01-vm-k8s-worker-01"
cpu = 4
cpu_type = "x86-64-v2"
memory = 8192
disk_size = 60
mac = "02:7A:4C:11:90:6A"
}
}
}

View File

@@ -0,0 +1,41 @@
# 1) Create a cloud-init user-data snippet for each VM
resource "proxmox_virtual_environment_file" "user_data" {
for_each = local.vms
content_type = "snippets"
datastore_id = var.snippets_storage
node_name = var.target_node
source_raw {
data = templatefile("${path.module}/cloud-init/user-data.yaml.tpl", {
hostname = each.value.name
})
file_name = "user-data-${each.value.name}.yaml"
}
}
# 2) Create the VMs and attach the user-data file
module "vm" {
source = "../../../modules/proxmox/vm"
for_each = local.vms
name = each.value.name
target_node = var.target_node
template_id = var.template_id
cpu = each.value.cpu
cpu_type = try(each.value.cpu_type, "qemu64")
memory = each.value.memory
disk_size = each.value.disk_size
storage = var.storage
bridge = var.bridge
osd_storage = try(each.value.osd_storage, null)
osd_disks = try(each.value.osd_disks, [])
user_data_file_id = proxmox_virtual_environment_file.user_data[each.key].id
mac_address = each.value.mac
}
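
Inside ../../../modules/proxmox/vm the user_data_file_id is assumed to be attached to the clone's cloud-init drive; a minimal sketch of the relevant bpg/proxmox block (not the module's actual code, the internal variable names are assumptions):

# Illustrative: inside proxmox_virtual_environment_vm in the vm module.
initialization {
  datastore_id      = var.storage
  user_data_file_id = var.user_data_file_id
}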

View File

@@ -0,0 +1,17 @@
provider "proxmox" {
endpoint = var.pm_api_url
api_token = var.pm_api_token
insecure = true
ssh {
agent = false
username = "root"
private_key = file("/workspaces/infrastructure/.ssh/dev-kyiv01-proxmox-main-01")
node {
name = "proxmox-main-kyiv-01"
address = "176.36.225.227"
port = 25105
}
}
}

View File

@@ -0,0 +1,50 @@
variable "pm_api_url" {
type = string
description = "Proxmox API endpoint, e.g. https://proxmox:8006/api2/json"
}
variable "pm_api_token" {
type = string
description = "Proxmox API token: root@pam!terraform=..."
sensitive = true
}
variable "pm_user" {
type = string
description = "SSH username for Proxmox node"
default = "root"
}
variable "pm_password" {
type = string
description = "SSH password for Proxmox node"
sensitive = true
}
variable "target_node" {
type = string
description = "Target Proxmox node name"
}
variable "template_id" {
type = number
description = "Template VM ID to clone from"
}
variable "storage" {
type = string
description = "Default datastore for OS disk"
default = "local-lvm"
}
variable "bridge" {
type = string
description = "Default VM bridge"
default = "vmbr0"
}
variable "snippets_storage" {
type = string
description = "Datastore where 'snippets' content is enabled (usually 'local')"
default = "local"
}

View File

@@ -0,0 +1,10 @@
terraform {
required_version = ">= 1.6"
required_providers {
proxmox = {
source = "bpg/proxmox"
version = "0.86.0"
}
}
}