Hrankin, Aleksandr (contracted)
2026-02-19 11:34:13 +00:00
commit f243f440c3
191 changed files with 6183 additions and 0 deletions


@@ -0,0 +1,8 @@
---
- name: Ensure required Python libraries are installed
  ansible.builtin.apt:
    name:
      - python3-pip
      - python3-kubernetes
    state: present
    update_cache: yes
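The python3-kubernetes bindings are what Ansible's kubernetes.core modules import on the managed node. A quick sanity check after the role runs (a minimal sketch; the printed version will vary):

```bash
# Confirm the Kubernetes Python client is importable on the managed node
python3 -c 'import kubernetes; print(kubernetes.__version__)'
```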


@@ -0,0 +1,3 @@
```bash
curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
```
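The get-helm-3 script installs the latest Helm release by default; it also honors a `DESIRED_VERSION` environment variable if you would rather pin the version (the version shown here is an arbitrary example):

```bash
# Pin a specific Helm release instead of taking "latest"
curl -fsSL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 \
  | DESIRED_VERSION=v3.16.2 bash
```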


@@ -0,0 +1,20 @@
---
- name: Download Helm install script
  ansible.builtin.get_url:
    url: https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
    dest: /tmp/get-helm-3.sh
    mode: '0755'

- name: Install Helm
  ansible.builtin.command: /tmp/get-helm-3.sh
  args:
    creates: /usr/local/bin/helm

- name: Verify Helm installation
  ansible.builtin.command: helm version
  register: helm_version_output
  changed_when: false

- name: Show Helm version
  ansible.builtin.debug:
    var: helm_version_output.stdout
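One way to apply these tasks, assuming a playbook that includes this role (the inventory and playbook names here are hypothetical):

```bash
# helm.yml is a hypothetical playbook that applies the Helm role to the masters group
ansible-playbook -i inventory.ini helm.yml --limit masters
```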


@@ -0,0 +1,172 @@
# roles/k8s/k8scommon/tasks/main.yml
---
# === 1. Package updates and base utilities ===
- name: Install base packages
  ansible.builtin.apt:
    update_cache: yes
    name:
      - apt-transport-https
      - ca-certificates
      - curl
      - gnupg
      - lsb-release
    state: present

# === 2. Disable swap ===
- name: Disable swap immediately
  ansible.builtin.command: swapoff -a
  changed_when: false

- name: Backup fstab
  ansible.builtin.copy:
    src: /etc/fstab
    dest: /etc/fstab.bak
    remote_src: yes
    force: no

- name: Comment out swap entries in fstab
  ansible.builtin.replace:
    path: /etc/fstab
    regexp: '^\s*([^#].*\s+swap\s+.*)$'
    replace: '# \1'

# === 3. Kernel modules ===
- name: Write kernel modules config for Kubernetes
  ansible.builtin.copy:
    dest: /etc/modules-load.d/k8s.conf
    content: |
      overlay
      br_netfilter

- name: Load overlay module
  ansible.builtin.command: modprobe overlay
  changed_when: false

- name: Load br_netfilter module
  ansible.builtin.command: modprobe br_netfilter
  changed_when: false

# === 4. sysctl settings for Kubernetes / containerd ===
- name: Configure Kubernetes sysctl params
  ansible.builtin.copy:
    dest: /etc/sysctl.d/99-kubernetes-cri.conf
    content: |
      net.bridge.bridge-nf-call-iptables = 1
      net.bridge.bridge-nf-call-ip6tables = 1
      net.ipv4.ip_forward = 1

- name: Apply sysctl settings
  ansible.builtin.command: sysctl --system
  changed_when: false

# === 5. Install containerd ===
- name: Install containerd
  ansible.builtin.apt:
    update_cache: yes
    name: containerd
    state: present

- name: Ensure containerd config directory exists
  ansible.builtin.file:
    path: /etc/containerd
    state: directory
    mode: '0755'

# IMPORTANT: always regenerate config.toml, exactly as the manual script does
- name: Generate default containerd config (overwrite)
  ansible.builtin.shell: |
    set -o errexit
    containerd config default > /etc/containerd/config.toml
  args:
    executable: /bin/bash

- name: Enable SystemdCgroup in containerd config
  ansible.builtin.replace:
    path: /etc/containerd/config.toml
    regexp: 'SystemdCgroup = false'
    replace: 'SystemdCgroup = true'

- name: Set correct CNI bin_dir in containerd config
  ansible.builtin.replace:
    path: /etc/containerd/config.toml
    regexp: 'bin_dir = .*'
    replace: 'bin_dir = "/opt/cni/bin"'

- name: Set correct CNI conf_dir in containerd config
  ansible.builtin.replace:
    path: /etc/containerd/config.toml
    regexp: 'conf_dir = .*'
    replace: 'conf_dir = "/etc/cni/net.d"'

- name: Enable and restart containerd
  ansible.builtin.systemd:
    name: containerd
    enabled: true
    state: restarted

# === 6. Prepare CNI directories ===
- name: Ensure CNI directories exist
  ansible.builtin.file:
    path: "{{ item }}"
    state: directory
    mode: '0755'
  loop:
    - /opt/cni/bin
    - /etc/cni/net.d

# Symlink /usr/lib/cni -> /opt/cni/bin, but only if /usr/lib/cni does not exist yet
- name: Check if /usr/lib/cni exists
  ansible.builtin.stat:
    path: /usr/lib/cni
  register: cni_usr_lib

- name: Create symlink /usr/lib/cni -> /opt/cni/bin (if not exists)
  ansible.builtin.file:
    src: /opt/cni/bin
    dest: /usr/lib/cni
    state: link
  when: not cni_usr_lib.stat.exists

# === 7. Kubernetes v1.34 repository ===
- name: Ensure apt keyrings directory exists
  ansible.builtin.file:
    path: /etc/apt/keyrings
    state: directory
    mode: '0755'

- name: Download Kubernetes repo key
  ansible.builtin.shell: |
    set -o errexit
    curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.34/deb/Release.key \
      | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
  args:
    executable: /bin/bash
    creates: /etc/apt/keyrings/kubernetes-apt-keyring.gpg

- name: Add Kubernetes apt repository
  ansible.builtin.copy:
    dest: /etc/apt/sources.list.d/kubernetes.list
    content: |
      deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.34/deb/ /

- name: Update apt cache after adding Kubernetes repo
  ansible.builtin.apt:
    update_cache: yes

# === 8. Install kubelet, kubeadm, kubectl and pin their versions ===
- name: Install kubelet, kubeadm, kubectl
  ansible.builtin.apt:
    name:
      - kubelet
      - kubeadm
      - kubectl
    state: present
    update_cache: yes
# dpkg_selections reports the hold state idempotently; the previous
# `apt-mark hold` via command reported "changed" on every run
- name: Hold Kubernetes packages
  ansible.builtin.dpkg_selections:
    name: "{{ item }}"
    selection: hold
  loop:
    - kubelet
    - kubeadm
    - kubectl
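After the role has run, the node can be spot-checked by hand; each command below mirrors something the tasks above configured:

```bash
swapon --show                      # expect empty output: swap is off
lsmod | grep -E 'overlay|br_netfilter'
sysctl net.ipv4.ip_forward         # expect "net.ipv4.ip_forward = 1"
systemctl is-active containerd     # expect "active"
kubeadm version -o short           # expect a v1.34.x version
apt-mark showhold                  # kubelet, kubeadm, kubectl should be listed
```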


@@ -0,0 +1,136 @@
# roles/k8s/k8smaster/tasks/main.yml
---
# === 9. kubeadm init (manual step 14 equivalent) ===
- name: Initialize Kubernetes control plane (kubeadm init)
  ansible.builtin.command: >
    kubeadm init
    --apiserver-advertise-address={{ ansible_default_ipv4.address }}
    --pod-network-cidr=10.244.0.0/16
  args:
    creates: /etc/kubernetes/admin.conf

# === 10. kubeconfig for root and the regular user ===
- name: Ensure kubeconfig directory for root exists
  ansible.builtin.file:
    path: /root/.kube
    state: directory
    mode: "0700"

- name: Copy admin kubeconfig for root
  ansible.builtin.copy:
    src: /etc/kubernetes/admin.conf
    dest: /root/.kube/config
    owner: root
    group: root
    mode: "0600"
    remote_src: yes

- name: Ensure kubeconfig directory for user exists
  ansible.builtin.file:
    path: "/home/adminuser/.kube"
    state: directory
    owner: "adminuser"
    group: "adminuser"
    mode: "0700"

- name: Copy admin kubeconfig to user home
  ansible.builtin.copy:
    src: /etc/kubernetes/admin.conf
    dest: "/home/adminuser/.kube/config"
    owner: "adminuser"
    group: "adminuser"
    mode: "0600"
    remote_src: yes

# === 11. Wait for the API server ===
- name: Wait for Kubernetes API to become reachable
  ansible.builtin.command: kubectl get --raw=/healthz
  register: api_health
  until: api_health.rc == 0
  retries: 30
  delay: 10
  environment:
    KUBECONFIG: /etc/kubernetes/admin.conf

# === 12. Install Flannel CNI (do NOT wait for the node to be Ready before this) ===
- name: Install Flannel CNI
  ansible.builtin.command: >
    kubectl apply --validate=false
    -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
  register: flannel_result
  until: flannel_result.rc == 0
  retries: 10
  delay: 6
  environment:
    KUBECONFIG: /etc/kubernetes/admin.conf

- name: Wait for flannel DaemonSet to be Ready
  ansible.builtin.command: >
    kubectl -n kube-flannel rollout status daemonset/kube-flannel-ds --timeout=300s
  register: flannel_rollout
  until: flannel_rollout.rc == 0
  retries: 5
  delay: 15
  environment:
    KUBECONFIG: /etc/kubernetes/admin.conf

# === 13. Now wait for the node to become Ready ===
- name: Wait for control-plane node to become Ready
  ansible.builtin.shell: |
    kubectl get node "$(hostname -s)" \
      -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}'
  register: node_ready
  until: node_ready.stdout == "True"
  retries: 30
  delay: 10
  environment:
    KUBECONFIG: /etc/kubernetes/admin.conf

# === 14. Wait for CoreDNS ===
- name: Wait for CoreDNS deployment to be Ready
  ansible.builtin.command: >
    kubectl -n kube-system rollout status deployment/coredns --timeout=300s
  register: coredns_rollout
  until: coredns_rollout.rc == 0
  retries: 5
  delay: 15
  environment:
    KUBECONFIG: /etc/kubernetes/admin.conf

# === 15. Allow pods on the master (manual step 18), if needed ===
# Disabled by default; change "when: false" to "when: true" for a single-node cluster.
- name: Allow scheduling pods on control-plane node
  ansible.builtin.command: >
    kubectl taint nodes --all node-role.kubernetes.io/control-plane-
  environment:
    KUBECONFIG: /etc/kubernetes/admin.conf
  when: false

# === 16. Check cluster status ===
- name: Get nodes
  ansible.builtin.command: kubectl get nodes
  register: nodes_out
  environment:
    KUBECONFIG: /etc/kubernetes/admin.conf

- name: Show nodes
  ansible.builtin.debug:
    var: nodes_out.stdout

- name: Get all pods in all namespaces
  ansible.builtin.command: kubectl get pods -A
  register: pods_out
  environment:
    KUBECONFIG: /etc/kubernetes/admin.conf

- name: Show pods
  ansible.builtin.debug:
    var: pods_out.stdout

# === 17. Print the join command (manual step 20) ===
- name: Get kubeadm join command
  ansible.builtin.command: kubeadm token create --print-join-command
  register: join_cmd

- name: Show join command
  ansible.builtin.debug:
    msg: "Use this command on workers: {{ join_cmd.stdout }}"


@@ -0,0 +1,13 @@
---
# === 2. Join the cluster (the kubeadm join step from the manual script) ===
- name: Join node to Kubernetes cluster
  ansible.builtin.command: "{{ k8s_kubeadm_join_command }}"
  args:
    creates: /etc/kubernetes/kubelet.conf

# === 3. Make sure kubelet is enabled and running ===
- name: Ensure kubelet is enabled and running
  ansible.builtin.systemd:
    name: kubelet
    enabled: true
    state: started


@@ -0,0 +1,109 @@
```bash
# === Become root (if not already) ===
sudo -i
```
```bash
# === 1. Update packages and install base utilities ===
apt-get update -y
apt-get install -y apt-transport-https ca-certificates curl gnupg lsb-release
```
```bash
# === 2. Disable swap immediately ===
swapoff -a
```
```bash
# === 3. Remove swap from /etc/fstab (so it does not come back after a reboot) ===
cp /etc/fstab /etc/fstab.bak
sed -i '/ swap / s/^/#/' /etc/fstab
```
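To confirm swap is really gone, and will stay gone:

```bash
swapon --show          # no output means no active swap
grep swap /etc/fstab   # every swap line should now start with '#'
```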
```bash
# === 4. Enable the overlay and br_netfilter kernel modules ===
cat <<EOF >/etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
modprobe overlay
modprobe br_netfilter
```
```bash
# === 5. Configure sysctl for Kubernetes and containerd ===
cat <<EOF >/etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
sysctl --system
```
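`sysctl --system` prints every file it loads; to verify just the three values that matter here:

```bash
sysctl net.bridge.bridge-nf-call-iptables \
       net.bridge.bridge-nf-call-ip6tables \
       net.ipv4.ip_forward   # all three should report "= 1"
```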
```bash
# === 6. Install containerd ===
apt-get install -y containerd
```
```bash
# === 7. Generate the containerd config and enable SystemdCgroup ===
mkdir -p /etc/containerd
containerd config default >/etc/containerd/config.toml
# Enable SystemdCgroup
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
# (Optional) Make sure the CNI paths are set to /opt/cni/bin and /etc/cni/net.d
sed -i 's@bin_dir = .*@bin_dir = "/opt/cni/bin"@' /etc/containerd/config.toml
sed -i 's@conf_dir = .*@conf_dir = "/etc/cni/net.d"@' /etc/containerd/config.toml
systemctl restart containerd
systemctl enable containerd
```
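Worth verifying before moving on, since kubelet will not run pods correctly if containerd is using the wrong cgroup driver:

```bash
systemctl is-active containerd                       # expect "active"
grep -n 'SystemdCgroup' /etc/containerd/config.toml  # expect "SystemdCgroup = true"
```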
```bash
# === 8. Prepare the CNI plugin directories ===
mkdir -p /opt/cni/bin
mkdir -p /etc/cni/net.d
```
```bash
# === 9. Path fix for flannel: /usr/lib/cni -> /opt/cni/bin ===
# IMPORTANT: the guard skips the symlink if /usr/lib/cni already exists
[ -e /usr/lib/cni ] || ln -s /opt/cni/bin /usr/lib/cni
```
<!-- # === 9. Установить CNI-плагины (официальный набор) ===
```bash
curl -L -o /tmp/cni-plugins.tgz \
"https://github.com/containernetworking/plugins/releases/download/v1.5.1/cni-plugins-linux-amd64-v1.5.1.tgz"
tar -C /opt/cni/bin -xzvf /tmp/cni-plugins.tgz
``` -->
<!-- # === 10. (Опционально) Симлинк /usr/lib/cni -> /opt/cni/bin, если НЕ существует ===
if [ ! -e /usr/lib/cni ]; then
ln -s /opt/cni/bin /usr/lib/cni
fi -->
```bash
# === 10. Add the official Kubernetes apt repository (pkgs.k8s.io, v1.34 branch) ===
mkdir -p /etc/apt/keyrings
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.34/deb/Release.key \
| gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.34/deb/ /
EOF
apt-get update -y
```
```bash
# === 11. Install kubelet, kubeadm, kubectl and pin their versions ===
apt-get install -y kubelet kubeadm kubectl
apt-mark hold kubelet kubeadm kubectl
```
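The hold keeps `apt-get upgrade` from pulling in a new minor version behind your back; to confirm it took effect:

```bash
apt-mark showhold         # should list kubelet, kubeadm, kubectl
kubeadm version -o short  # should print a v1.34.x version
```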


@@ -0,0 +1,53 @@
```bash
# === 13. Look up the master's IP addresses ===
hostname -I
# Note the IP you need (e.g. 192.168.0.26) and substitute it into the next command.
# The pod CIDR for Flannel is 10.244.0.0/16.
```
```bash
# === 14. Initialize the control plane (kubeadm init) ===
kubeadm init \
--apiserver-advertise-address=192.168.0.154 \
--pod-network-cidr=10.244.0.0/16
```
```bash
# === 15. Configure kubeconfig for root (so kubectl works without extra flags) ===
mkdir -p /root/.kube
cp /etc/kubernetes/admin.conf /root/.kube/config
chown root:root /root/.kube/config
```
```bash
# === 16. (Optional) Copy the kubeconfig to the regular user adminuser ===
# REPLACE adminuser with your own username
mkdir -p /home/adminuser/.kube
cp /etc/kubernetes/admin.conf /home/adminuser/.kube/config
chown adminuser:adminuser /home/adminuser/.kube/config
```
```bash
# === 17. Install Flannel as the CNI plugin ===
kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
```
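`kubectl apply` returns as soon as the manifests are accepted, not when flannel is actually running; waiting for the DaemonSet rollout (as the Ansible role above does) avoids racing ahead:

```bash
kubectl -n kube-flannel rollout status daemonset/kube-flannel-ds --timeout=300s
```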
```bash
# === 18. (Optional) Allow pods to run on the master (single-node cluster) ===
# If you want to use the master as a worker too:
kubectl taint nodes --all node-role.kubernetes.io/control-plane-
```
```bash
# === 19. Check cluster status ===
kubectl get nodes
kubectl get pods -A
```
```bash
# === 20. Get the join command for worker nodes ===
kubeadm token create --print-join-command
# Copy the entire printed "kubeadm join ..." command; you will need it on each worker.
```


@@ -0,0 +1,14 @@
```bash
# === 21. Run the join command obtained on the master ===
# Example (THIS IS ONLY AN EXAMPLE; USE YOUR OWN COMMAND FROM STEP 20):
kubeadm join 192.168.0.154:6443 --token 9jz5xr.xvwirgtsp2v2brge \
--discovery-token-ca-cert-hash sha256:e09d4918b52e647af493e8345504ecb9907e79637a52932e730df350d3f76ede
```
```bash
# === 22. Verify from the master that the worker has joined the cluster ===
# Run these commands on the master node:
kubectl get nodes
kubectl get pods -A
```


@@ -0,0 +1,87 @@
```bash
helm repo add codecentric https://codecentric.github.io/helm-charts
helm repo update
```
```bash
kubectl create namespace keycloak
```
```bash
vim values-keycloak.yaml

# Which Keycloak image to deploy
image:
  repository: quay.io/keycloak/keycloak
  # Pin the version you want to run
  # (26.0.7 is an example; check the current tags on quay.io/keycloak/keycloak)
  tag: "26.0.7"
  pullPolicy: IfNotPresent

replicas: 1

# HTTP path Keycloak will be served on
http:
  # "/" or "/auth", as you prefer; "/" is used here for simplicity
  relativePath: "/"

# Connection to the external PostgreSQL database
database:
  vendor: postgres
  hostname: postgres-postgresql.postgres.svc.cluster.local
  port: 5432
  database: keycloak
  username: keycloak_user
  password: "password"

# Keycloak startup command (the recommended kc.sh style)
command:
  - "/opt/keycloak/bin/kc.sh"
  - "start"
  - "--http-enabled=true"
  - "--http-port=8080"
  - "--hostname-strict=false"
  - "--hostname-strict-https=false"
  - "--proxy=edge"

# NGINX ingress on keycloak.local
ingress:
  enabled: true
  ingressClassName: "nginx"
  annotations:
    nginx.ingress.kubernetes.io/ssl-redirect: "false"
  rules:
    - host: "keycloak.local"
      paths:
        - path: '{{ tpl .Values.http.relativePath $ | trimSuffix "/" }}/'
          pathType: Prefix
  tls: []  # TLS can be enabled later via cert-manager

# Keycloak environment variables
extraEnv: |
  # Admin user and password
  - name: KEYCLOAK_ADMIN
    value: admin
  - name: KEYCLOAK_ADMIN_PASSWORD
    value: password
  # Proxy / hostname settings
  - name: KC_PROXY
    value: edge
  - name: KC_HOSTNAME
    value: "keycloak.local"
  # JGroups discovery via the chart's headless service
  - name: JAVA_OPTS_APPEND
    value: >-
      -XX:+UseContainerSupport
      -XX:MaxRAMPercentage=50.0
      -Djava.awt.headless=true
      -Djgroups.dns.query={{ include "keycloak.fullname" . }}-headless

helm install keycloak codecentric/keycloakx \
  --namespace keycloak \
  --values values-keycloak.yaml
```
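A quick way to confirm the release came up; the `Host` header trick lets you probe the ingress before any DNS or hosts entry exists (the node IP is a placeholder):

```bash
kubectl -n keycloak get pods
# Probe through the NGINX ingress; replace <ingress-node-ip> with your node's IP
curl -H 'Host: keycloak.local' http://<ingress-node-ip>/realms/master
```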


@@ -0,0 +1,288 @@
```bash
helm repo add jaconi https://charts.jaconi.io
helm repo update
```
```bash
vim netbird-values.yaml

fullnameOverride: "netbird"

config:
  database:
    DB_TYPE: postgres
    HOST: postgres-postgresql.postgres.svc.cluster.local
    PORT: 5432
    NAME: netbird
    USER: netbird_user
    PASSWD: password

relay:
  enabled: true
  config:
    NB_EXPOSED_ADDRESS: "netbird-relay.netbird.svc.cluster.local:33080"

signal:
  enabled: true

management:
  enabled: true
  config:
    NETBIRD_SIGNAL_URI: "netbird-signal.netbird.svc.cluster.local:10000"
    NETBIRD_SIGNAL_PROTOCOL: "https"
    NETBIRD_RELAY_DOMAIN: "netbird-relay.netbird.svc.cluster.local"
    NETBIRD_RELAY_PORT: "33080"
    NETBIRD_STUN_URI: "stun:netbird-signal.netbird.svc.cluster.local:3478"
    NETBIRD_TURN_URI: "turn:netbird-signal.netbird.svc.cluster.local:3478"

dashboard:
  enabled: true
  service:
    type: ClusterIP
  ingress:
    enabled: false
```
```bash
# Generate a random relay secret key:
openssl rand -hex 32
# The namespace must exist before the secret can be created in it
# (the hex value below is just an example of the generated output; use your own):
kubectl create namespace netbird --dry-run=client -o yaml | kubectl apply -f -
kubectl create secret generic netbird-relay-secret \
  -n netbird \
  --from-literal=netbird-relay-secret-key="8626c1ed1c8cfcb13df6c65819042771a2bf7a280c16f0ba54abea8cde7b560d"
```
```bash
helm install netbird jaconi/netbird \
  -n netbird \
  --create-namespace \
  -f netbird-values.yaml

# or, to update an existing release:
helm upgrade netbird jaconi/netbird \
  -n netbird \
  -f netbird-values.yaml
```
```bash
kubectl -n netbird get pods
kubectl -n netbird get svc
kubectl -n netbird get ingress
```
<!-- dashboard -->
```bash
vim netbird-dashboard-deployment.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: netbird-dashboard
  namespace: netbird
  labels:
    app: netbird-dashboard
spec:
  replicas: 1
  selector:
    matchLabels:
      app: netbird-dashboard
  template:
    metadata:
      labels:
        app: netbird-dashboard
    spec:
      containers:
        - name: dashboard
          image: netbirdio/dashboard:0.45.1
          ports:
            - containerPort: 80
          env:
            - name: NB_MANAGEMENT_API_ENDPOINT
              value: "http://netbird.local:30830"
```
```bash
vim netbird-dashboard-service.yaml

apiVersion: v1
kind: Service
metadata:
  name: netbird-dashboard
  namespace: netbird
spec:
  selector:
    app: netbird-dashboard
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
  type: ClusterIP
```
```bash
vim netbird-dashboard-ingress.yaml

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: netbird-dashboard
  namespace: netbird
spec:
  ingressClassName: nginx
  rules:
    - host: netbird.local
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: netbird-dashboard
                port:
                  number: 80
```
```bash
kubectl apply -f netbird-dashboard-deployment.yaml
kubectl apply -f netbird-dashboard-service.yaml
kubectl apply -f netbird-dashboard-ingress.yaml
```
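To watch the dashboard come up and confirm the ingress was admitted:

```bash
kubectl -n netbird rollout status deployment/netbird-dashboard --timeout=120s
kubectl -n netbird get ingress netbird-dashboard
```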
```bash
# On the Windows client, add a hosts entry that points netbird.local
# at the ingress node IP (e.g. "<node-ip>  netbird.local") in:
C:\Windows\System32\drivers\etc\hosts
```
# ArgoCD (GitOps alternative)
```bash
vim netbird-application.yaml

apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: netbird        # the application name shown in ArgoCD
  namespace: argocd    # the namespace where ArgoCD is installed
spec:
  project: default
  source:
    repoURL: https://charts.jaconi.io   # the same Helm repo as above
    chart: netbird                      # chart name
    targetRevision: "*"                 # pin a version if you like; any version for now
    helm:
      releaseName: netbird              # as if you had run "helm install netbird ..."
      values: |-
        fullnameOverride: "netbird"
        config:
          database:
            DB_TYPE: postgres
            HOST: postgres-postgresql.postgres.svc.cluster.local
            PORT: 5432
            NAME: netbird
            USER: netbird_user
            PASSWD: password
        relay:
          enabled: true
          config:
            NB_EXPOSED_ADDRESS: "netbird-relay.netbird.svc.cluster.local:33080"
        signal:
          enabled: true
        management:
          enabled: true
          config:
            NETBIRD_SIGNAL_URI: "netbird-signal.netbird.svc.cluster.local:10000"
            NETBIRD_SIGNAL_PROTOCOL: "https"
            NETBIRD_RELAY_DOMAIN: "netbird-relay.netbird.svc.cluster.local"
            NETBIRD_RELAY_PORT: "33080"
            NETBIRD_STUN_URI: "stun:netbird-signal.netbird.svc.cluster.local:3478"
            NETBIRD_TURN_URI: "turn:netbird-signal.netbird.svc.cluster.local:3478"
        dashboard:
          enabled: true
          service:
            type: ClusterIP
          ingress:
            enabled: true
            className: nginx
            hosts:
              - host: netbird.local
                paths:
                  - path: /
                    pathType: Prefix
  destination:
    server: https://kubernetes.default.svc
    namespace: netbird   # the chart is installed into this namespace
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true

kubectl apply -f netbird-application.yaml -n argocd
```
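Once applied, the Application's sync and health state can be read straight from the CRD, without needing the argocd CLI:

```bash
kubectl -n argocd get application netbird \
  -o jsonpath='{.status.sync.status}{" "}{.status.health.status}{"\n"}'
```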
```bash
kubectl create namespace netbird || true
kubectl create secret generic netbird-relay-secret \
-n netbird \
--from-literal=netbird-relay-secret-key="8626c1ed1c8cfcb13df6c65819042771a2bf7a280c16f0ba54abea8cde7b560d"
```
```bash
helm repo add jaconi https://charts.jaconi.io
helm repo update
vim netbird-dashboard-values.yaml

image:
  # UI image version; the v2.22.2 tag is published on Docker Hub
  # (netbirdio/dashboard:v2.22.2)
  tag: v2.22.2

auth:
  # OIDC provider (e.g. Keycloak)
  authority: https://keycloak.example.com/realms/homelab
  audience: netbird
  clientID: netbird
  supportedScopes: >
    openid profile email offline_access netbird-api

netbird:
  # HTTP API of the NetBird management service (the same one the clients connect to)
  managementApiEndpoint: https://netbird.example.com
  # gRPC endpoint of the same service
  managementGrpcApiEndpoint: https://netbird.example.com

ingress:
  enabled: true
  className: nginx
  annotations:
    # cert-manager example; drop this if you don't use it
    cert-manager.io/cluster-issuer: letsencrypt
  hosts:
    - host: netbird.example.com
      paths:
        - path: /
          pathType: Prefix
  tls:
    - secretName: netbird-tls-certificate
      hosts:
        - netbird.example.com

# Any namespace works, but netbird is the usual choice
kubectl create namespace netbird --dry-run=client -o yaml | kubectl apply -f -
helm install netbird-dashboard jaconi/netbird-dashboard \
  --namespace netbird \
  --values netbird-dashboard-values.yaml
```
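A final check that the chart-based dashboard is serving, assuming the netbird.example.com host from the values file resolves to your ingress:

```bash
kubectl -n netbird get pods,ingress
curl -kI https://netbird.example.com   # expect an HTTP response from the dashboard
```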