init
ansible/roles/ceph/00_install/tasks/main.yml (new file, 43 lines)
@@ -0,0 +1,43 @@
---
- name: apt update
  ansible.builtin.apt:
    update_cache: true

- name: apt upgrade
  ansible.builtin.apt:
    upgrade: dist

- name: install base packages
  ansible.builtin.apt:
    name:
      - ca-certificates
      - curl
      - gnupg
      - lvm2
      - podman
    state: present

- name: swapoff
  ansible.builtin.command: swapoff -a
  changed_when: true

- name: comment out swap in /etc/fstab
  ansible.builtin.replace:
    path: /etc/fstab
    regexp: '^([^#].*\s+swap\s+.*)$'
    replace: '# \1'

- name: install cephadm and ceph-common
  ansible.builtin.apt:
    name:
      - cephadm
      - ceph-common
    state: present

- name: cephadm version
  ansible.builtin.command: cephadm version
  changed_when: false

- name: ceph -v
  ansible.builtin.command: ceph -v
  changed_when: false
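Note: the `swapoff` task above always reports changed, even when swap is already off. A minimal idempotent sketch, assuming fact gathering is enabled so the standard `ansible_swaptotal_mb` fact is available:

```yaml
- name: swapoff (only when swap is actually active)
  ansible.builtin.command: swapoff -a
  when: ansible_swaptotal_mb | int > 0
  changed_when: true
```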
ansible/roles/ceph/01_bootstrap/tasks/main.yml (new file, 9 lines)
@@ -0,0 +1,9 @@
---
- name: cephadm bootstrap
  ansible.builtin.command: >
    cephadm bootstrap
    --mon-ip 192.168.0.102
    --initial-dashboard-user admin
    --initial-dashboard-password password
    --allow-fqdn-hostname
  changed_when: true
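The monitor IP and dashboard password are hardcoded, and the password lands in plain text and in task output. A sketch of the same task with the values supplied by hypothetical variables `ceph_mon_ip` and `ceph_dashboard_password` (the latter kept in Ansible Vault), plus `no_log` to keep the secret out of logs:

```yaml
- name: cephadm bootstrap
  ansible.builtin.command: >
    cephadm bootstrap
    --mon-ip {{ ceph_mon_ip }}
    --initial-dashboard-user admin
    --initial-dashboard-password {{ ceph_dashboard_password }}
    --allow-fqdn-hostname
  no_log: true  # keep the dashboard password out of task output
  changed_when: true
```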
ansible/roles/ceph/02_share_pubkey/tasks/main.yml (new file, 25 lines)
@@ -0,0 +1,25 @@
---
- name: get cephadm pub key (run once on ceph01)
  ansible.builtin.command: ceph cephadm get-pub-key
  register: ceph_pubkey_cmd
  changed_when: false
  delegate_to: dev-kyiv01-vm-ceph-main-01
  run_once: true

- name: set ceph pubkey fact for this play
  ansible.builtin.set_fact:
    ceph_pubkey: "{{ ceph_pubkey_cmd.stdout }}"
  run_once: true

- name: add ceph pub key to root authorized_keys
  ansible.posix.authorized_key:
    user: root
    key: "{{ ceph_pubkey }}"
    state: present
  when: inventory_hostname in ["dev-kyiv01-vm-ceph-main-02", "dev-kyiv01-vm-ceph-main-03"]

- name: restart ssh
  ansible.builtin.service:
    name: ssh
    state: restarted
  when: inventory_hostname in ["dev-kyiv01-vm-ceph-main-02", "dev-kyiv01-vm-ceph-main-03"]
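The hardcoded host lists above duplicate the inventory. A sketch of the same condition driven by an assumed inventory group `ceph` and an assumed `ceph_bootstrap_host` variable, so adding a node does not require editing the role:

```yaml
- name: add ceph pub key to root authorized_keys
  ansible.posix.authorized_key:
    user: root
    key: "{{ ceph_pubkey }}"
    state: present
  when:
    - inventory_hostname in groups['ceph']       # assumed inventory group
    - inventory_hostname != ceph_bootstrap_host  # assumed variable, e.g. dev-kyiv01-vm-ceph-main-01
```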
ansible/roles/ceph/03_setup_cluster/tasks/main.yml (new file, 40 lines)
@@ -0,0 +1,40 @@
---
- name: add host ceph02
  ansible.builtin.command: >
    ceph orch host add dev-kyiv01-vm-ceph-main-02 192.168.0.103
  changed_when: true

- name: add host ceph03
  ansible.builtin.command: >
    ceph orch host add dev-kyiv01-vm-ceph-main-03 192.168.0.104
  changed_when: true

- name: add osd ceph01 sdb
  ansible.builtin.command: >
    ceph orch daemon add osd dev-kyiv01-vm-ceph-main-01:/dev/sdb
  changed_when: true

- name: add osd ceph01 sdc
  ansible.builtin.command: >
    ceph orch daemon add osd dev-kyiv01-vm-ceph-main-01:/dev/sdc
  changed_when: true

- name: add osd ceph02 sdb
  ansible.builtin.command: >
    ceph orch daemon add osd dev-kyiv01-vm-ceph-main-02:/dev/sdb
  changed_when: true

- name: add osd ceph02 sdc
  ansible.builtin.command: >
    ceph orch daemon add osd dev-kyiv01-vm-ceph-main-02:/dev/sdc
  changed_when: true

- name: add osd ceph03 sdb
  ansible.builtin.command: >
    ceph orch daemon add osd dev-kyiv01-vm-ceph-main-03:/dev/sdb
  changed_when: true

- name: add osd ceph03 sdc
  ansible.builtin.command: >
    ceph orch daemon add osd dev-kyiv01-vm-ceph-main-03:/dev/sdc
  changed_when: true
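The six OSD tasks differ only in host and device. A sketch of the same work collapsed into one looped task (hosts and devices exactly as above):

```yaml
- name: add osds
  ansible.builtin.command: >
    ceph orch daemon add osd {{ item.host }}:{{ item.dev }}
  loop:
    - { host: dev-kyiv01-vm-ceph-main-01, dev: /dev/sdb }
    - { host: dev-kyiv01-vm-ceph-main-01, dev: /dev/sdc }
    - { host: dev-kyiv01-vm-ceph-main-02, dev: /dev/sdb }
    - { host: dev-kyiv01-vm-ceph-main-02, dev: /dev/sdc }
    - { host: dev-kyiv01-vm-ceph-main-03, dev: /dev/sdb }
    - { host: dev-kyiv01-vm-ceph-main-03, dev: /dev/sdc }
  changed_when: true
```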
ansible/roles/ceph/04_setup_rgw/readme.md (new file, 48 lines)
@@ -0,0 +1,48 @@
# "[1/8] Check the cluster"
ceph -s
ceph orch status

# "[2/8] Create realm/zonegroup/zone (errors if they already exist; ignore it or check `list` first)"
radosgw-admin realm create --rgw-realm=default --default || true
radosgw-admin zonegroup create --rgw-zonegroup=default --master --default || true
radosgw-admin zone create \
  --rgw-zonegroup=default \
  --rgw-zone=default \
  --master \
  --default || true

# "[3/8] Commit the period (pushes the multisite config update)"
radosgw-admin period update --commit

# "[4/8] Check realm/zone"
radosgw-admin realm list
radosgw-admin zone list

# "[5/8] Deploy RGW as a service via cephadm/orchestrator"
ceph orch apply rgw default --placement="1"

# "[6/8] Check that RGW came up"
ceph orch ls
ceph orch ps --service-name rgw.default
ss -lntp | grep -E 'rgw|civetweb|beast|7480|80|443' || true

# "[7/8] Create the admin (system) user, for ops ONLY"
# Important: the system user is for admin operations and ops automation, not for applications
radosgw-admin user create \
  --uid="admin" \
  --display-name="RGW Admin (system)" \
  --system || true

# "[8/8] Create the user for Crunchy pgBackRest + a bucket"
# Create a dedicated user for pgBackRest
radosgw-admin user create \
  --uid="crunchy-backup" \
  --display-name="Crunchy pgBackRest" || true

# Create the bucket and assign its owner (the uid must exist)
radosgw-admin bucket create \
  --bucket="crunchy-pgbackrest" \
  --uid="crunchy-backup" || true

# "=== Result: credentials for Crunchy ==="
radosgw-admin user info --uid="crunchy-backup"
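These RGW steps are manual. If they are later folded into a role like the others, the blanket `|| true` can become explicit error handling; a sketch for the user-creation step, assuming it runs on the bootstrap node and that radosgw-admin's duplicate-user error mentions `exists` (an assumption about its output):

```yaml
- name: create crunchy-backup rgw user
  ansible.builtin.command: >
    radosgw-admin user create
    --uid=crunchy-backup
    --display-name="Crunchy pgBackRest"
  register: rgw_user
  changed_when: rgw_user.rc == 0
  failed_when:
    - rgw_user.rc != 0
    - "'exists' not in rgw_user.stderr"  # assumption about the error text
  delegate_to: dev-kyiv01-vm-ceph-main-01
  run_once: true
```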
ansible/roles/ceph/05_create_k8s_pool/readme.md (new file, 28 lines)
@@ -0,0 +1,28 @@
```bash
ceph -s
ceph fsid
ceph mon dump | egrep 'mon\.' -n
ceph osd pool ls

# create the pool (tune pg_num to the cluster size; 64/128 is fine to start with)
ceph osd pool create k8s-rbd 128

# enable the "rbd" application (required for CSI)
ceph osd pool application enable k8s-rbd rbd

# initialize rbd metadata (the Ceph docs say new pools must be initialized before use)
rbd pool init k8s-rbd

# (optional) set replication size=3 (or whatever your standard is)
ceph osd pool set k8s-rbd size 3

ceph auth get-or-create client.k8s-rbd-csi \
  mon 'profile rbd' \
  osd "profile rbd pool=k8s-rbd" \
  mgr "profile rbd"

# show the key
ceph auth get client.k8s-rbd-csi
```
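For reference, a sketch of where these values land on the Kubernetes side with ceph-csi: `clusterID` is the output of `ceph fsid`, `pool` is the pool created above, and the referenced secret (its name and namespace here are assumptions) must hold `userID: k8s-rbd-csi` plus the key shown by `ceph auth get`:

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-rbd
provisioner: rbd.csi.ceph.com
parameters:
  clusterID: <output of `ceph fsid`>   # fill in the real fsid
  pool: k8s-rbd
  imageFeatures: layering
  # secret name/namespace are assumptions; the secret holds userID/userKey for client.k8s-rbd-csi
  csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
  csi.storage.k8s.io/provisioner-secret-namespace: ceph-csi
  csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
  csi.storage.k8s.io/node-stage-secret-namespace: ceph-csi
reclaimPolicy: Delete
```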
ansible/roles/ceph/readme.md (new file, 85 lines)
@@ -0,0 +1,85 @@
# STAGE 0. OS preparation (on all Ceph nodes)

## System update
apt update && apt upgrade -y

## Base packages (without chrony/dns/hosts)
apt install -y \
  ca-certificates \
  curl \
  gnupg \
  lvm2 \
  podman

## Disable swap (REQUIRED for k8s; not strictly required for Ceph, but better to do it right away)
swapoff -a
sed -i '/ swap / s/^/#/' /etc/fstab

## Check the disks (make sure the OSD disks are empty)
lsblk

# STAGE 1. Install cephadm (on the bootstrap node, then on all nodes)

apt install -y cephadm ceph-common
cephadm version
ceph -v

# STAGE 2. Bootstrap the cluster (on the first node / mon only)

cephadm bootstrap \
  --mon-ip 192.168.0.102 \
  --initial-dashboard-user admin \
  --initial-dashboard-password password \
  --allow-fqdn-hostname

ceph -s
ceph orch ps

# STAGE 3. Add the remaining nodes to the orchestrator

ceph cephadm get-pub-key
# add the printed key to root's authorized_keys on each new node (role 02_share_pubkey automates this)
systemctl restart ssh

ceph orch host add dev-kyiv01-vm-ceph-main-02 192.168.0.103
ceph orch host add dev-kyiv01-vm-ceph-main-03 192.168.0.104

ceph orch host ls

# STAGE 4. Add OSDs (on each node)

## bootstrap node (local)
ceph orch daemon add osd dev-kyiv01-vm-ceph-main-01:/dev/sdb
ceph orch daemon add osd dev-kyiv01-vm-ceph-main-01:/dev/sdc

## vm-ceph-kyiv-02
ceph orch daemon add osd dev-kyiv01-vm-ceph-main-02:/dev/sdb
ceph orch daemon add osd dev-kyiv01-vm-ceph-main-02:/dev/sdc

## vm-ceph-kyiv-03
ceph orch daemon add osd dev-kyiv01-vm-ceph-main-03:/dev/sdb
ceph orch daemon add osd dev-kyiv01-vm-ceph-main-03:/dev/sdc

## Check:

ceph osd tree
ceph -s

# STAGE 5. Pool for k8s RBD

ceph osd pool create k8s-rbd 64
ceph osd pool application enable k8s-rbd rbd

ceph osd pool ls
ceph osd pool get k8s-rbd all

## Mini checklist

ceph -s
ceph orch host ls
ceph orch ps
ceph osd tree

# Delete broken cluster
cephadm rm-cluster --force --fsid e3b4050a-e8be-11f0-84c2-027a4c119066