# ceph config show mon.ceph-mon-01
(output truncated)
osd_pool_default_min_size    1    file
osd_pool_default_size        2    file
# ceph status
  cluster:
    id:     (removed)
    health: HEALTH_WARN
            mons are allowing insecure global_id reclaim
            OSD count 2 < osd_pool_default_size 3
            too many PGs per OSD (281 > max 250)

  services:
    mon: 3 daemons, quorum ceph-mon-01,ceph-mon-02,ceph-mon-03 (age 16h)
    mgr: ceph-mon-03(active, since 7w)
    mds: cephfs:1 {0=ceph-mon-02=up:active}
    osd: 2 osds: 2 up (since 70m), 2 in (since 9d)
    rgw: 1 daemon active (ceph-mon-01.rgw0)

  task status:

  data:
    pools:   9 pools, 281 pgs
    objects: 155.09k objects, 489 GiB
    usage:   982 GiB used, 818 GiB / 1.8 TiB avail
    pgs:     281 active+clean

  io:
    client: 22 KiB/s rd, 12 KiB/s wr, 21 op/s rd, 16 op/s wr
# ceph osd tree
ID  CLASS  WEIGHT   TYPE NAME             STATUS  REWEIGHT  PRI-AFF
-1         1.75777  root default
-3         0.87888      host ceph-osd-01
 0    hdd  0.87888          osd.0             up   1.00000  1.00000
-5         0.87889      host ceph-osd-02
 1    hdd  0.87889          osd.1             up   1.00000  1.00000
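The warnings match the numbers above: with only 2 OSDs and 281 placement groups, each OSD carries all 281 PGs, which exceeds the limit of 250 reported by the health check, and the check still evaluates a pool default size of 3 even though this mon's ceph.conf sets 2. A minimal sketch of how these warnings are typically cleared, assuming the cluster is intentionally run with size 2 and all clients are already patched for the insecure global_id reclaim issue (CVE-2021-20288); verify each value against your own setup before applying:

# disable insecure global_id reclaim once no unpatched clients remain
ceph config set mon auth_allow_insecure_global_id_reclaim false
# make the pool default size of 2 visible cluster-wide, not only in this mon's ceph.conf
ceph config set global osd_pool_default_size 2
# raise the per-OSD PG limit above the current 281 (reducing pool pg_num is the cleaner long-term fix)
ceph config set global mon_max_pg_per_osd 300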
# 3.1. Removing old backups | find the files in the backup directory
- name: Get all files
  find:
    paths: "/backups"
    file_type: file
    recurse: no
  register: install_artifacts
# 3.2. Removing old backups | sort by mtime (newest first); everything after the first five is treated as old
- name: Determine old files
  set_fact:
    old_dirs: "{{ (install_artifacts.files | sort(attribute='mtime', reverse=True))[5:] }}"
# 3.3. Removing old backups | delete the old files, but only when more than five backups exist
- name: Remove old directories
  file:
    path: "{{ item.path }}"
    state: absent
  with_items: "{{ old_dirs }}"
  when: install_artifacts.matched > 5
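These three steps implement retention by count: the five newest files (by mtime) survive, everything older is removed. If retention by age is preferred instead, the find module can select old files directly through its age parameter (matched against mtime by default), which collapses steps 3.1 and 3.2 into a single lookup. A hedged alternative sketch; the /backups path and the 4w threshold are assumptions, adjust to your setup:

# Alternative: retention by age instead of by count (assumed 4-week window)
- name: Find backups older than four weeks
  find:
    paths: "/backups"
    file_type: file
    age: 4w
    recurse: no
  register: stale_backups

- name: Remove stale backups
  file:
    path: "{{ item.path }}"
    state: absent
  with_items: "{{ stale_backups.files }}"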