From ea624e471cd8a7b615dae886004cf14723493bf4 Mon Sep 17 00:00:00 2001
From: Neeraj Pratap Singh
Date: Tue, 8 Oct 2024 14:53:33 +0530
Subject: [PATCH] qa: add test for subvolume rm with retained snapshots when cluster is full

Fixes: https://tracker.ceph.com/issues/67330
Signed-off-by: Neeraj Pratap Singh

Conflicts:
	qa/suites/fs/full/tasks/mgr-osd-full.yaml
---
 qa/suites/fs/full/tasks/mgr-osd-full.yaml     |  5 ++
 ...l_rm_retained_snap_when_cluster_is_full.sh | 67 +++++++++++++++++++
 2 files changed, 72 insertions(+)
 create mode 100755 qa/workunits/fs/full/subvol_rm_retained_snap_when_cluster_is_full.sh

diff --git a/qa/suites/fs/full/tasks/mgr-osd-full.yaml b/qa/suites/fs/full/tasks/mgr-osd-full.yaml
index fffd3f1d7c3..9e448ce617f 100644
--- a/qa/suites/fs/full/tasks/mgr-osd-full.yaml
+++ b/qa/suites/fs/full/tasks/mgr-osd-full.yaml
@@ -34,3 +34,8 @@ tasks:
     clients:
       client.0:
         - fs/full/subvolume_ls.sh
+- workunit:
+    cleanup: true
+    clients:
+      client.0:
+        - fs/full/subvol_rm_retained_snap_when_cluster_is_full.sh
diff --git a/qa/workunits/fs/full/subvol_rm_retained_snap_when_cluster_is_full.sh b/qa/workunits/fs/full/subvol_rm_retained_snap_when_cluster_is_full.sh
new file mode 100755
index 00000000000..c8fb480aa9d
--- /dev/null
+++ b/qa/workunits/fs/full/subvol_rm_retained_snap_when_cluster_is_full.sh
@@ -0,0 +1,67 @@
+#!/usr/bin/env bash
+set -ex
+
+# Test that the command 'ceph fs subvolume rm --retain-snapshots' fails when the
+# OSD is full.
+#
+# A subvolume is created on a cluster with a 2GB OSD and a 1GB file is written to
+# the subvolume. The OSD full ratios are then lowered so that the OSD is treated
+# as full, and removing the subvolume with retained snapshots is expected to fail.
+
+expect_failure() {
+    if "$@"; then return 1; else return 0; fi
+}
+
+ceph fs subvolume create cephfs sub_0
+subvol_path=$(ceph fs subvolume getpath cephfs sub_0 2>/dev/null)
+
+echo "Printing disk usage for the host as well as Ceph before writing to the subvolume"
+df -h
+ceph osd df
+
+sudo dd if=/dev/urandom of=$CEPH_MNT$subvol_path/1GB_file-1 status=progress bs=1M count=1000
+
+ceph osd set-full-ratio 0.2
+ceph osd set-nearfull-ratio 0.16
+ceph osd set-backfillfull-ratio 0.18
+
+timeout=30
+while [ $timeout -gt 0 ]
+do
+    health=$(ceph health detail)
+    [[ $health = *"OSD_FULL"* ]] && echo "OSD is full" && break
+    echo "Waiting for the OSD to become full: $timeout"
+    sleep 1
+    let "timeout-=1"
+done
+
+echo "Printing disk usage for the host as well as Ceph after the OSD ratios have been set"
+df -h
+ceph osd df
+
+# Take a snapshot
+ceph fs subvolume snapshot create cephfs sub_0 snap_0
+
+# Deleting the subvolume while retaining snapshots should fail on a full cluster
+expect_failure ceph fs subvolume rm cephfs sub_0 --retain-snapshots
+
+# Validate that the subvolume has not been deleted
+ceph fs subvolume info cephfs sub_0
+
+# Validate that the config file is not truncated and the GLOBAL section exists
+sudo grep "GLOBAL" $CEPH_MNT/volumes/_nogroup/sub_0/.meta
+
+# Hard cleanup
+sudo rmdir $CEPH_MNT/volumes/_nogroup/sub_0/.snap/snap_0
+sudo rm -rf $CEPH_MNT/volumes/_nogroup/sub_0
+
+# Reset the ratios to their original values for the sake of the remaining tests
+ceph osd set-full-ratio 0.95
+ceph osd set-nearfull-ratio 0.95
+ceph osd set-backfillfull-ratio 0.95
+
+echo "Printing disk usage for the host as well as Ceph now that the test has finished"
+df -h
+ceph osd df
+
+echo OK
-- 
2.39.5