qa: add test for subvolume rm with retained snapshots when cluster is full
author    neeraj pratap singh <neerajpratapsingh@li-ff7f0d4c-3462-11b2-a85c-d4004c0fa1a0.ibm.com>
          Tue, 8 Oct 2024 09:23:33 +0000 (14:53 +0530)
committer neeraj pratap singh <neerajpratapsingh@li-ff7f0d4c-3462-11b2-a85c-d4004c0fa1a0.ibm.com>
          Wed, 17 Sep 2025 14:06:40 +0000 (19:36 +0530)
Fixes: https://tracker.ceph.com/issues/67330
Signed-off-by: Neeraj Pratap Singh <neesingh@redhat.com>
Conflicts:
qa/suites/fs/full/tasks/mgr-osd-full.yaml

qa/suites/fs/full/tasks/mgr-osd-full.yaml
qa/workunits/fs/full/subvol_rm_retained_snap_when_cluster_is_full.sh [new file with mode: 0755]

diff --git a/qa/suites/fs/full/tasks/mgr-osd-full.yaml b/qa/suites/fs/full/tasks/mgr-osd-full.yaml
index fffd3f1d7c3e7c8d9e553c544fce29a52372a40b..9e448ce617f2da65fecf903401ec8ea366391c66 100644 (file)
--- a/qa/suites/fs/full/tasks/mgr-osd-full.yaml
+++ b/qa/suites/fs/full/tasks/mgr-osd-full.yaml
@@ -34,3 +34,8 @@ tasks:
     clients:
       client.0:
         - fs/full/subvolume_ls.sh
+- workunit:
+    cleanup: true
+    clients:
+      client.0:
+        - fs/full/subvol_rm_retained_snap_when_cluster_is_full.sh
diff --git a/qa/workunits/fs/full/subvol_rm_retained_snap_when_cluster_is_full.sh b/qa/workunits/fs/full/subvol_rm_retained_snap_when_cluster_is_full.sh
new file mode 100755 (executable)
index 0000000..c8fb480
--- /dev/null
+++ b/qa/workunits/fs/full/subvol_rm_retained_snap_when_cluster_is_full.sh
@@ -0,0 +1,78 @@
+#!/usr/bin/env bash
+set -ex
+
+# Test that the command 'ceph fs subvolume rm --retain-snapshots' fails when
+# the OSD is full.
+#
+# A subvolume is created on a cluster with a 2GB OSD and a 1GB file is written
+# to the subvolume. The OSD full ratio is then lowered so that the OSD is
+# treated as full below 500MB. Removing the subvolume while retaining its
+# snapshot is then expected to fail.
+
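+# Helper: invert the exit status, succeeding only if the given command fails.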
+expect_failure() {
+       if "$@"; then return 1; else return 0; fi
+}
+
+ceph fs subvolume create cephfs sub_0
+subvol_path=$(ceph fs subvolume getpath cephfs sub_0 2>/dev/null)
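+# getpath prints the subvolume's path relative to the CephFS mount root.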
+
+echo "Printing system disk usages for host as well Ceph before writing on subvolume"
+df -h
+ceph osd df
+
+sudo dd if=/dev/urandom of="$CEPH_MNT$subvol_path"/1GB_file-1 status=progress bs=1M count=1000
+
+ceph osd set-full-ratio 0.2
+ceph osd set-nearfull-ratio 0.16
+ceph osd set-backfillfull-ratio 0.18
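+# With a 2GB OSD, a full ratio of 0.2 amounts to roughly 400MB, so the 1GB
+# already written pushes the OSD into the full state.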
+
+timeout=30
+while [ $timeout -gt 0 ]
+do
+  health=$(ceph health detail)
+  [[ $health = *"OSD_FULL"* ]] && echo "OSD is full" && break
+  echo "Wating for osd to be full: $timeout"
+  sleep 1
+  let "timeout-=1"
+done
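+# If OSD_FULL never shows up within the timeout, the script just proceeds;
+# the expect_failure check below would then abort the run under 'set -e'.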
+
+echo "Printing disk usage for host as well as Ceph after OSD ratios have been set"
+df -h
+ceph osd df
+
+# Take a snapshot
+ceph fs subvolume snapshot create cephfs sub_0 snap_0
+
+# Removing the subvolume while retaining the snapshot should fail
+expect_failure ceph fs subvolume rm cephfs sub_0 --retain-snapshots
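+# Retaining snapshots updates the subvolume's .meta file; on a full cluster
+# this should fail cleanly rather than leave a truncated .meta (checked below).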
+
+# Validate that the subvolume has not been deleted
+ceph fs subvolume info cephfs sub_0
+
+# Validate config file is not truncated and GLOBAL section exists
+sudo grep "GLOBAL" $CEPH_MNT/volumes/_nogroup/sub_0/.meta
+
+# Hard cleanup
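+# CephFS snapshots cannot be removed with rm -rf; the snapshot entry under
+# the .snap directory has to be rmdir'ed first.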
+sudo rmdir "$CEPH_MNT"/volumes/_nogroup/sub_0/.snap/snap_0
+sudo rm -rf "$CEPH_MNT"/volumes/_nogroup/sub_0
+
+# Reset the ratios to their original values for the sake of the remaining tests
+ceph osd set-full-ratio 0.95
+ceph osd set-nearfull-ratio 0.95
+ceph osd set-backfillfull-ratio 0.95
+
+echo "Printing disk usage for host as well as Ceph since test has been finished"
+df -h
+ceph osd df
+
+echo OK
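
For reference, a minimal happy-path counterpart (a sketch, assuming a healthy
cluster with the same 'cephfs' volume; 'sub_1' and 'snap_1' are hypothetical
names): on a cluster that is not full, the same removal succeeds and the
retained snapshot outlives the subvolume.

    ceph fs subvolume create cephfs sub_1
    ceph fs subvolume snapshot create cephfs sub_1 snap_1
    ceph fs subvolume rm cephfs sub_1 --retain-snapshots
    ceph fs subvolume snapshot ls cephfs sub_1            # snap_1 still listed
    ceph fs subvolume snapshot rm cephfs sub_1 snap_1     # final cleanup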