From: Kotresh HR Date: Sat, 3 Feb 2024 07:31:37 +0000 (+0530) Subject: qa: Fix fs/full suite X-Git-Tag: v17.2.8~422^2 X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=1cd4668e18a2b0a5496becdd16ddbf93ccaf141d;p=ceph.git qa: Fix fs/full suite Fix fs/full suite. The tests in the suite were failing in release branches, quincy and pacific because of a change in the rados bluefs pre-allocation (PR 51774). The change affected main/quincy differently than quincy/pacific. Fixes: https://tracker.ceph.com/issues/63132 Signed-off-by: Kotresh HR (cherry picked from commit 27e9a15a53633b0c77bb0fc60387e58b46c569dd) --- diff --git a/qa/suites/fs/full/tasks/mgr-osd-full.yaml b/qa/suites/fs/full/tasks/mgr-osd-full.yaml index b4f673e39579..a005f52037c7 100644 --- a/qa/suites/fs/full/tasks/mgr-osd-full.yaml +++ b/qa/suites/fs/full/tasks/mgr-osd-full.yaml @@ -12,7 +12,7 @@ overrides: debug mds: 20 osd: # force bluestore since it's required for ec overwrites osd objectstore: bluestore - bluestore block size: 1073741824 + bluestore block size: 2147483648 tasks: - workunit: cleanup: true diff --git a/qa/workunits/fs/full/subvolume_clone.sh b/qa/workunits/fs/full/subvolume_clone.sh index a11131215b41..510343edbb52 100755 --- a/qa/workunits/fs/full/subvolume_clone.sh +++ b/qa/workunits/fs/full/subvolume_clone.sh @@ -7,8 +7,8 @@ set -ex # Hence the subsequent subvolume commands on the clone fails with # 'MetadataMgrException: -2 (section 'GLOBAL' does not exist)' traceback. -# The osd is of the size 1GB. The full-ratios are set so that osd is treated full -# at around 600MB. The subvolume is created and 100MB is written. +# The osd is of the size 2GiB. The full-ratios are set so that osd is treated full +# at around 1.2GB. The subvolume is created and 200MB is written. # The subvolume is snapshotted and cloned ten times. Since the clone delay is set to 15 seconds, # all the clones reach pending state for sure. 
Among ten clones, only a few succeed and the rest fail # with ENOSPACE. @@ -47,7 +47,7 @@ echo "After ratios are set" df -h ceph osd df -for i in {1..100};do sudo dd if=/dev/urandom of=$CEPH_MNT$subvol_path_0/1MB_file-$i status=progress bs=1M count=1 conv=fdatasync;done +for i in {1..100};do sudo dd if=/dev/urandom of=$CEPH_MNT$subvol_path_0/2MB_file-$i status=progress bs=1M count=2 conv=fdatasync;done # For debugging echo "After subvolumes are written" diff --git a/qa/workunits/fs/full/subvolume_rm.sh b/qa/workunits/fs/full/subvolume_rm.sh index a464e30f56e9..2a3bf956df33 100755 --- a/qa/workunits/fs/full/subvolume_rm.sh +++ b/qa/workunits/fs/full/subvolume_rm.sh @@ -2,8 +2,8 @@ set -ex # This testcase tests the scenario of the 'ceph fs subvolume rm' mgr command -# when the osd is full. The command used to hang. The osd is of the size 1GB. -# The subvolume is created and 500MB file is written. The full-ratios are +# when the osd is full. The command used to hang. The osd is of the size 2GiB. +# The subvolume is created and 1GB file is written. The full-ratios are # set below 500MB such that the osd is treated as full. Now the subvolume is # is removed. This should be successful with the introduction of FULL # capabilities which the mgr holds. @@ -21,7 +21,7 @@ echo "Before write" df -h ceph osd df -sudo dd if=/dev/urandom of=$CEPH_MNT$subvol_path/500MB_file-1 status=progress bs=1M count=500 +sudo dd if=/dev/urandom of=$CEPH_MNT$subvol_path/1GB_file-1 status=progress bs=1M count=1000 ceph osd set-full-ratio 0.2 ceph osd set-nearfull-ratio 0.16 diff --git a/qa/workunits/fs/full/subvolume_snapshot_rm.sh b/qa/workunits/fs/full/subvolume_snapshot_rm.sh index f6d0add9fda4..8df89d3c7a3b 100755 --- a/qa/workunits/fs/full/subvolume_snapshot_rm.sh +++ b/qa/workunits/fs/full/subvolume_snapshot_rm.sh @@ -7,8 +7,8 @@ set -ex # snapshot rm of the same snapshot fails with 'MetadataMgrException: -2 (section 'GLOBAL' does not exist)' # traceback. -# The osd is of the size 1GB. 
The subvolume is created and 800MB file is written. -# Then full-ratios are set below 500MB such that the osd is treated as full. +# The osd is of the size 2GiB. The subvolume is created and 1.6GB file is written. +# Then full-ratios are set below 1GiB such that the osd is treated as full. # The subvolume snapshot is taken which succeeds as no extra space is required # for snapshot. Now, the removal of the snapshot fails with ENOSPACE as it # fails to remove the snapshot metadata set. The snapshot removal fails @@ -31,8 +31,8 @@ echo "Before write" df $CEPH_MNT ceph osd df -# Write 800MB file and set full ratio to around 200MB -ignore_failure sudo dd if=/dev/urandom of=$CEPH_MNT$subvol_path/800MB_file-1 status=progress bs=1M count=800 conv=fdatasync +# Write 1.6GB file and set full ratio to around 400MB +ignore_failure sudo dd if=/dev/urandom of=$CEPH_MNT$subvol_path/1.6GB_file-1 status=progress bs=1M count=1600 conv=fdatasync ceph osd set-full-ratio 0.2 ceph osd set-nearfull-ratio 0.16