Fix fs/full suite. The tests in the suite were failing
in the release branches quincy and pacific because of a change
in the rados bluefs pre-allocation (PR 51774). The change
affected main/quincy differently from quincy/pacific.
Fixes: https://tracker.ceph.com/issues/63132
Signed-off-by: Kotresh HR <khiremat@redhat.com>
(cherry picked from commit 27e9a15a53633b0c77bb0fc60387e58b46c569dd)
debug mds: 20
osd: # force bluestore since it's required for ec overwrites
osd objectstore: bluestore
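+ # 2147483648 bytes = 2 GiB (previously 1 GiB); see PR 51774 and
+ # https://tracker.ceph.com/issues/63132 for why the larger OSD is needed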
- bluestore block size: 1073741824
+ bluestore block size: 2147483648
tasks:
- workunit:
cleanup: true
# Hence the subsequent subvolume commands on the clone fail with a
# 'MetadataMgrException: -2 (section 'GLOBAL' does not exist)' traceback.
-# The osd is of the size 1GB. The full-ratios are set so that osd is treated full
-# at around 600MB. The subvolume is created and 100MB is written.
+# The osd is of size 2GiB. The full-ratios are set so that the osd is treated as full
+# at around 1.2GB. The subvolume is created and 200MB is written.
# The subvolume is snapshotted and cloned ten times. Since the clone delay is set to 15 seconds,
# all the clones are sure to reach the pending state. Among the ten clones, only a few succeed
# and the rest fail with ENOSPACE.
df -h
ceph osd df
-for i in {1..100};do sudo dd if=/dev/urandom of=$CEPH_MNT$subvol_path_0/1MB_file-$i status=progress bs=1M count=1 conv=fdatasync;done
+for i in {1..100};do sudo dd if=/dev/urandom of=$CEPH_MNT$subvol_path_0/2MB_file-$i status=progress bs=1M count=2 conv=fdatasync;done
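+# 100 files x 2MB each = ~200MB written, matching the 200MB noted in the comment above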
# For debugging
echo "After subvolumes are written"
set -ex
# This testcase tests the scenario of the 'ceph fs subvolume rm' mgr command
-# when the osd is full. The command used to hang. The osd is of the size 1GB.
-# The subvolume is created and 500MB file is written. The full-ratios are
+# when the osd is full. The command used to hang. The osd is of size 2GiB.
+# The subvolume is created and a 1GB file is written. The full-ratios are
# set below 500MB such that the osd is treated as full. Now the subvolume
# is removed. This should be successful with the introduction of FULL
# capabilities which the mgr holds.
df -h
ceph osd df
-sudo dd if=/dev/urandom of=$CEPH_MNT$subvol_path/500MB_file-1 status=progress bs=1M count=500
+sudo dd if=/dev/urandom of=$CEPH_MNT$subvol_path/1GB_file-1 status=progress bs=1M count=1000
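+# With a 2GiB OSD, full ratio 0.2 is ~400MB and nearfull 0.16 is ~340MB, both far
+# below the 1GB written above, so the osd is treated as full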
ceph osd set-full-ratio 0.2
ceph osd set-nearfull-ratio 0.16
# snapshot rm of the same snapshot fails with 'MetadataMgrException: -2 (section 'GLOBAL' does not exist)'
# traceback.
-# The osd is of the size 1GB. The subvolume is created and 800MB file is written.
-# Then full-ratios are set below 500MB such that the osd is treated as full.
+# The osd is of size 2GiB. The subvolume is created and a 1.6GB file is written.
+# Then the full-ratios are set so that the osd is treated as full at around 400MB.
# The subvolume snapshot is taken, which succeeds as no extra space is required
# for the snapshot. Now, the removal of the snapshot fails with ENOSPACE as it
# fails to remove the snapshot metadata set. The snapshot removal fails
df $CEPH_MNT
ceph osd df
-# Write 800MB file and set full ratio to around 200MB
-ignore_failure sudo dd if=/dev/urandom of=$CEPH_MNT$subvol_path/800MB_file-1 status=progress bs=1M count=800 conv=fdatasync
+# Write 1.6GB file and set full ratio to around 400MB
+ignore_failure sudo dd if=/dev/urandom of=$CEPH_MNT$subvol_path/1.6GB_file-1 status=progress bs=1M count=1600 conv=fdatasync
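+# As above, full ratio 0.2 of the 2GiB osd is ~400MB, far below the 1.6GB written,
+# so the osd is treated as full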
ceph osd set-full-ratio 0.2
ceph osd set-nearfull-ratio 0.16