Also add an fs suite for tests that require only one node.
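
The new suite runs on a single node (mon, mgr, mds, one osd and one
client) with a 1GB bluestore osd. The fs/full/subvolume_rm.sh workunit
writes a 500MB file into a subvolume and then lowers the full ratios so
that the osd reports full:

    ceph osd set-full-ratio 0.2
    ceph osd set-nearfull-ratio 0.16
    ceph osd set-backfillfull-ratio 0.18

'ceph fs subvolume rm' must still succeed in this state; it used to
hang until the mgr was granted FULL capabilities. The workunit task now
also exports CEPH_MNT so workunit scripts can write through the client
mount.
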
Fixes: https://tracker.ceph.com/issues/50532
Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
Signed-off-by: Kotresh HR <khiremat@redhat.com>
--- /dev/null
+roles:
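+# a single node carries the mon, mgr, mds, osd and client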
+- [mon.a, mgr.x, mds.a, osd.0, client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 1
+    size: 5 # GB
+- machine:
+    disk: 10 # GB
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/cephfs/begin.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/cephfs/clusters/1-node-1-mds-1-osd.yaml
\ No newline at end of file
--- /dev/null
+.qa/cephfs/conf
\ No newline at end of file
--- /dev/null
+.qa/distros/supported-random-distro$
\ No newline at end of file
--- /dev/null
+tasks:
+- ceph-fuse:
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/objectstore/bluestore-bitmap.yaml
\ No newline at end of file
--- /dev/null
+overrides:
+  ceph:
+    conf:
+      mgr:
+        debug client: 20
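+    # the workunit intentionally fills the osd, so full/quota warnings are
+    # expected and must not fail the run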
+    log-ignorelist:
+      - OSD full dropping all updates
+      - OSD near full
+      - pausewr flag
+      - failsafe engaged, dropping updates
+      - failsafe disengaged, no longer dropping
+      - is full \(reached quota
+      - POOL_FULL
+      - POOL_NEARFULL
+      - POOL_BACKFILLFULL
+      - PG_DEGRADED
+      - OSD_OUT_OF_ORDER_FULL
+      - OSD_NEARFULL
+      - OSD_FULL
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/cephfs/overrides/frag_enable.yaml
\ No newline at end of file
--- /dev/null
+.qa/overrides/no_client_pidfile.yaml
\ No newline at end of file
--- /dev/null
+.qa/cephfs/overrides/whitelist_health.yaml
\ No newline at end of file
--- /dev/null
+.qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+overrides:
+  ceph:
+    conf:
+      global:
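+        # the cluster has only one osd (see roles), so pools keep a single
+        # replica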
+        osd_pool_default_size: 1
+        osd_pool_default_min_size: 1
+      client:
+        debug ms: 1
+        debug client: 20
+      mds:
+        debug ms: 1
+        debug mds: 20
+      osd: # force bluestore so the osd size can be capped via 'bluestore block size'
+        osd objectstore: bluestore
+        bluestore block size: 1073741824 # 1 GiB, small enough to fill quickly
+tasks:
+- workunit:
+    cleanup: false
+    clients:
+      client.0:
+        - fs/full/subvolume_rm.sh
         run.Raw('PATH=$PATH:/usr/sbin'),
         run.Raw('CEPH_BASE={dir}'.format(dir=clonedir)),
         run.Raw('CEPH_ROOT={dir}'.format(dir=clonedir)),
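+        # expose the client mountpoint to workunit scripts as $CEPH_MNT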
+        run.Raw('CEPH_MNT={dir}'.format(dir=mnt)),
     ]
     if env is not None:
         for var, val in env.items():
--- /dev/null
+#!/usr/bin/env bash
+set -ex
+
+# This testcase exercises the 'ceph fs subvolume rm' mgr command when the
+# osd is full. The command used to hang. The osd size is 1GB. A subvolume
+# is created and a 500MB file is written to it. The full-ratios are then
+# set below 500MB so that the osd is treated as full, and the subvolume is
+# removed. The removal should succeed thanks to the FULL capabilities
+# granted to the mgr.
+
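+# expect_failure inverts the exit status: it succeeds only when the given
+# command fails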
+expect_failure() {
+ if "$@"; then return 1; else return 0; fi
+}
+
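+# Create a subvolume and look up its path within the volume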
+ceph fs subvolume create cephfs sub_0
+subvol_path=$(ceph fs subvolume getpath cephfs sub_0 2>/dev/null)
+
+# For debugging
+echo "Before write"
+df -h
+ceph osd df
+
+sudo dd if=/dev/urandom of="$CEPH_MNT$subvol_path/500MB_file-1" status=progress bs=1M count=500
+
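+# Lower the full ratios below what was just written: 0.2 of the 1GB osd is
+# roughly 200MB, well under the 500MB file, so the osd gets marked full.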
+ceph osd set-full-ratio 0.2
+ceph osd set-nearfull-ratio 0.16
+ceph osd set-backfillfull-ratio 0.18
+
+timeout=30
+while [ $timeout -gt 0 ]
+do
+  health=$(ceph health detail)
+  [[ $health = *"OSD_FULL"* ]] && echo "OSD is full" && break
+  echo "Waiting for the osd to become full: $timeout"
+  sleep 1
+  let "timeout-=1"
+done
+
+# For debugging
+echo "After ratio set"
+df -h
+ceph osd df
+
+# Delete the subvolume
+ceph fs subvolume rm cephfs sub_0
+
+# Validate that the subvolume is deleted
+expect_failure ceph fs subvolume info cephfs sub_0
+
+# Wait for the subvolume's data to be purged
+trashdir=$CEPH_MNT/volumes/_deleting
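+# (a removed subvolume is moved under volumes/_deleting and its data is
+# purged asynchronously by the mgr)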
+timeout=30
+while [ $timeout -gt 0 ]
+do
+  [ -z "$(sudo ls -A "$trashdir")" ] && echo "Trash directory $trashdir is empty" && break
+  echo "Waiting for the trash dir to become empty: $timeout"
+  sleep 1
+  let "timeout-=1"
+done
+
+echo OK