--- /dev/null
+roles:
+- [mon.a, mon.b, mon.c, mgr.x, mds.a, osd.0, osd.1, osd.2, osd.3]
+- [client.0]
+openstack:
+- volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+../../../../../cephfs/objectstore-ec/bluestore-bitmap.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+roles:
+- [mon.a, mon.b, mon.c, mgr.x, mgr.y, mds.a, mds.b, mds.c, osd.0, osd.1, osd.2, osd.3]
+- [client.0, client.1]
+openstack:
+- volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
--- /dev/null
+.qa/cephfs/conf/
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/cephfs/overrides/frag_enable.yaml
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ mon pg warn min per osd: 0
--- /dev/null
+.qa/cephfs/overrides/whitelist_health.yaml
\ No newline at end of file
--- /dev/null
+.qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ install ceph/mimic latest
+tasks:
+- install:
+ branch: mimic #tag: v13.2.8
+ exclude_packages:
+ - librados3
+ - ceph-mgr-dashboard
+ - ceph-mgr-diskprediction-local
+ - ceph-mgr-diskprediction-cloud
+ - ceph-mgr-rook
+ - ceph-mgr-cephadm
+ - cephadm
+ extra_packages: ['librados2']
+- print: "**** done installing mimic"
+- ceph:
+ mon_bind_addrvec: false
+ mon_bind_msgr2: false
+ log-whitelist:
+ - overall HEALTH_
+ - \(FS_
+ - \(MDS_
+ - \(OSD_
+ - \(MON_DOWN\)
+ - \(CACHE_POOL_
+ - \(POOL_
+ - \(MGR_DOWN\)
+ - \(PG_
+ - \(SMALLER_PGP_NUM\)
+ - Monitor daemon marked osd
+ - Behind on trimming
+ - Manager daemon
+ conf:
+ global:
+ mon warn on pool no app: false
+ ms bind msgr2: false
+- exec:
+ osd.0:
+ - ceph osd require-osd-release mimic
+ - ceph osd set-require-min-compat-client mimic
+- print: "**** done ceph"
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - fs/upgrade/volume_client
+ env:
+ ACTION: create
+- print: "**** fs/volume_client create"
+- ceph-fuse:
+ client.0:
+ mount_path: /volumes/_nogroup/vol_isolated
+ mountpoint: mnt.0
+ auth_id: vol_data_isolated
+ client.1:
+ mount_path: /volumes/_nogroup/vol_default
+ mountpoint: mnt.1
+ auth_id: vol_default
+- print: "**** ceph-fuse vol_isolated"
+- workunit:
+ clients:
+ client.0:
+ - fs/upgrade/volume_client
+ env:
+ ACTION: populate
+ cleanup: false
+- workunit:
+ clients:
+ client.1:
+ - fs/upgrade/volume_client
+ env:
+ ACTION: populate
+ cleanup: false
+- print: "**** fs/volume_client populate"
--- /dev/null
+overrides:
+ ceph:
+ mon_bind_msgr2: false
+ mon_bind_addrvec: false
+ log-whitelist:
+ - scrub mismatch
+ - ScrubResult
+ - wrongly marked
+ - \(POOL_APP_NOT_ENABLED\)
+ - \(SLOW_OPS\)
+ - overall HEALTH_
+ - \(MON_MSGR2_NOT_ENABLED\)
+ - slow request
+ conf:
+ global:
+ bluestore warn on legacy statfs: false
+ bluestore warn on no per pool omap: false
+ mon:
+ mon warn on osd down out interval zero: false
+
+tasks:
+- mds_pre_upgrade:
+- print: "**** done mds pre-upgrade sequence"
+- install.upgrade:
+ mon.a:
+- print: "**** done install.upgrade both hosts"
+- ceph.restart:
+ daemons: [mon.*, mgr.*]
+ mon-health-to-clog: false
+ wait-for-healthy: false
+- exec:
+ mon.a:
+ - ceph config set global mon_warn_on_msgr2_not_enabled false
+- ceph.healthy:
+- ceph.restart:
+ daemons: [osd.*]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+- ceph.stop: [mds.*]
+- ceph.restart:
+ daemons: [mds.*]
+ wait-for-healthy: false
+ wait-for-osds-up: true
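+# Finish the upgrade: enable msgr2, require the nautilus OSD release, and disable pg autoscaling on existing pools.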
+- exec:
+ mon.a:
+ - ceph mon enable-msgr2
+ - ceph versions
+ - ceph osd dump -f json-pretty
+ - ceph config rm global mon_warn_on_msgr2_not_enabled
+ - ceph osd require-osd-release nautilus
+ - for f in `ceph osd pool ls` ; do ceph osd pool set $f pg_autoscale_mode off ; done
+ #- ceph osd set-require-min-compat-client nautilus
+- ceph.healthy:
+- print: "**** done ceph.restart"
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - missing required features
+tasks:
+- exec:
+ mon.a:
+ - ceph fs dump --format=json-pretty
+ - ceph fs volume ls
+ - ceph fs subvolume ls cephfs
+- workunit:
+ clients:
+ client.0:
+ - fs/upgrade/volume_client
+ env:
+ ACTION: verify
+ cleanup: false
+- workunit:
+ clients:
+ client.1:
+ - fs/upgrade/volume_client
+ env:
+ ACTION: verify
+ cleanup: false
+- print: "**** fs/volume_client verify"
--- /dev/null
+.qa/distros/all/ubuntu_18.04.yaml
\ No newline at end of file
if client_config is None:
client_config = {}
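+ # Optional "auth_id" lets a client mount with a different cephx identity than its client.<id> role (defaults to the client id).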
+ auth_id = client_config.get("auth_id", id_)
+
skip = client_config.get("skip", False)
if skip:
skipped[id_] = skip
continue
if id_ not in all_mounts:
- fuse_mount = FuseMount(ctx, client_config, testdir, id_, remote)
+ fuse_mount = FuseMount(ctx, client_config, testdir, auth_id, remote)
all_mounts[id_] = fuse_mount
else:
# Catch bad configs where someone has e.g. tried to use ceph-fuse and kcephfs for the same client
assert isinstance(all_mounts[id_], FuseMount)
if not config.get("disabled", False) and client_config.get('mounted', True):
- mounted_by_me[id_] = all_mounts[id_]
+ mounted_by_me[id_] = {"config": client_config, "mount": all_mounts[id_]}
ctx.mounts = all_mounts
# Mount any clients we have been asked to (default to mount all)
log.info('Mounting ceph-fuse clients...')
- for mount in mounted_by_me.values():
- mount.mount()
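+ # Honor optional per-client "mountpoint" and "mount_path" settings when mounting.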
+ for info in mounted_by_me.values():
+ config = info["config"]
+ mount_path = config.get("mount_path")
+ mountpoint = config.get("mountpoint")
+ info["mount"].mount(mountpoint=mountpoint, mount_path=mount_path)
- for mount in mounted_by_me.values():
- mount.wait_until_mounted()
+ for info in mounted_by_me.values():
+ info["mount"].wait_until_mounted()
# Umount any pre-existing clients that we have not been asked to mount
for client_id in set(all_mounts.keys()) - set(mounted_by_me.keys()) - set(skipped.keys()):
finally:
log.info('Unmounting ceph-fuse clients...')
- for mount in mounted_by_me.values():
+ for info in mounted_by_me.values():
# Conditional because an inner context might have umounted it
+ mount = info["mount"]
if mount.is_mounted():
mount.umount_wait()
'--',
self.mountpoint,
],
- timeout=(15*60)
+ timeout=(15*60),
+ cwd=self.test_dir
)
run_cmd = [
self.mountpoint,
]
+ cwd = self.test_dir
if self.client_config.get('valgrind') is not None:
run_cmd = misc.get_valgrind_args(
self.test_dir,
run_cmd,
self.client_config.get('valgrind'),
)
+ cwd = None # misc.get_valgrind_args will chdir for us
run_cmd.extend(fuse_cmd)
proc = self.client_remote.run(
args=run_cmd,
+ cwd=cwd,
logger=log.getChild('ceph-fuse.{id}'.format(id=self.client_id)),
stdin=run.PIPE,
wait=False,
'--',
self.mountpoint,
],
+ cwd=self.test_dir,
stdout=StringIO(),
stderr=StringIO(),
wait=False,
# unrestricted access to the filesystem mount.
try:
stderr = StringIO()
- self.client_remote.run(args=['sudo', 'chmod', '1777', self.mountpoint], timeout=(15*60), stderr=stderr)
+ self.client_remote.run(args=['sudo', 'chmod', '1777', self.mountpoint], timeout=(15*60), cwd=self.test_dir, stderr=stderr)
except run.CommandFailedError:
stderr = stderr.getvalue()
if "Read-only file system".lower() in stderr.lower():
raise
def _mountpoint_exists(self):
- return self.client_remote.run(args=["ls", "-d", self.mountpoint], check_status=False, timeout=(15*60)).exitstatus == 0
+ return self.client_remote.run(args=["ls", "-d", self.mountpoint], check_status=False, cwd=self.test_dir, timeout=(15*60)).exitstatus == 0
def umount(self):
try:
'-u',
self.mountpoint,
],
+ cwd=self.test_dir,
timeout=(30*60),
)
except run.CommandFailedError:
'--',
self.mountpoint,
],
+ cwd=self.test_dir,
stderr=stderr,
timeout=(60*5)
)
'-rf',
self.mountpoint,
],
+ cwd=self.test_dir,
timeout=(60*5)
)
--- /dev/null
+#!/bin/bash
+
+set -ex
+
+PYTHON="python2"
+
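+# Run a Python payload through ceph_volume_client, connecting to the cluster as client "manila".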
+function run_payload {
+ local payload="$1"
+ sudo "$PYTHON" <<EOF
+from __future__ import print_function
+from ceph_volume_client import CephFSVolumeClient, VolumePath
+from sys import version_info as sys_version_info
+from rados import OSError as rados_OSError
+import logging
+log = logging.getLogger("ceph_volume_client")
+log.addHandler(logging.StreamHandler())
+log.setLevel(logging.DEBUG)
+vc = CephFSVolumeClient("manila", "/etc/ceph/ceph.conf", "ceph")
+vc.connect()
+${payload}
+vc.disconnect()
+EOF
+}
+
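+# Import a keyring read from stdin into the named client's keyring file (default: /etc/ceph/ceph.client.<client>.keyring).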
+function import_key {
+ local client="$1"
+ if [ -n "$2" ]; then
+ local keyring="$2"
+ else
+ local keyring="/etc/ceph/ceph.client.${client}.keyring"
+ fi
+ local T=$(mktemp)
+ tee "$T" >&2
+ sudo touch -- "$keyring"
+ sudo ceph-authtool "$keyring" --import-keyring "$T"
+ rm -f -- "$T"
+}
+
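+# Create the "manila" client with broad mds/osd/mon caps and merge its key into /etc/ceph/ceph.keyring.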
+function conf_keys {
+ local client="$1"
+ ls /etc/ceph >&2
+ ceph auth get-or-create "client.manila" mds 'allow *' osd 'allow rw' mon 'allow *' | import_key "$client" /etc/ceph/ceph.keyring
+}
+
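+# Create an 8 GiB volume with data_isolated=True and authorize client.vol_data_isolated to use it.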
+function create_data_isolated {
+ local PAYLOAD='
+vp = VolumePath(None, "vol_isolated")
+vc.create_volume(vp, (1<<33), data_isolated=True)
+auth_result = vc.authorize(vp, "vol_data_isolated", tenant_id="test")
+print("[client.vol_data_isolated]\n\tkey = ", auth_result["auth_key"])
+'
+
+ run_payload "$PAYLOAD" | import_key "vol_data_isolated"
+}
+
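+# Create an 8 GiB volume without data isolation and authorize client.vol_default.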
+function create_default {
+ local PAYLOAD='
+vp = VolumePath(None, "vol_default")
+vc.create_volume(vp, (1<<33))
+auth_result = vc.authorize(vp, "vol_default", tenant_id="test")
+print("[client.vol_default]\n\tkey = ", auth_result["auth_key"])
+'
+ run_payload "$PAYLOAD" | import_key "vol_default"
+}
+
+function create {
+ create_data_isolated
+ create_default
+}
+
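+# Populate the working directory (inside the mounted volume) with a copy of /usr/bin.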
+function populate {
+ pwd
+ df -h .
+ ls -l
+ cp -a /usr/bin .
+}
+
+function verify_data_isolated {
+ ceph fs subvolume getpath cephfs vol_isolated
+ stat bin
+ ls bin | tail
+}
+
+function verify_default {
+ ceph fs subvolume getpath cephfs vol_default
+ stat bin
+ ls bin | tail
+}
+
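+# After the upgrade, confirm both subvolumes are still listed and the copied data is intact.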
+function verify {
+ diff <(ceph fs subvolume ls cephfs | jq -cS 'sort_by(.name)' | tee /dev/stderr) <(printf '[{"name":"vol_isolated"},{"name":"vol_default"}]' | jq -cS 'sort_by(.name)')
+ verify_data_isolated
+ verify_default
+}
+
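+# Dispatch on $ACTION (create, populate or verify), which the teuthology yaml passes through the workunit environment.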
+function main {
+ if [ "$1" = create ]; then
+ conf_keys
+ create
+ elif [ "$1" = populate ]; then
+ populate
+ elif [ "$1" = verify ]; then
+ # verify (sub)volumes still exist and are configured correctly
+ verify
+ else
+ exit 1
+ fi
+}
+
+main "$ACTION"