+++ /dev/null
-#
-# Test the expected behavior of the
-#
-# CEPH_FEATURE_ERASURE_CODE_PLUGINS_V3
-#
-# feature.
-#
-roles:
-- - mon.a
- - mon.b
- - osd.0
- - osd.1
-- - osd.2
- - mon.c
- - mgr.x
-tasks:
-#
-# Install hammer
-#
-- install:
- branch: hammer
-- ceph:
- fs: xfs
-#
-# We don't need mon.c for now: it will be used later to make sure an old
-# mon cannot join the quorum once the feature has been activated
-#
-- ceph.stop:
- daemons: [mon.c]
-- exec:
- mon.a:
- - |-
- ceph osd erasure-code-profile set WRONG plugin=WRONG
- ceph osd pool create poolWRONG 12 12 erasure WRONG 2>&1 | grep "failed to load plugin using profile WRONG"
-#
-# Partial upgrade, osd.2 is not upgraded
-#
-- install.upgrade:
- osd.0:
-#
-# a is the leader
-#
-- ceph.restart:
- daemons: [mon.a]
- wait-for-healthy: false
-- exec:
- mon.a:
- - |-
- ceph osd erasure-code-profile set profile-shec k=2 m=1 c=1 plugin=shec 2>&1 | grep "unsupported by: the monitor cluster"
-- ceph.restart:
- daemons: [mon.b, osd.1, osd.0]
- wait-for-healthy: false
- wait-for-osds-up: true
-#
-# The shec plugin cannot be used because osd.2 is not upgraded yet
-# and would crash.
-#
-- exec:
- mon.a:
- - |-
- ceph osd erasure-code-profile set profile-shec k=2 m=1 c=1 plugin=shec 2>&1 | grep "unsupported by: osd.2"
-#
-# With osd.2 taken out, the rest of the cluster is fully upgraded
-#
-- ceph.stop:
- daemons: [osd.2]
-- sleep:
- duration: 60
-#
-# Creating an erasure code profile using the shec plugin now works
-#
-- exec:
- mon.a:
- - "ceph osd erasure-code-profile set profile-shec k=2 m=1 c=1 plugin=shec"
-#
-# osd.2 won't be able to join the cluster because it does not support the feature
-#
-- ceph.restart:
- daemons: [osd.2]
- wait-for-healthy: false
-- sleep:
- duration: 60
-- exec:
- osd.2:
- - |-
- grep "protocol feature.*missing" /var/log/ceph/ceph-osd.2.log
-#
-# mon.c won't be able to join the quorum because it does not support the feature
-#
-- ceph.restart:
- daemons: [mon.c]
- wait-for-healthy: false
-- sleep:
- duration: 60
-- exec:
- mon.c:
- - |-
- grep "missing.*feature" /var/log/ceph/ceph-mon.c.log
if [ $2 = "master" ] ; then
# run master branch with --newest option looking for good sha1 7 builds back
teuthology-suite -v -c $2 -m $3 -k distro -s rados --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/28 --newest 7 -e $4 ~/vps.yaml $5
-elif [ $2 = "hammer" ] ; then
- # run hammer branch with fewer jobs
- teuthology-suite -v -c $2 -m $3 -k distro -s rados --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/56 -e $4 ~/vps.yaml $5
elif [ $2 = "jewel" ] ; then
# run jewel branch with /40 jobs
teuthology-suite -v -c $2 -m $3 -k distro -s rados --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/40 -e $4 ~/vps.yaml $5
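For context on the --subset arithmetic used by the teuthology-suite calls in both scheduling scripts, here is a minimal sketch of the index computation; treating $1 as a small per-run offset (e.g. 0-6, one scheduled run per day) is an assumption on my part, since the scripts do not document it:

    # sketch of the subset index fed to --subset in the commands above
    offset=$1                    # assumed: 0..6, supplied by the calling cron job
    week=$(date +%U)             # week of year, 00..53
    index=$(echo "((${week} % 4) * 7) + ${offset}" | bc)
    # with --subset ${index}/28 this walks through all 28 slices once every
    # 4 weeks: 4 possible week values x 7 offsets = 28 distinct indices
    echo "--subset ${index}/28"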
echo "Scheduling " $2 " branch"
if [ $2 = "master" ] ; then
- # run master branch with --newest option looking for good sha1 7 builds back with /999 jobs
- teuthology-suite -v -c $2 -m $3 -k $6 -s $4 --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/999 --newest 7 -e $5 $7
-elif [ $2 = "hammer" ] ; then
- # run hammer branch with fewer jobs
- teuthology-suite -v -c $2 -m $3 -k $6 -s $4 --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/56 -e $5 $7
+ # run master branch with the --newest option, looking for a good sha1 up to 7 builds back, with /100000 jobs
+ # using '-p 80 --force-priority' as an exception for now
+ teuthology-suite -v -c $2 -m $3 -k $6 -s $4 --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/100000 --newest 7 -e $5 $7 -p 80 --force-priority
elif [ $2 = "jewel" ] ; then
# run jewel branch with /40 jobs
teuthology-suite -v -c $2 -m $3 -k $6 -s $4 --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/40 -e $5 $7
+++ /dev/null
-roles:
- - [mon.a, mgr.x, client.0]
-tasks:
- - install:
- # branch has precedence over sha1
- branch: hammer
- sha1: e5b6eea91cc37434f78a987d2dd1d3edd4a23f3f # dumpling
- - exec:
- client.0:
- - ceph --version | grep 'version 0.94'
)
mnt_point = DATA_PATH.format(
type_='osd', cluster=cluster_name, id_=id_)
- try:
- remote.run(args=[
- 'sudo', 'chown', '-R', 'ceph:ceph', mnt_point
- ])
- except run.CommandFailedError as e:
- # hammer does not have ceph user, so ignore this error
- log.info('ignoring error when chown ceph:ceph,'
- 'probably installing hammer: %s', e)
+ remote.run(args=[
+ 'sudo', 'chown', '-R', 'ceph:ceph', mnt_point
+ ])
log.info('Reading keys from all nodes...')
keys_fp = BytesIO()
'--keyring', keyring_path,
],
)
- try:
- remote.run(args=[
- 'sudo', 'chown', '-R', 'ceph:ceph', mnt_point
- ])
- except run.CommandFailedError as e:
- # hammer does not have ceph user, so ignore this error
- log.info('ignoring error when chown ceph:ceph,'
- 'probably installing hammer: %s', e)
+ remote.run(args=[
+ 'sudo', 'chown', '-R', 'ceph:ceph', mnt_point
+ ])
run.wait(
mons.run(
daemon_signal = 'term'
# create osds in order. (this only matters for pre-luminous, which might
- # be hammer, which doesn't take an id_ argument to legacy 'osd create').
+ # be jewel/hammer, which doesn't take an id_ argument to legacy 'osd create').
osd_uuids = {}
for remote, roles_for_host in daemons.remotes.items():
is_type_ = teuthology.is_type(type_, cluster_name)
]
)
except:
- # fallback to pre-luminous (hammer or jewel)
+ # fallback to pre-luminous (jewel)
remote.run(
args=[
'sudo', 'ceph', '--cluster', cluster_name,
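To make the ordering comment above concrete: the try/except chooses between two mon commands, and only the newer one lets the caller pin the osd id. A rough bash sketch of the two paths, assuming the usual luminous-era CLI (the exact arguments are truncated in this hunk, so this is an illustration rather than a copy of the task's code):

    uuid=$(uuidgen)
    id=0                         # illustrative: the id this osd should receive
    # luminous and later: 'osd new' accepts an explicit id, so creation order
    # does not matter
    ceph --cluster ceph osd new ${uuid} ${id}
    # pre-luminous fallback: legacy 'osd create' only takes the uuid and hands
    # back the next free id, hence the need to create osds in ascending order
    ceph --cluster ceph osd create ${uuid}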