20 01 * * 3,4 CEPH_BRANCH=master; MACHINE_NAME=smithi; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s upgrade-clients/client-upgrade-octopus-pacific -k distro -e $CEPH_QA_EMAIL --suite-branch octopus
+22 14 * * 3,4 CEPH_BRANCH=master; MACHINE_NAME=smithi; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -k distro -n 7 -m $MACHINE_NAME -s upgrade/octopus-x -e $CEPH_QA_EMAIL -p 70 --force-priority
+
--- /dev/null
+roles:
+- - mon.a
+ - mon.c
+ - mgr.y
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+ - client.0
+ - node-exporter.a
+ - alertmanager.a
+- - mon.b
+ - mgr.x
+ - osd.4
+ - osd.5
+ - osd.6
+ - osd.7
+ - client.1
+ - prometheus.a
+ - grafana.a
+ - node-exporter.b
+openstack:
+- volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd shutdown pgref assert: true
+++ /dev/null
-tasks:
-- install:
- branch: octopus
-- print: "**** done install task..."
-- print: "**** done start installing octopus cephadm ..."
-- cephadm:
- #image: docker.io/ceph/ceph:v15.2.0
- #cephadm_branch: v15.2.0
- image: docker.io/ceph/daemon-base:latest-octopus
- cephadm_branch: octopus
- conf:
- osd:
- #set config option for which cls modules are allowed to be loaded / used
- osd_class_load_list: "*"
- osd_class_default_list: "*"
-- print: "**** done end installing octopus cephadm ..."
-
-- print: "**** done start cephadm.shell ceph config set mgr..."
-- cephadm.shell:
- mon.a:
- - ceph config set mgr mgr/cephadm/use_repo_digest true --force
-- print: "**** done cephadm.shell ceph config set mgr..."
-
-- print: "**** done start parallel"
-- parallel:
- - workload
- - upgrade-sequence
-- print: "**** done end parallel"
--- /dev/null
+tasks:
+- install:
+ branch: octopus
+- print: "**** done install task..."
+- print: "**** done start installing octopus cephadm ..."
+- cephadm:
+ image: docker.io/ceph/daemon-base:latest-octopus
+ cephadm_branch: octopus
+ conf:
+ osd:
+      # set config option controlling which cls modules are allowed to be loaded / used
+ osd_class_load_list: "*"
+ osd_class_default_list: "*"
+- print: "**** done end installing octopus cephadm ..."
+
+- print: "**** done start cephadm.shell ceph config set mgr..."
+- cephadm.shell:
+ mon.a:
+ - ceph config set mgr mgr/cephadm/use_repo_digest true --force
+- print: "**** done cephadm.shell ceph config set mgr..."
+
+- print: "**** done start parallel"
+- parallel:
+ - workload
+ - upgrade-sequence
+- print: "**** done end parallel"
+++ /dev/null
-# renamed tasks: to upgrade-sequence:
-upgrade-sequence:
- sequential:
- - print: "**** done start upgrade"
- - cephadm.shell:
- env: [sha1]
- mon.a:
- - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
- - print: "**** done end upgrade"
+++ /dev/null
-tasks:
-- print: "**** done start wait..."
-- cephadm.shell:
- env: [sha1]
- mon.a:
- - while ceph orch upgrade status | jq '.in_progress' | grep true ; do ceph orch ps ; ceph versions ; sleep 30 ; done
- - ceph orch ps
- - ceph versions
- - ceph versions | jq -e '.overall | length == 1'
- - ceph versions | jq -e '.overall | keys' | grep $sha1
-- print: "**** done end wait..."
+++ /dev/null
-roles:
-- - mon.a
- - mon.c
- - mgr.y
- - osd.0
- - osd.1
- - osd.2
- - osd.3
- - client.0
-# - ceph.rgw.realm.zone.a # Disabled, needs 15.2.4 as an upgrade start
- - node-exporter.a
- - alertmanager.a
-- - mon.b
- - mgr.x
- - mgr.z
- - osd.4
- - osd.5
- - osd.6
- - osd.7
- - client.1
- - prometheus.a
- - grafana.a
- - node-exporter.b
-openstack:
-- volumes: # attached to each instance
- count: 4
- size: 10 # GB
-overrides:
- ceph:
- conf:
- osd:
- osd shutdown pgref assert: true
--- /dev/null
+# renamed tasks: to upgrade-sequence:
+upgrade-sequence:
+ sequential:
+ - print: "**** done start upgrade, wait"
+ - cephadm.shell:
+ env: [sha1]
+ mon.a:
+ - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
+ - while ceph orch upgrade status | jq '.in_progress' | grep true ; do ceph orch ps ; ceph versions ; sleep 30 ; done
+ - ceph orch ps
+ - ceph versions
+ - ceph versions | jq -e '.overall | length == 1'
+ - ceph versions | jq -e '.overall | keys' | grep $sha1
+ - print: "**** done end upgrade, wait..."
+
-#meta:
-#- desc: |
-# librbd C and C++ api tests
-#workload:
-# full_sequential:
-# - print: "**** done start rbd/test_librbd.sh"
-# - workunit:
-# branch: octopus
-# clients:
-# client.0:
-# - rbd/test_librbd.sh
-# - print: "**** done end rbd/test_librbd.sh"
+meta:
+- desc: |
+ librbd C and C++ api tests
+workload:
+ full_sequential:
+ - print: "**** done start rbd/test_librbd.sh"
+ - workunit:
+ branch: octopus
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ - print: "**** done end rbd/test_librbd.sh"
for mgr in [r for r in roles
if teuthology.is_type('mgr', cluster_name)(r)]:
c_, _, id_ = teuthology.split_role(mgr)
- if c_ == cluster_name and id_ == ctx.ceph[cluster_name].first_mgr:
- continue
log.info('Adding %s on %s' % (mgr, remote.shortname))
nodes.append(remote.shortname + '=' + id_)
+ if c_ == cluster_name and id_ == ctx.ceph[cluster_name].first_mgr:
+ continue
daemons[mgr] = (remote, id_)
if nodes:
_shell(ctx, cluster_name, remote, [