git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
qa/tasks/cephadm: include first mgr host when applying mgrs (38707/head)
author Josh Durgin <jdurgin@redhat.com>
Wed, 23 Dec 2020 00:25:48 +0000 (19:25 -0500)
committer Yuri Weinstein <yweinste@redhat.com>
Wed, 23 Dec 2020 19:30:54 +0000 (11:30 -0800)
This prevents the first mgr from being shut down due to lack of
appropriate placements.

Signed-off-by: Josh Durgin <jdurgin@redhat.com>
Signed-off-by: Yuri Weinstein <yweinste@redhat.com>
qa/crontab/teuthology-cronjobs
qa/suites/upgrade/octopus-x/0-start.yaml [new file with mode: 0644]
qa/suites/upgrade/octopus-x/1-start.yaml [deleted file]
qa/suites/upgrade/octopus-x/1-tasks.yaml [new file with mode: 0644]
qa/suites/upgrade/octopus-x/3-start-upgrade.yaml [deleted file]
qa/suites/upgrade/octopus-x/4-wait.yaml [deleted file]
qa/suites/upgrade/octopus-x/fixed-2.yaml [deleted file]
qa/suites/upgrade/octopus-x/upgrade-sequence.yaml [new file with mode: 0644]
qa/suites/upgrade/octopus-x/workload/test_rbd_api.yaml
qa/tasks/cephadm.py

diff --git a/qa/crontab/teuthology-cronjobs b/qa/crontab/teuthology-cronjobs
index 59b2a153f6fd2453e24d55cb9f7ad6a7516b756f..36406cdda06aa801f5bfd4eec7c68db5cead55c0 100644 (file)
@@ -186,3 +186,5 @@ CEPH_QA_EMAIL="ceph-qa@ceph.io"
 
 20 01 * * 3,4  CEPH_BRANCH=master; MACHINE_NAME=smithi; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH  -n 7 -m $MACHINE_NAME -s upgrade-clients/client-upgrade-octopus-pacific -k distro -e $CEPH_QA_EMAIL --suite-branch octopus
 
+22 14 * * 3,4 CEPH_BRANCH=master; MACHINE_NAME=smithi; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c   $CEPH_BRANCH -k distro -n 7 -m $MACHINE_NAME -s upgrade/octopus-x -e $CEPH_QA_EMAIL -p 70 --force-priority
+
diff --git a/qa/suites/upgrade/octopus-x/0-start.yaml b/qa/suites/upgrade/octopus-x/0-start.yaml
new file mode 100644 (file)
index 0000000..3870530
--- /dev/null
@@ -0,0 +1,30 @@
+roles:
+- - mon.a
+  - mon.c
+  - mgr.y
+  - osd.0
+  - osd.1
+  - osd.2
+  - osd.3
+  - client.0
+  - node-exporter.a
+  - alertmanager.a
+- - mon.b
+  - mgr.x
+  - osd.4
+  - osd.5
+  - osd.6
+  - osd.7
+  - client.1
+  - prometheus.a
+  - grafana.a
+  - node-exporter.b
+openstack:
+- volumes: # attached to each instance
+    count: 4
+    size: 10 # GB
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd shutdown pgref assert: true
diff --git a/qa/suites/upgrade/octopus-x/1-start.yaml b/qa/suites/upgrade/octopus-x/1-start.yaml
deleted file mode 100644 (file)
index 5e71207..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-tasks:
-- install:
-    branch: octopus
-- print: "**** done install task..."
-- print: "**** done start installing octopus cephadm ..."
-- cephadm:
-    #image: docker.io/ceph/ceph:v15.2.0
-    #cephadm_branch: v15.2.0
-    image: docker.io/ceph/daemon-base:latest-octopus
-    cephadm_branch: octopus
-    conf:
-      osd:
-        #set config option for which cls modules are allowed to be loaded / used
-        osd_class_load_list: "*"
-        osd_class_default_list: "*"
-- print: "**** done end installing octopus cephadm ..."
-
-- print: "**** done start cephadm.shell ceph config set mgr..."
-- cephadm.shell:
-    mon.a:
-      - ceph config set mgr mgr/cephadm/use_repo_digest true --force
-- print: "**** done cephadm.shell ceph config set mgr..."
-
-- print: "**** done start parallel"
-- parallel:
-    - workload
-    - upgrade-sequence
-- print: "**** done end parallel"
diff --git a/qa/suites/upgrade/octopus-x/1-tasks.yaml b/qa/suites/upgrade/octopus-x/1-tasks.yaml
new file mode 100644 (file)
index 0000000..c6b7b08
--- /dev/null
@@ -0,0 +1,26 @@
+tasks:
+- install:
+    branch: octopus
+- print: "**** done install task..."
+- print: "**** done start installing octopus cephadm ..."
+- cephadm:
+    image: docker.io/ceph/daemon-base:latest-octopus
+    cephadm_branch: octopus
+    conf:
+      osd:
+        #set config option for which cls modules are allowed to be loaded / used
+        osd_class_load_list: "*"
+        osd_class_default_list: "*"
+- print: "**** done end installing octopus cephadm ..."
+
+- print: "**** done start cephadm.shell ceph config set mgr..."
+- cephadm.shell:
+    mon.a:
+      - ceph config set mgr mgr/cephadm/use_repo_digest true --force
+- print: "**** done cephadm.shell ceph config set mgr..."
+
+- print: "**** done start parallel"
+- parallel:
+    - workload
+    - upgrade-sequence
+- print: "**** done end parallel"
diff --git a/qa/suites/upgrade/octopus-x/3-start-upgrade.yaml b/qa/suites/upgrade/octopus-x/3-start-upgrade.yaml
deleted file mode 100644 (file)
index d0b2532..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-# renamed tasks: to upgrade-sequence:
-upgrade-sequence:
-   sequential:
-   - print: "**** done start upgrade"
-   - cephadm.shell:
-       env: [sha1]
-       mon.a:
-         - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
-   - print: "**** done end upgrade"
diff --git a/qa/suites/upgrade/octopus-x/4-wait.yaml b/qa/suites/upgrade/octopus-x/4-wait.yaml
deleted file mode 100644 (file)
index 8e03083..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-tasks:
-- print: "**** done start wait..."
-- cephadm.shell:
-    env: [sha1]
-    mon.a:
-      - while ceph orch upgrade status | jq '.in_progress' | grep true ; do ceph orch ps ; ceph versions ; sleep 30 ; done
-      - ceph orch ps
-      - ceph versions
-      - ceph versions | jq -e '.overall | length == 1'
-      - ceph versions | jq -e '.overall | keys' | grep $sha1
-- print: "**** done end wait..."
diff --git a/qa/suites/upgrade/octopus-x/fixed-2.yaml b/qa/suites/upgrade/octopus-x/fixed-2.yaml
deleted file mode 100644 (file)
index 5ad2eb3..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-roles:
-- - mon.a
-  - mon.c
-  - mgr.y
-  - osd.0
-  - osd.1
-  - osd.2
-  - osd.3
-  - client.0
-#   - ceph.rgw.realm.zone.a # Disabled, needs 15.2.4 as an upgrade start
-  - node-exporter.a
-  - alertmanager.a
-- - mon.b
-  - mgr.x
-  - mgr.z
-  - osd.4
-  - osd.5
-  - osd.6
-  - osd.7
-  - client.1
-  - prometheus.a
-  - grafana.a
-  - node-exporter.b
-openstack:
-- volumes: # attached to each instance
-    count: 4
-    size: 10 # GB
-overrides:
-  ceph:
-    conf:
-      osd:
-        osd shutdown pgref assert: true
diff --git a/qa/suites/upgrade/octopus-x/upgrade-sequence.yaml b/qa/suites/upgrade/octopus-x/upgrade-sequence.yaml
new file mode 100644 (file)
index 0000000..cb8cba7
--- /dev/null
@@ -0,0 +1,15 @@
+# renamed tasks: to upgrade-sequence:
+upgrade-sequence:
+   sequential:
+   - print: "**** done start upgrade, wait"
+   - cephadm.shell:
+       env: [sha1]
+       mon.a:
+         - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
+         - while ceph orch upgrade status | jq '.in_progress' | grep true ; do ceph orch ps ; ceph versions ; sleep 30 ; done
+         - ceph orch ps
+         - ceph versions
+         - ceph versions | jq -e '.overall | length == 1'
+         - ceph versions | jq -e '.overall | keys' | grep $sha1
+   - print: "**** done end upgrade, wait..."
+
diff --git a/qa/suites/upgrade/octopus-x/workload/test_rbd_api.yaml b/qa/suites/upgrade/octopus-x/workload/test_rbd_api.yaml
index d7d8e3451708ba774e4dfdc13d8f1428706117f3..13646f0855e0caa45f19556a3dbb75c4eb0ab9e2 100644 (file)
@@ -1,12 +1,12 @@
-#meta:
-#- desc: |
-#   librbd C and C++ api tests
-#workload:
-#  full_sequential:
-#    - print: "**** done start rbd/test_librbd.sh"
-#    - workunit:
-#        branch: octopus
-#        clients:
-#          client.0:
-#              - rbd/test_librbd.sh
-#    - print: "**** done end rbd/test_librbd.sh"
+meta:
+- desc: |
+   librbd C and C++ api tests
+workload:
+  full_sequential:
+    - print: "**** done start rbd/test_librbd.sh"
+    - workunit:
+        branch: octopus
+        clients:
+          client.0:
+              - rbd/test_librbd.sh
+    - print: "**** done end rbd/test_librbd.sh"
diff --git a/qa/tasks/cephadm.py b/qa/tasks/cephadm.py
index 027b8dc7b3bc82a38c05fc38bdc37da5a82c0b55..e44a39af4b7aeafecd12a6c69d631dfff51f7ab4 100644 (file)
@@ -540,10 +540,10 @@ def ceph_mgrs(ctx, config):
             for mgr in [r for r in roles
                         if teuthology.is_type('mgr', cluster_name)(r)]:
                 c_, _, id_ = teuthology.split_role(mgr)
-                if c_ == cluster_name and id_ == ctx.ceph[cluster_name].first_mgr:
-                    continue
                 log.info('Adding %s on %s' % (mgr, remote.shortname))
                 nodes.append(remote.shortname + '=' + id_)
+                if c_ == cluster_name and id_ == ctx.ceph[cluster_name].first_mgr:
+                    continue
                 daemons[mgr] = (remote, id_)
         if nodes:
             _shell(ctx, cluster_name, remote, [
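
For clarity, the effect of the cephadm.py hunk above can be modeled with a small self-contained sketch. This is not the real qa/tasks/cephadm.py code (which works through teuthology's ctx, remotes, and split_role helpers); the host names, the split_role stub, and the collect_mgrs function below are illustrative assumptions. It only demonstrates the ordering change this commit makes: the first mgr's host is now appended to nodes before the skip, so the placement later handed to `ceph orch apply mgr` still contains that host and cephadm does not shut the bootstrap mgr down; the `continue` now only avoids re-registering that mgr in daemons (presumably because it is already tracked from bootstrap).

    #!/usr/bin/env python3
    # Simplified model of the loop above -- hypothetical helper names, not the
    # actual teuthology task code.

    def split_role(role):
        # e.g. 'ceph.mgr.y' -> ('ceph', 'mgr', 'y')
        cluster, type_, id_ = role.split('.')
        return cluster, type_, id_

    def collect_mgrs(remotes_roles, cluster_name, first_mgr):
        nodes = []    # '<host>=<id>' entries used to build the mgr placement
        daemons = {}  # mgr role -> (host, id) registered for later management
        for host, roles in remotes_roles.items():
            for role in roles:
                c_, type_, id_ = split_role(role)
                if type_ != 'mgr':
                    continue
                # Append before the skip: the first mgr's host stays in the
                # placement, so cephadm keeps that mgr running.
                nodes.append(host + '=' + id_)
                if c_ == cluster_name and id_ == first_mgr:
                    continue
                daemons[role] = (host, id_)
        return nodes, daemons

    if __name__ == '__main__':
        remotes_roles = {
            'smithi001': ['ceph.mon.a', 'ceph.mgr.y', 'ceph.osd.0'],
            'smithi002': ['ceph.mon.b', 'ceph.mgr.x', 'ceph.osd.4'],
        }
        nodes, daemons = collect_mgrs(remotes_roles, 'ceph', first_mgr='y')
        print(nodes)    # ['smithi001=y', 'smithi002=x'] -- first mgr host included
        print(daemons)  # {'ceph.mgr.x': ('smithi002', 'x')} -- first mgr not re-added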