tests: cleanup: drop upgrade/jewel-x/point-to-point-x (20641/head)
author     Nathan Cutler <ncutler@suse.com>
           Thu, 1 Feb 2018 22:53:29 +0000 (23:53 +0100)
committer  Nathan Cutler <ncutler@suse.com>
           Wed, 28 Feb 2018 11:55:40 +0000 (12:55 +0100)
This subsuite is only for testing upgrades from one jewel
point release to another. In luminous and master it serves no
useful purpose.

Fixes: http://tracker.ceph.com/issues/22888
Signed-off-by: Nathan Cutler <ncutler@suse.com>
(cherry picked from commit bc9b2e6c898d495b4eb3304b6128dd1292a212cc)

Conflicts:
    qa/suites/upgrade/jewel-x/point-to-point-x/point-to-point-upgrade.yaml
       This file's content differs from master because of the following
       three luminous-only fixes:
           61e4de133d9fa1eddcd858152b7828515e0b280f
           e50af8bb04bd37bd0e28ddc7935e29bcdc106b05
           ee02b5c037c67c708335745898328e990ecacc90

qa/suites/upgrade/jewel-x/point-to-point-x/% [deleted file]
qa/suites/upgrade/jewel-x/point-to-point-x/distros/centos_7.3.yaml [deleted symlink]
qa/suites/upgrade/jewel-x/point-to-point-x/distros/ubuntu_14.04.yaml [deleted symlink]
qa/suites/upgrade/jewel-x/point-to-point-x/point-to-point-upgrade.yaml [deleted file]
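For context on the deleted layout: the empty % file is teuthology's convolution marker; it tells the suite builder to take the cross product of the directory's fragments, deep-merging each distro symlink with point-to-point-upgrade.yaml into its own job. A minimal sketch of the effect, assuming the upstream distro fragments keep their usual os_type/os_version form:

    # qa/distros/all/centos_7.3.yaml (approximate content)
    os_type: centos
    os_version: "7.3"

    # with '%' present, one job is generated per distro fragment:
    #   centos_7.3.yaml   + point-to-point-upgrade.yaml
    #   ubuntu_14.04.yaml + point-to-point-upgrade.yaml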

diff --git a/qa/suites/upgrade/jewel-x/point-to-point-x/% b/qa/suites/upgrade/jewel-x/point-to-point-x/%
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/qa/suites/upgrade/jewel-x/point-to-point-x/distros/centos_7.3.yaml b/qa/suites/upgrade/jewel-x/point-to-point-x/distros/centos_7.3.yaml
deleted file mode 120000 (symlink)
index c79327b..0000000
--- a/qa/suites/upgrade/jewel-x/point-to-point-x/distros/centos_7.3.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../distros/all/centos_7.3.yaml
\ No newline at end of file
diff --git a/qa/suites/upgrade/jewel-x/point-to-point-x/distros/ubuntu_14.04.yaml b/qa/suites/upgrade/jewel-x/point-to-point-x/distros/ubuntu_14.04.yaml
deleted file mode 120000 (symlink)
index 6237042..0000000
--- a/qa/suites/upgrade/jewel-x/point-to-point-x/distros/ubuntu_14.04.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../distros/all/ubuntu_14.04.yaml
\ No newline at end of file
diff --git a/qa/suites/upgrade/jewel-x/point-to-point-x/point-to-point-upgrade.yaml b/qa/suites/upgrade/jewel-x/point-to-point-x/point-to-point-upgrade.yaml
deleted file mode 100644 (file)
index d68c258..0000000
--- a/qa/suites/upgrade/jewel-x/point-to-point-x/point-to-point-upgrade.yaml
+++ /dev/null
@@ -1,236 +0,0 @@
-meta:
-- desc: |
-   Run ceph on two nodes, using one of them as a client,
-   with a separate client-only node. 
-   Use xfs beneath the osds.
-   install ceph/jewel v10.2.0 point version
-   run workload and upgrade-sequence in parallel
-   install ceph/jewel latest version
-   run workload and upgrade-sequence in parallel
-   install ceph/-x version (the release under test)
-   run workload and upgrade-sequence in parallel
-overrides:
-  ceph:
-    log-whitelist:
-    - reached quota
-    - scrub
-    - osd_map_max_advance
-    - wrongly marked
-    - overall HEALTH_
-    - \(MGR_DOWN\)
-    - \(OSD_
-    - \(PG_
-    - \(CACHE_
-    fs: xfs
-    conf:
-      global:
-        mon warn on pool no app: false
-      mon:
-        mon debug unsafe allow tier with nonempty snaps: true
-      osd:
-        osd map max advance: 1000
-        osd map cache size: 1100
-roles:
-- - mon.a
-  - mds.a
-  - osd.0
-  - osd.1
-  - osd.2
-  - mgr.x
-- - mon.b
-  - mon.c
-  - osd.3
-  - osd.4
-  - osd.5
-  - client.0
-- - client.1
-openstack:
-- volumes: # attached to each instance
-    count: 3
-    size: 30 # GB
-tasks:
-- print: "****  v10.2.0 about to install"
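-# packages that v10.2.0 does not provide (e.g. ceph-mgr, which arrived
-# after jewel) are excluded so the pinned install can succeed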
-- install:
-    tag: v10.2.0
-    exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
-- print: "**** done v10.2.0 install"
-- ceph:
-   fs: xfs
-   skip_mgr_daemons: true
-   add_osds_to_crush: true
-- print: "**** done ceph xfs"
-- sequential:
-   - workload
-- print: "**** done workload v10.2.0"
-- install.upgrade:
-    exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
-    mon.a:
-      branch: jewel
-    mon.b:
-      branch: jewel
-    # Note that client.1 IS NOT upgraded at this point
-    #client.1:
-      #branch: jewel
-- parallel:
-   - workload_jewel
-   - upgrade-sequence_jewel
-- print: "**** done parallel jewel branch"
-- install.upgrade:
-    exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
-    client.1:
-      branch: jewel
-- print: "**** done branch: jewel install.upgrade on client.1"
-- install.upgrade:
-    mon.a:
-    mon.b:
-- print: "**** done branch: -x install.upgrade on mon.a and mon.b"
-- parallel:
-   - workload_x
-   - upgrade-sequence_x
-- print: "**** done parallel -x branch"
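-# the whole cluster now runs -x, so the minimum client release the
-# cluster will accept can be raised to luminous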
-- exec:
-    osd.0:
-      - ceph osd set-require-min-compat-client luminous
-# Run librados tests on the -x upgraded cluster
-- install.upgrade:
-    client.1:
-- workunit:
-    branch: jewel
-    clients:
-      client.1:
-      - rados/test-upgrade-v11.0.0.sh
-      - cls
-- print: "**** done final test on -x cluster"
-#######################
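-# the top-level entries below are not run directly; the sequential and
-# parallel tasks above reference them by name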
-workload:
-   sequential:
-   - workunit:
-       clients:
-         client.0:
-           - suites/blogbench.sh
-workload_jewel:
-   full_sequential:
-   - workunit:
-       branch: jewel
-       clients:
-         client.1:
-         - rados/test.sh
-         - cls
-       env:
-         CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.mirror_image'
-   - print: "**** done rados/test.sh &  cls workload_jewel"
-   - sequential:
-     - rgw: [client.0]
-     - print: "**** done rgw workload_jewel"
-     - s3tests:
-         client.0:
-           force-branch: ceph-jewel
-           rgw_server: client.0
-           scan_for_encryption_keys: false
-     - print: "**** done s3tests workload_jewel"
-upgrade-sequence_jewel:
-   sequential:
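-   # restart daemons one at a time, with pauses in between, so the
-   # cluster stays available while the new jewel packages take effect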
-   - print: "**** done branch: jewel install.upgrade"
-   - ceph.restart: [mds.a]
-   - sleep:
-       duration: 60
-   - ceph.restart: [osd.0]
-   - sleep:
-       duration: 30
-   - ceph.restart: [osd.1]
-   - sleep:
-       duration: 30
-   - ceph.restart: [osd.2]
-   - sleep:
-       duration: 30
-   - ceph.restart: [osd.3]
-   - sleep:
-       duration: 30
-   - ceph.restart: [osd.4]
-   - sleep:
-       duration: 30
-   - ceph.restart: [osd.5]
-   - sleep:
-       duration: 60
-   - ceph.restart: [mon.a]
-   - sleep:
-       duration: 60
-   - ceph.restart: [mon.b]
-   - sleep:
-       duration: 60
-   - ceph.restart: [mon.c]
-   - sleep:
-       duration: 60
-   - print: "**** done ceph.restart all jewel branch mds/osd/mon"
-workload_x:
-   sequential:
-   - workunit:
-       branch: jewel
-       clients:
-         client.1:
-         - rados/test-upgrade-v11.0.0-noec.sh
-         - cls
-       env:
-         CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.mirror_image'
-   - print: "**** done rados/test-upgrade-v11.0.0-noec.sh & cls workload_x NOT upgraded client"
-   - workunit:
-       branch: jewel
-       clients:
-         client.0:
-         - rados/test-upgrade-v11.0.0-noec.sh
-         - cls
-   - print: "**** done rados/test-upgrade-v11.0.0-noec.sh & cls workload_x upgraded client"
-   - rgw: [client.1]
-   - print: "**** done rgw workload_x"
-   - s3tests:
-       client.1:
-         force-branch: ceph-jewel
-         rgw_server: client.1
-         scan_for_encryption_keys: false
-   - print: "**** done s3tests workload_x"
-upgrade-sequence_x:
-   sequential:
-   - ceph.restart: [mds.a]
-   - sleep:
-       duration: 60
-   - ceph.restart: [mon.a]
-   - sleep:
-       duration: 60
-   - ceph.restart: [mon.b]
-   - sleep:
-       duration: 60
-   - ceph.restart: [mon.c]
-   - sleep:
-       duration: 60
-   - ceph.restart: [osd.0]
-   - sleep:
-       duration: 30
-   - ceph.restart: [osd.1]
-   - sleep:
-       duration: 30
-   - ceph.restart: [osd.2]
-   - sleep:
-       duration: 30
-   - ceph.restart: [osd.3]
-   - sleep:
-       duration: 30
-   - ceph.restart: [osd.4]
-   - sleep:
-       duration: 30
-   - ceph.restart:
-       daemons: [osd.5]
-       wait-for-healthy: false
-       wait-for-up-osds: true
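-   # jewel shipped no ceph-mgr, so mgr.x's data directory and keyring
-   # must be bootstrapped by hand before the daemon can start on the
-   # upgraded packages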
-   - exec:
-      mgr.x:
-        - mkdir -p /var/lib/ceph/mgr/ceph-x
-        - ceph auth get-or-create-key mgr.x mon 'allow profile mgr'
-        - ceph auth export mgr.x > /var/lib/ceph/mgr/ceph-x/keyring
-   - ceph.restart:
-       daemons: [mgr.x]
-       wait-for-healthy: false
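-   # all OSDs now run -x; require-osd-release would be refused by the
-   # monitors while any pre-luminous OSD remained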
-   - exec:
-       osd.0:
-         - ceph osd require-osd-release luminous
-   - ceph.healthy:
-   - print: "**** done ceph.restart all -x branch mds/osd/mon"