git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
test: skip pool application metadata tests if OSDs not at min luminous
author Jason Dillaman <dillaman@redhat.com>
Wed, 28 Jun 2017 16:56:46 +0000 (12:56 -0400)
committer Jason Dillaman <dillaman@redhat.com>
Wed, 19 Jul 2017 17:13:01 +0000 (13:13 -0400)
Signed-off-by: Jason Dillaman <dillaman@redhat.com>
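
In short, each pool the tests create is now tagged with an application ("rados" for plain RADOS test pools, "rbd" for the rbd and rbd-mirror suites), and the pool application metadata assertions are skipped when the cluster's require_osd_release flag is still pre-luminous.

Below is a minimal C++ sketch of that release gate, modeled on the check added to src/test/librados/misc.cc; the helper name, include path, and error handling are illustrative only and not part of this diff.

// Illustrative helper (not part of this commit): returns true when the
// cluster reports require_osd_release of luminous or newer, i.e. when pool
// application metadata can be exercised.
#include <string>
#include <boost/regex.hpp>
#include <rados/librados.hpp>

static bool cluster_at_least_luminous(librados::Rados &cluster) {
  librados::bufferlist inbl, outbl;
  std::string outs;
  // "osd dump" includes e.g. "require_osd_release luminous" once all OSDs
  // have been upgraded and the flag has been set.
  if (cluster.mon_command("{\"prefix\": \"osd dump\"}", inbl, &outbl, &outs) < 0) {
    return false;  // treat a failed mon command as "not ready" and skip
  }
  return boost::regex_search(outbl.to_str(),
                             boost::regex("require_osd_release [l-z]"));
}

A test would call this before touching application metadata and, like the LibRadosMisc cases below, print "SKIPPING" and return early when it comes back false.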
17 files changed:
qa/tasks/ceph_manager.py
qa/tasks/reg11184.py
qa/workunits/rbd/verify_pool.sh
src/test/librados/aio.cc
src/test/librados/misc.cc
src/test/librados/test.cc
src/test/librados/tier.cc
src/test/librbd/fsx.cc
src/test/librbd/test_support.cc
src/test/multi_stress_watch.cc
src/test/pybind/test_rados.py
src/test/rbd_mirror/test_ClusterWatcher.cc
src/test/rbd_mirror/test_ImageReplayer.cc
src/test/rbd_mirror/test_PoolWatcher.cc
src/test/rbd_mirror/test_fixture.cc
src/test/system/rados_open_pools_parallel.cc
src/test/test_mutate.cc

qa/tasks/ceph_manager.py
index 964dc73440fff8f27520b10e93286eac9d0cce08..d541aeacccc25f3fcfa1b9c064d6a0863fd442a2 100644 (file)
@@ -1620,6 +1620,10 @@ class CephManager:
                     'osd', 'pool', 'set', pool_name,
                     'allow_ec_overwrites',
                     'true')
+            self.raw_cluster_cmd(
+                'osd', 'pool', 'application', 'enable',
+                pool_name, 'rados', '--yes-i-really-mean-it',
+                run.Raw('||'), 'true')
             self.pools[pool_name] = pg_num
         time.sleep(1)
 
qa/tasks/reg11184.py
index b0c6dc11a17ef39bda92e8ae7d71a9d5a9ed21f4..89edf33ae9ba96670d7aa28f02d66180d7b867a4 100644 (file)
@@ -55,6 +55,9 @@ def task(ctx, config):
     # create 1 pg pool
     log.info('creating foo')
     manager.raw_cluster_cmd('osd', 'pool', 'create', 'foo', '1')
+    manager.raw_cluster_cmd(
+        'osd', 'pool', 'application', 'enable',
+        'foo', 'rados', run.Raw('||'), 'true')
 
     # Remove extra pool to simplify log output
     manager.raw_cluster_cmd('osd', 'pool', 'delete', 'rbd', 'rbd', '--yes-i-really-really-mean-it')
qa/workunits/rbd/verify_pool.sh
index 48d069160d3037c1fee557a836fcf49c49ba63e6..f008fb6b3cfb34a0a1193c349278db7d66b7a501 100755 (executable)
@@ -11,7 +11,7 @@ set_up () {
   tear_down
   ceph osd pool create $POOL_NAME $PG_NUM
   ceph osd pool mksnap $POOL_NAME snap
-  rbd pool init images
+  rbd pool init $POOL_NAME
 }
 
 trap tear_down EXIT HUP INT
src/test/librados/aio.cc
index 22ed976995e8709aa8e7734f4449bf80192d5f9b..ff5064ba353e1244b8112c07cef680ebcbe3af43 100644 (file)
@@ -205,6 +205,7 @@ TEST(LibRadosAio, PoolQuotaPP) {
   ASSERT_EQ(0, test_data.m_cluster.pool_create(p.c_str()));
   IoCtx ioctx;
   ASSERT_EQ(0, test_data.m_cluster.ioctx_create(p.c_str(), ioctx));
+  ioctx.application_enable("rados", true);
 
   bufferlist inbl;
   ASSERT_EQ(0, test_data.m_cluster.mon_command(
src/test/librados/misc.cc
index 290829d879648efc7fe3ce7236d4756e71ebc8fd..ea4d3e713e7aca1400faf08a4c53209275841bd1 100644 (file)
@@ -19,7 +19,7 @@
 #include <map>
 #include <sstream>
 #include <string>
-
+#include <boost/regex.hpp>
 
 using namespace librados;
 using std::map;
@@ -856,6 +856,14 @@ protected:
     ASSERT_EQ("", create_one_ec_pool_pp(pool_name, s_cluster));
     src_pool_name = get_temp_pool_name();
     ASSERT_EQ(0, s_cluster.pool_create(src_pool_name.c_str()));
+
+    librados::IoCtx ioctx;
+    ASSERT_EQ(0, s_cluster.ioctx_create(pool_name.c_str(), ioctx));
+    ioctx.application_enable("rados", true);
+
+    librados::IoCtx src_ioctx;
+    ASSERT_EQ(0, s_cluster.ioctx_create(src_pool_name.c_str(), src_ioctx));
+    src_ioctx.application_enable("rados", true);
   }
   static void TearDownTestCase() {
     ASSERT_EQ(0, s_cluster.pool_delete(src_pool_name.c_str()));
@@ -1231,22 +1239,37 @@ TEST_F(LibRadosMisc, CmpExt) {
 }
 
 TEST_F(LibRadosMisc, Applications) {
+  const char *cmd[] = {"{\"prefix\":\"osd dump\"}", nullptr};
+  char *buf, *st;
+  size_t buflen, stlen;
+  ASSERT_EQ(0, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf,
+                                 &buflen, &st, &stlen));
+  ASSERT_LT(0u, buflen);
+  string result(buf);
+  rados_buffer_free(buf);
+  rados_buffer_free(st);
+  if (!boost::regex_search(result, boost::regex("require_osd_release [l-z]"))) {
+    std::cout << "SKIPPING";
+    return;
+  }
+
   char apps[128];
   size_t app_len;
 
   app_len = sizeof(apps);
   ASSERT_EQ(0, rados_application_list(ioctx, apps, &app_len));
-  ASSERT_EQ(0U, app_len);
+  ASSERT_EQ(6U, app_len);
+  ASSERT_EQ(0, memcmp("rados\0", apps, app_len));
 
-  ASSERT_EQ(0, rados_application_enable(ioctx, "app1", 0));
+  ASSERT_EQ(0, rados_application_enable(ioctx, "app1", 1));
   ASSERT_EQ(-EPERM, rados_application_enable(ioctx, "app2", 0));
   ASSERT_EQ(0, rados_application_enable(ioctx, "app2", 1));
 
   ASSERT_EQ(-ERANGE, rados_application_list(ioctx, apps, &app_len));
-  ASSERT_EQ(10U, app_len);
+  ASSERT_EQ(16U, app_len);
   ASSERT_EQ(0, rados_application_list(ioctx, apps, &app_len));
-  ASSERT_EQ(10U, app_len);
-  ASSERT_EQ(0, memcmp("app1\0app2\0", apps, app_len));
+  ASSERT_EQ(16U, app_len);
+  ASSERT_EQ(0, memcmp("app1\0app2\0rados\0", apps, app_len));
 
   char keys[128];
   char vals[128];
@@ -1288,16 +1311,28 @@ TEST_F(LibRadosMisc, Applications) {
 }
 
 TEST_F(LibRadosMiscPP, Applications) {
-  std::set<std::string> expected_apps;
+  bufferlist inbl, outbl;
+  string outs;
+  ASSERT_EQ(0, cluster.mon_command("{\"prefix\": \"osd dump\"}",
+                                  inbl, &outbl, &outs));
+  ASSERT_LT(0u, outbl.length());
+  ASSERT_LE(0u, outs.length());
+  if (!boost::regex_search(outbl.to_str(),
+                           boost::regex("require_osd_release [l-z]"))) {
+    std::cout << "SKIPPING";
+    return;
+  }
+
+  std::set<std::string> expected_apps = {"rados"};
   std::set<std::string> apps;
   ASSERT_EQ(0, ioctx.application_list(&apps));
   ASSERT_EQ(expected_apps, apps);
 
-  ASSERT_EQ(0, ioctx.application_enable("app1", false));
+  ASSERT_EQ(0, ioctx.application_enable("app1", true));
   ASSERT_EQ(-EPERM, ioctx.application_enable("app2", false));
   ASSERT_EQ(0, ioctx.application_enable("app2", true));
 
-  expected_apps = {"app1", "app2"};
+  expected_apps = {"app1", "app2", "rados"};
   ASSERT_EQ(0, ioctx.application_list(&apps));
   ASSERT_EQ(expected_apps, apps);
 
src/test/librados/test.cc
index 7c9ee59b3ac7e62c25c2e9270b9308e61620c36c..42269b074bf58666ade58bf3998fa824a7c8b172 100644 (file)
@@ -49,6 +49,17 @@ std::string create_one_pool(
     return oss.str();
   }
 
+  rados_ioctx_t ioctx;
+  ret = rados_ioctx_create(*cluster, pool_name.c_str(), &ioctx);
+  if (ret < 0) {
+    rados_shutdown(*cluster);
+    std::ostringstream oss;
+    oss << "rados_ioctx_create(" << pool_name << ") failed with error " << ret;
+    return oss.str();
+  }
+
+  rados_application_enable(ioctx, "rados", 1);
+  rados_ioctx_destroy(ioctx);
   return "";
 }
 
@@ -155,6 +166,17 @@ std::string create_one_pool_pp(const std::string &pool_name, Rados &cluster,
     oss << "cluster.pool_create(" << pool_name << ") failed with error " << ret;
     return oss.str();
   }
+
+  IoCtx ioctx;
+  ret = cluster.ioctx_create(pool_name.c_str(), ioctx);
+  if (ret < 0) {
+    cluster.shutdown();
+    std::ostringstream oss;
+    oss << "cluster.ioctx_create(" << pool_name << ") failed with error "
+        << ret;
+    return oss.str();
+  }
+  ioctx.application_enable("rados", true);
   return "";
 }
 
src/test/librados/tier.cc
index 1296acd0b75aea2d3b9c987d9ffcfe7badb8e54b..ec30b1dd340b1fb40c739106431633bda488681d 100755 (executable)
@@ -82,7 +82,9 @@ protected:
     cache_pool_name = get_temp_pool_name();
     ASSERT_EQ(0, s_cluster.pool_create(cache_pool_name.c_str()));
     RadosTestPP::SetUp();
+
     ASSERT_EQ(0, cluster.ioctx_create(cache_pool_name.c_str(), cache_ioctx));
+    cache_ioctx.application_enable("rados", true);
     cache_ioctx.set_namespace(nspace);
   }
   void TearDown() override {
@@ -1761,6 +1763,7 @@ TEST_F(LibRadosTierPP, FlushWriteRaces) {
   ASSERT_EQ(0, cluster.pool_create(cache_pool_name.c_str()));
   IoCtx cache_ioctx;
   ASSERT_EQ(0, cluster.ioctx_create(cache_pool_name.c_str(), cache_ioctx));
+  cache_ioctx.application_enable("rados", true);
   IoCtx ioctx;
   ASSERT_EQ(0, cluster.ioctx_create(pool_name.c_str(), ioctx));
 
@@ -2830,7 +2833,9 @@ protected:
     cache_pool_name = get_temp_pool_name();
     ASSERT_EQ(0, s_cluster.pool_create(cache_pool_name.c_str()));
     RadosTestECPP::SetUp();
+
     ASSERT_EQ(0, cluster.ioctx_create(cache_pool_name.c_str(), cache_ioctx));
+    cache_ioctx.application_enable("rados", true);
     cache_ioctx.set_namespace(nspace);
   }
   void TearDown() override {
@@ -4426,6 +4431,7 @@ TEST_F(LibRadosTierECPP, FlushWriteRaces) {
   ASSERT_EQ(0, cluster.pool_create(cache_pool_name.c_str()));
   IoCtx cache_ioctx;
   ASSERT_EQ(0, cluster.ioctx_create(cache_pool_name.c_str(), cache_ioctx));
+  cache_ioctx.application_enable("rados", true);
   IoCtx ioctx;
   ASSERT_EQ(0, cluster.ioctx_create(pool_name.c_str(), ioctx));
 
@@ -4772,6 +4778,7 @@ TEST_F(LibRadosTierECPP, CallForcesPromote) {
   ASSERT_EQ(0, cluster.pool_create(cache_pool_name.c_str()));
   IoCtx cache_ioctx;
   ASSERT_EQ(0, cluster.ioctx_create(cache_pool_name.c_str(), cache_ioctx));
+  cache_ioctx.application_enable("rados", true);
   IoCtx ioctx;
   ASSERT_EQ(0, cluster.ioctx_create(pool_name.c_str(), ioctx));
 
src/test/librbd/fsx.cc
index 444fdbefd590454860b476c53b343731a3690a1f..12e1eaddde4265de2e9ede1a3be958db7e7ffc37 100644 (file)
@@ -1522,6 +1522,8 @@ create_image()
                simple_err("Error creating ioctx", r);
                goto failed_krbd;
        }
+        rados_application_enable(ioctx, "rbd", 1);
+
        if (clone_calls || journal_replay) {
                 uint64_t features = 0;
                 if (clone_calls) {
src/test/librbd/test_support.cc
index fa8e25194a7661b2856f2d920bab83e406b06550..55ebe03dce32d6bf00305b88c6fa39055279107e 100644 (file)
@@ -109,5 +109,12 @@ int create_image_data_pool(librados::Rados &rados, std::string &data_pool, bool
     return 0;
   }
 
+  librados::IoCtx ioctx;
+  r = rados.ioctx_create(pool.c_str(), ioctx);
+  if (r < 0) {
+    return r;
+  }
+  ioctx.application_enable("rbd", true);
+
   return r;
 }
src/test/multi_stress_watch.cc
index 9b1b715f34828964a69c40b0d26ed7f0a2454732..f105c46e9b0a4f10c6c9aa5332823678eab91290 100644 (file)
@@ -42,6 +42,7 @@ test_loop(Rados &cluster, std::string pool_name, std::string obj_name)
     std::cerr << "ioctx_create " << pool_name << " failed with " << ret << std::endl;
     exit(1);
   }
+  ioctx.application_enable("rados", true);
 
   ret = ioctx.create(obj_name, false);
   if (ret < 0) {
@@ -64,6 +65,11 @@ test_loop(Rados &cluster, std::string pool_name, std::string obj_name)
   }
 
   ioctx.close();
+  ret = cluster.pool_delete(pool_name.c_str());
+  if (ret < 0) {
+    std::cerr << "pool_delete failed with " << ret << std::endl;
+    exit(1);
+  }
 }
 
 #pragma GCC diagnostic pop
src/test/pybind/test_rados.py
index ca496528df8ed1795254f527204f88021dd916d9..88b8d2a92c8c73fbbb34acc1c0cfd170add6f90f 100644 (file)
@@ -1,4 +1,5 @@
 from __future__ import print_function
+from nose import SkipTest
 from nose.tools import eq_ as eq, ok_ as ok, assert_raises
 from rados import (Rados, Error, RadosStateError, Object, ObjectExists,
                    ObjectNotFound, ObjectBusy, requires, opt,
@@ -860,6 +861,15 @@ class TestIoctx(object):
         [i.remove() for i in self.ioctx.list_objects()]
 
     def test_applications(self):
+        cmd = {"prefix":"osd dump", "format":"json"}
+        ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'')
+        eq(ret, 0)
+        assert len(buf) > 0
+        release = json.loads(buf.decode("utf-8")).get("require_osd_release",
+                                                      None)
+        if not release or release[0] < 'l':
+            raise SkipTest
+
         eq([], self.ioctx.application_list())
 
         self.ioctx.application_enable("app1")
src/test/rbd_mirror/test_ClusterWatcher.cc
index 2fd894c702e788b7549ab8f08b77e1a22b71aa38..11080b7a70fe350fae17cb3cf79cf57f847ee661 100644 (file)
@@ -51,10 +51,13 @@ public:
 
     int64_t pool_id = m_cluster->pool_lookup(pool_name.c_str());
     ASSERT_GE(pool_id, 0);
+
+    librados::IoCtx ioctx;
+    ASSERT_EQ(0, m_cluster->ioctx_create2(pool_id, ioctx));
+    ioctx.application_enable("rbd", true);
+
     m_pools.insert(pool_name);
     if (enable_mirroring) {
-      librados::IoCtx ioctx;
-      ASSERT_EQ(0, m_cluster->ioctx_create2(pool_id, ioctx));
       ASSERT_EQ(0, librbd::api::Mirror<>::mode_set(ioctx,
                                                    RBD_MIRROR_MODE_POOL));
 
src/test/rbd_mirror/test_ImageReplayer.cc
index 04259e4d12b24458d251738d10a9e12cd5fe758a..8a7766948c5f98d78e4449e43b1e6929cc2a0058 100644 (file)
@@ -90,6 +90,7 @@ public:
     EXPECT_EQ(0, m_local_cluster->pool_create(m_local_pool_name.c_str()));
     EXPECT_EQ(0, m_local_cluster->ioctx_create(m_local_pool_name.c_str(),
                                              m_local_ioctx));
+    m_local_ioctx.application_enable("rbd", true);
 
     EXPECT_EQ("", connect_cluster_pp(m_remote_cluster));
     EXPECT_EQ(0, m_remote_cluster.conf_set("rbd_cache", "false"));
@@ -101,6 +102,8 @@ public:
 
     EXPECT_EQ(0, m_remote_cluster.ioctx_create(m_remote_pool_name.c_str(),
                                               m_remote_ioctx));
+    m_remote_ioctx.application_enable("rbd", true);
+
     EXPECT_EQ(0, librbd::api::Mirror<>::mode_set(m_remote_ioctx,
                                                  RBD_MIRROR_MODE_POOL));
 
src/test/rbd_mirror/test_PoolWatcher.cc
index c12724669330128fe4ec47c95ffee44e2bb383ce..06dcd570b32f36dc7babef4cdecb9fef60131137 100644 (file)
@@ -97,6 +97,7 @@ public:
 
     librados::IoCtx ioctx;
     ASSERT_EQ(0, m_cluster->ioctx_create2(pool_id, ioctx));
+    ioctx.application_enable("rbd", true);
 
     m_pool_watcher.reset(new PoolWatcher<>(m_threads, ioctx,
                                            m_pool_watcher_listener));
src/test/rbd_mirror/test_fixture.cc
index b095f7372caf24794d92e9ad430b5d48ab71b39c..221d618d4923c1d2ce950ba655459f53d2560e9e 100644 (file)
@@ -31,9 +31,17 @@ void TestFixture::SetUpTestCase() {
   _local_pool_name = get_temp_pool_name("test-rbd-mirror-");
   ASSERT_EQ(0, _rados->pool_create(_local_pool_name.c_str()));
 
+  librados::IoCtx local_ioctx;
+  ASSERT_EQ(0, _rados->ioctx_create(_local_pool_name.c_str(), local_ioctx));
+  local_ioctx.application_enable("rbd", true);
+
   _remote_pool_name = get_temp_pool_name("test-rbd-mirror-");
   ASSERT_EQ(0, _rados->pool_create(_remote_pool_name.c_str()));
 
+  librados::IoCtx remote_ioctx;
+  ASSERT_EQ(0, _rados->ioctx_create(_remote_pool_name.c_str(), remote_ioctx));
+  remote_ioctx.application_enable("rbd", true);
+
   ASSERT_EQ(0, create_image_data_pool(_data_pool));
   if (!_data_pool.empty()) {
     printf("using image data pool: %s\n", _data_pool.c_str());
src/test/system/rados_open_pools_parallel.cc
index 0181e2b882570b29f6b066f0681bae02ab9fb7aa..04a144264acf1036848e1722b12f2287577e754d 100644 (file)
@@ -85,6 +85,7 @@ public:
     if (m_open_pool_sem)
       m_open_pool_sem->post();
     rados_ioctx_destroy(io_ctx);
+    rados_pool_delete(cl, m_pool_name.c_str());
     rados_shutdown(cl);
     return 0;
   }
src/test/test_mutate.cc
index a8d83d5708cb3669cc0fc2ef7859f1f99b50907d..023b4f2d5156be44b6856bddb29b568c06479410 100644 (file)
@@ -99,6 +99,8 @@ int main(int argc, const char **argv)
          << "': error " << ret << std::endl;
      return 1;
   }
+  ioctx.application_enable("rados", true);
+
   librados::ObjectWriteOperation op;
   op.create(true);
   ret = ioctx.operate(oid, &op);