ceph_test_rados_api_tier: partial test for promote vs snap trim race
author Sage Weil <sage@inktank.com>
Thu, 9 Jan 2014 10:01:48 +0000 (02:01 -0800)
committer Sage Weil <sage@inktank.com>
Tue, 14 Jan 2014 00:19:49 +0000 (16:19 -0800)
This reliably returns ENODEV due to the check at the end of flush.  Not
because we are actually racing with the trimmer, though: the trimmer doesn't
run at all.  I believe it nonetheless captures the important property,
namely: we should not write a promoted object that is "behind" the snap
trimmer's progress.  The fact that we are in front of it (the trimmer hasn't
started yet) should not matter, since the object is logically deleted anyway.

We probably want to make the OSD return ENODEV on read in the normal case
when you try to access a clone that is pending trimming.

Signed-off-by: Sage Weil <sage@inktank.com>
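
As a rough sketch of the client-visible behavior discussed above (not part
of this patch): a librados caller that reads a clone whose snap has been
deleted should treat the read as failing, whether the OSD reports ENOENT as
the test below asserts or ENODEV as proposed here.  The function name
read_snap is hypothetical, and the object name "foo" simply mirrors the test.

  // Hypothetical sketch (not from this commit): read an object at a
  // possibly-deleted snap and treat either error as "clone is gone".
  #include <rados/librados.hpp>
  #include <cerrno>

  int read_snap(librados::IoCtx& ioctx, uint64_t snapid)
  {
    ioctx.snap_set_read(snapid);               // direct reads at the snap
    librados::bufferlist bl;
    int r = ioctx.read("foo", bl, 1, 0);       // the same read the test issues
    ioctx.snap_set_read(librados::SNAP_HEAD);  // restore reads of the head
    if (r == -ENOENT || r == -ENODEV)
      return 0;  // logically deleted (trim pending or completed)
    return r;
  }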
src/test/librados/tier.cc

index 0e2985f500ce81df753251bfbe654d0d8984187f..972ef5ecc985577c0f42a61f6b7379419f906fa3 100644 (file)
@@ -423,6 +423,85 @@ TEST(LibRadosTier, PromoteSnap) {
   ASSERT_EQ(0, destroy_one_pool_pp(base_pool_name, cluster));
 }
 
+TEST(LibRadosTier, PromoteSnapTrimRace) {
+  Rados cluster;
+  std::string base_pool_name = get_temp_pool_name();
+  std::string cache_pool_name = base_pool_name + "-cache";
+  ASSERT_EQ("", create_one_pool_pp(base_pool_name, cluster));
+  ASSERT_EQ(0, cluster.pool_create(cache_pool_name.c_str()));
+  IoCtx cache_ioctx;
+  ASSERT_EQ(0, cluster.ioctx_create(cache_pool_name.c_str(), cache_ioctx));
+  IoCtx base_ioctx;
+  ASSERT_EQ(0, cluster.ioctx_create(base_pool_name.c_str(), base_ioctx));
+
+  // create object
+  {
+    bufferlist bl;
+    bl.append("hi there");
+    ObjectWriteOperation op;
+    op.write_full(bl);
+    ASSERT_EQ(0, base_ioctx.operate("foo", &op));
+  }
+
+  // create a snapshot, clone
+  vector<uint64_t> my_snaps(1);
+  ASSERT_EQ(0, base_ioctx.selfmanaged_snap_create(&my_snaps[0]));
+  ASSERT_EQ(0, base_ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
+                                                        my_snaps));
+  {
+    bufferlist bl;
+    bl.append("ciao!");
+    ObjectWriteOperation op;
+    op.write_full(bl);
+    ASSERT_EQ(0, base_ioctx.operate("foo", &op));
+  }
+
+  // configure cache
+  bufferlist inbl;
+  ASSERT_EQ(0, cluster.mon_command(
+    "{\"prefix\": \"osd tier add\", \"pool\": \"" + base_pool_name +
+    "\", \"tierpool\": \"" + cache_pool_name + "\"}",
+    inbl, NULL, NULL));
+  ASSERT_EQ(0, cluster.mon_command(
+    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + base_pool_name +
+    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
+    inbl, NULL, NULL));
+  ASSERT_EQ(0, cluster.mon_command(
+    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
+    "\", \"mode\": \"writeback\"}",
+    inbl, NULL, NULL));
+
+  // wait for maps to settle
+  cluster.wait_for_latest_osdmap();
+
+  // delete the snap
+  ASSERT_EQ(0, base_ioctx.selfmanaged_snap_remove(my_snaps[0]));
+
+  base_ioctx.snap_set_read(my_snaps[0]);
+
+  // read foo snap
+  {
+    bufferlist bl;
+    ASSERT_EQ(-ENOENT, base_ioctx.read("foo", bl, 1, 0));
+  }
+
+  // tear down tiers
+  ASSERT_EQ(0, cluster.mon_command(
+    "{\"prefix\": \"osd tier remove-overlay\", \"pool\": \"" + base_pool_name +
+    "\"}",
+    inbl, NULL, NULL));
+  ASSERT_EQ(0, cluster.mon_command(
+    "{\"prefix\": \"osd tier remove\", \"pool\": \"" + base_pool_name +
+    "\", \"tierpool\": \"" + cache_pool_name + "\"}",
+    inbl, NULL, NULL));
+
+  base_ioctx.close();
+  cache_ioctx.close();
+
+  // cluster.pool_delete(cache_pool_name.c_str());
+  // ASSERT_EQ(0, destroy_one_pool_pp(base_pool_name, cluster));
+}
+
 TEST(LibRadosTier, Whiteout) {
   Rados cluster;
   std::string base_pool_name = get_temp_pool_name();