From: Sage Weil <sage@inktank.com>
Date: Thu, 9 Jan 2014 10:01:48 +0000 (-0800)
Subject: ceph_test_rados_api_tier: partial test for promote vs snap trim race
X-Git-Tag: v0.77~22^2~5
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=8cab9e7657b5879f3ec3a42ae002d858b51c6cfe;p=ceph.git

ceph_test_rados_api_tier: partial test for promote vs snap trim race

This reliably returns ENODEV due to the test at the finish of flush.
Not because we are actually racing with trim, though: the trimmer
doesn't run at all.

I believe it captures the important property, though.  Namely: we
should not write a promoted object that is "behind" the snap trimmer's
progress.  The fact that we are in front of it (the trimmer hasn't
started yet) should not matter since the object is logically deleted
anyway.

We probably want to make the OSD return ENODEV on read in the normal
case when you try to access a clone that is pending trimming.

Signed-off-by: Sage Weil <sage@inktank.com>
---

diff --git a/src/test/librados/tier.cc b/src/test/librados/tier.cc
index 0e2985f500c..972ef5ecc98 100644
--- a/src/test/librados/tier.cc
+++ b/src/test/librados/tier.cc
@@ -423,6 +423,85 @@ TEST(LibRadosTier, PromoteSnap) {
   ASSERT_EQ(0, destroy_one_pool_pp(base_pool_name, cluster));
 }
 
+TEST(LibRadosTier, PromoteSnapTrimRace) {
+  Rados cluster;
+  std::string base_pool_name = get_temp_pool_name();
+  std::string cache_pool_name = base_pool_name + "-cache";
+  ASSERT_EQ("", create_one_pool_pp(base_pool_name, cluster));
+  ASSERT_EQ(0, cluster.pool_create(cache_pool_name.c_str()));
+  IoCtx cache_ioctx;
+  ASSERT_EQ(0, cluster.ioctx_create(cache_pool_name.c_str(), cache_ioctx));
+  IoCtx base_ioctx;
+  ASSERT_EQ(0, cluster.ioctx_create(base_pool_name.c_str(), base_ioctx));
+
+  // create object
+  {
+    bufferlist bl;
+    bl.append("hi there");
+    ObjectWriteOperation op;
+    op.write_full(bl);
+    ASSERT_EQ(0, base_ioctx.operate("foo", &op));
+  }
+
+  // create a snapshot, clone
+  vector<uint64_t> my_snaps(1);
+  ASSERT_EQ(0, base_ioctx.selfmanaged_snap_create(&my_snaps[0]));
+  ASSERT_EQ(0, base_ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0],
+                                                         my_snaps));
+  {
+    bufferlist bl;
+    bl.append("ciao!");
+    ObjectWriteOperation op;
+    op.write_full(bl);
+    ASSERT_EQ(0, base_ioctx.operate("foo", &op));
+  }
+
+  // configure cache
+  bufferlist inbl;
+  ASSERT_EQ(0, cluster.mon_command(
+    "{\"prefix\": \"osd tier add\", \"pool\": \"" + base_pool_name +
+    "\", \"tierpool\": \"" + cache_pool_name + "\"}",
+    inbl, NULL, NULL));
+  ASSERT_EQ(0, cluster.mon_command(
+    "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + base_pool_name +
+    "\", \"overlaypool\": \"" + cache_pool_name + "\"}",
+    inbl, NULL, NULL));
+  ASSERT_EQ(0, cluster.mon_command(
+    "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + cache_pool_name +
+    "\", \"mode\": \"writeback\"}",
+    inbl, NULL, NULL));
+
+  // wait for maps to settle
+  cluster.wait_for_latest_osdmap();
+
+  // delete the snap
+  ASSERT_EQ(0, base_ioctx.selfmanaged_snap_remove(my_snaps[0]));
+
+  base_ioctx.snap_set_read(my_snaps[0]);
+
+  // read foo snap
+  {
+    bufferlist bl;
+    ASSERT_EQ(-ENOENT, base_ioctx.read("foo", bl, 1, 0));
+  }
+
+  // tear down tiers
+  ASSERT_EQ(0, cluster.mon_command(
+    "{\"prefix\": \"osd tier remove-overlay\", \"pool\": \"" + base_pool_name +
+    "\"}",
+    inbl, NULL, NULL));
+  ASSERT_EQ(0, cluster.mon_command(
+    "{\"prefix\": \"osd tier remove\", \"pool\": \"" + base_pool_name +
+    "\", \"tierpool\": \"" + cache_pool_name + "\"}",
+    inbl, NULL, NULL));
+
+  base_ioctx.close();
+  cache_ioctx.close();
+
+  //  cluster.pool_delete(cache_pool_name.c_str());
+  //ASSERT_EQ(0, destroy_one_pool_pp(base_pool_name, cluster));
+}
+
 TEST(LibRadosTier, Whiteout) {
   Rados cluster;
   std::string base_pool_name = get_temp_pool_name();