}
}
+class LibRadosTwoPoolsECPP : public RadosTestECPP
+{
+public:
+ LibRadosTwoPoolsECPP() {};
+ virtual ~LibRadosTwoPoolsECPP() {};
+protected:
+ // Once-per-test-case setup: create the erasure-coded pool (pool_name,
+ // consumed by the RadosTestECPP base) plus a second, replicated pool
+ // (src_pool_name) that serves as the non-EC side of the two-pool tests.
+ static void SetUpTestCase() {
+ pool_name = get_temp_pool_name();
+ ASSERT_EQ("", create_one_ec_pool_pp(pool_name, s_cluster));
+ src_pool_name = get_temp_pool_name();
+ ASSERT_EQ(0, s_cluster.pool_create(src_pool_name.c_str()));
+ }
+ // Once-per-test-case teardown: delete both pools, reverse creation order.
+ static void TearDownTestCase() {
+ ASSERT_EQ(0, s_cluster.pool_delete(src_pool_name.c_str()));
+ ASSERT_EQ(0, destroy_one_ec_pool_pp(pool_name, s_cluster));
+ }
+ static std::string src_pool_name;
+
+ // Per-test setup: the base opens `ioctx` on the EC pool; additionally
+ // open src_ioctx on the replicated pool and scope it to the same
+ // per-test namespace as the base fixture.
+ virtual void SetUp() {
+ RadosTestECPP::SetUp();
+ ASSERT_EQ(0, cluster.ioctx_create(src_pool_name.c_str(), src_ioctx));
+ src_ioctx.set_namespace(nspace);
+ }
+ virtual void TearDown() {
+ // wait for maps to settle before next test
+ cluster.wait_for_latest_osdmap();
+
+ RadosTestECPP::TearDown();
+
+ // NOTE(review): src_ioctx cleanup runs AFTER the base TearDown; the
+ // base only tears down its own ioctx, so this looks safe, but confirm
+ // RadosTestECPP::TearDown does not invalidate src_ioctx or `cluster`.
+ cleanup_default_namespace(src_ioctx);
+ cleanup_namespace(src_ioctx, nspace);
+
+ src_ioctx.close();
+ }
+
+ // IoCtx bound to the replicated source pool (src_pool_name).
+ librados::IoCtx src_ioctx;
+};
+std::string LibRadosTwoPoolsECPP::src_pool_name;
+
+// copy_from of an omap-bearing object from a replicated pool into an
+// erasure-coded pool must be rejected with -EOPNOTSUPP.
+TEST_F(LibRadosTwoPoolsECPP, CopyFrom) {
+ // Give the source object an omap header so it carries omap data,
+ // which EC pools cannot store.
+ bufferlist hdr;
+ hdr.append("copyfrom");
+ ASSERT_EQ(0, src_ioctx.omap_set_header("foo", hdr));
+
+ const version_t src_version = src_ioctx.get_last_version();
+ ObjectWriteOperation copy_op;
+ copy_op.copy_from("foo", src_ioctx, src_version);
+ ASSERT_EQ(-EOPNOTSUPP, ioctx.operate("foo.copy", &copy_op));
+}
+
TEST_F(LibRadosMiscPP, CopyScrubPP) {
bufferlist inbl, bl, x;
for (int i=0; i<100; ++i)
cluster.wait_for_latest_osdmap();
}
+//Make ecpool as cache pool; no-ecpool as data pool
+//Judge promote object which has omap from no-ecpool into ecpool.
+TEST_F(LibRadosTwoPoolsECPP, OmapOperation) {
+ // NOTE(review): cache_ioctx and cache_pool_name are not declared by the
+ // LibRadosTwoPoolsECPP fixture defined above (it exposes src_ioctx /
+ // src_pool_name) -- verify these names exist elsewhere in the file, or
+ // this test will not compile.
+ // create object
+ {
+ bufferlist bl;
+ bl.append("hi there");
+ ASSERT_EQ(0, cache_ioctx.omap_set_header("foo", bl));
+ }
+
+ // configure cache.
+ // Make pool_name (the EC pool) the writeback tier/overlay of
+ // cache_pool_name via mon commands; --force-nonempty is needed because
+ // the tier pool already holds data.
+ bufferlist inbl;
+ ASSERT_EQ(0, cluster.mon_command(
+ "{\"prefix\": \"osd tier add\", \"pool\": \"" + cache_pool_name +
+ "\", \"tierpool\": \"" + pool_name +
+ "\", \"force_nonempty\": \"--force-nonempty\" }",
+ inbl, NULL, NULL));
+ ASSERT_EQ(0, cluster.mon_command(
+ "{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + cache_pool_name +
+ "\", \"overlaypool\": \"" + pool_name + "\"}",
+ inbl, NULL, NULL));
+ ASSERT_EQ(0, cluster.mon_command(
+ "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + pool_name +
+ "\", \"mode\": \"writeback\"}",
+ inbl, NULL, NULL));
+
+
+ // wait for maps to settle
+ cluster.wait_for_latest_osdmap();
+
+ // Reading the omap header through the EC-pool ioctx must fail with
+ // -EOPNOTSUPP: promoting an omap-bearing object into an erasure-coded
+ // tier is not supported (same contract the CopyFrom test asserts).
+ {
+ bufferlist got;
+ ObjectReadOperation o;
+ o.omap_get_header(&got, NULL);
+ ASSERT_EQ(-EOPNOTSUPP, ioctx.operate("foo", &o, NULL));
+
+ }
+ // tear down tiers
+ ASSERT_EQ(0, cluster.mon_command(
+ "{\"prefix\": \"osd tier remove-overlay\", \"pool\": \"" + cache_pool_name +
+ "\"}",
+ inbl, NULL, NULL));
+ ASSERT_EQ(0, cluster.mon_command(
+ "{\"prefix\": \"osd tier remove\", \"pool\": \"" + cache_pool_name +
+ "\", \"tierpool\": \"" + pool_name + "\"}",
+ inbl, NULL, NULL));
+
+ // wait for maps to settle before next test
+ cluster.wait_for_latest_osdmap();
+}
+
int main(int argc, char **argv)
{
::testing::InitGoogleTest(&argc, argv);