db->submit_transaction_sync(txn);
}
+// Testing/debug hook: downgrade the whole store to the legacy (non-per-pool)
+// omap format by clearing the in-memory flag and deleting the
+// "per_pool_omap" marker from the SUPER prefix, synchronously.
+// Used by tests to exercise the per-pool-omap fsck/repair path.
+void BlueStore::inject_legacy_omap()
+{
+ dout(1) << __func__ << dendl;
+ per_pool_omap = false;
+ KeyValueDB::Transaction txn;
+ txn = db->get_transaction();
+ txn->rmkey(PREFIX_SUPER, "per_pool_omap");
+ db->submit_transaction_sync(txn);
+}
+
+// Testing/debug hook: downgrade a single object to the legacy omap format.
+// Clears the per-pool/pgmeta omap flags on the onode and synchronously
+// persists the updated onode record, so fsck/repair can detect and fix it.
+void BlueStore::inject_legacy_omap(coll_t cid, ghobject_t oid)
+{
+ dout(1) << __func__ << " "
+ << cid << " " << oid
+ <<dendl;
+ KeyValueDB::Transaction txn;
+ OnodeRef o;
+ CollectionRef c = _get_collection(cid);
+ ceph_assert(c);
+ {
+ std::unique_lock l{ c->lock }; // just to avoid internal asserts
+ o = c->get_onode(oid, false);
+ ceph_assert(o);
+ }
+ // Drop both flags so the object's omap is treated as legacy-format.
+ o->onode.clear_flag(bluestore_onode_t::FLAG_PERPOOL_OMAP | bluestore_onode_t::FLAG_PGMETA_OMAP);
+ txn = db->get_transaction();
+ // Persist the modified onode; without this the flag change would be
+ // in-memory only and lost on remount.
+ _record_onode(o, txn);
+ db->submit_transaction_sync(txn);
+}
+
+
void BlueStore::inject_statfs(const string& key, const store_statfs_t& new_statfs)
{
BlueStoreRepairer repairer;
cout << std::endl;
}
+// Verifies that legacy (non-per-pool) omap records injected into a
+// BlueStore instance are detected by fsck and automatically repaired on
+// mount when bluestore_fsck_quick_fix_on_mount is enabled. Also reproduces
+// https://tracker.ceph.com/issues/43824, where post-repair writes/removals
+// could partially revert the repair or leave stray per-pool omap records.
+TEST_P(StoreTest, BluestorePerPoolOmapFixOnMount)
+{
+ if (string(GetParam()) != "bluestore")
+ return;
+
+ BlueStore* bstore = dynamic_cast<BlueStore*> (store.get());
+ const uint64_t pool = 555;
+ coll_t cid(spg_t(pg_t(0, pool), shard_id_t::NO_SHARD));
+ ghobject_t oid = make_object("Object 1", pool);
+ ghobject_t oid2 = make_object("Object 2", pool);
+ // fill the store with some data
+ auto ch = store->create_new_collection(cid);
+ map<string, bufferlist> omap;
+ bufferlist h;
+ h.append("header");
+ {
+ omap["omap_key"].append("omap value");
+ ObjectStore::Transaction t;
+ t.create_collection(cid, 0);
+ t.touch(cid, oid);
+ t.omap_setheader(cid, oid, h);
+ t.touch(cid, oid2);
+ t.omap_setheader(cid, oid2, h);
+ int r = queue_transaction(store, ch, std::move(t));
+ ASSERT_EQ(r, 0);
+ }
+
+ // inject legacy omaps
+ bstore->inject_legacy_omap();
+ bstore->inject_legacy_omap(cid, oid);
+ bstore->inject_legacy_omap(cid, oid2);
+
+ bstore->umount();
+
+ // check we injected an issue
+ SetVal(g_conf(), "bluestore_fsck_quick_fix_on_mount", "false");
+ SetVal(g_conf(), "bluestore_fsck_error_on_no_per_pool_omap", "true");
+ g_ceph_context->_conf.apply_changes(nullptr);
+ // expect exactly 3 errors: the store-wide marker plus one per object
+ ASSERT_EQ(bstore->fsck(false), 3);
+
+ // set autofix and mount
+ SetVal(g_conf(), "bluestore_fsck_quick_fix_on_mount", "true");
+ g_ceph_context->_conf.apply_changes(nullptr);
+ bstore->mount();
+ bstore->umount();
+
+ // check we fixed it..
+ ASSERT_EQ(bstore->fsck(false), 0);
+ bstore->mount();
+
+ //
+ // Now repro https://tracker.ceph.com/issues/43824
+ //
+ // inject legacy omaps again
+ bstore->inject_legacy_omap();
+ bstore->inject_legacy_omap(cid, oid);
+ bstore->inject_legacy_omap(cid, oid2);
+ bstore->umount();
+
+ // check we injected an issue
+ SetVal(g_conf(), "bluestore_fsck_quick_fix_on_mount", "true");
+ SetVal(g_conf(), "bluestore_fsck_error_on_no_per_pool_omap", "true");
+ g_ceph_context->_conf.apply_changes(nullptr);
+ // mount triggers the quick-fix repair; then mutate repaired objects
+ bstore->mount();
+ ch = store->open_collection(cid);
+
+ {
+ // write to onode which will partiall revert per-pool
+ // omap repair done on mount due to #43824.
+ // And object removal will leave stray per-pool omap recs
+ //
+ ObjectStore::Transaction t;
+ bufferlist bl;
+ bl.append("data");
+ //this triggers onode rec update and hence legacy omap
+ t.write(cid, oid, 0, bl.length(), bl);
+ t.remove(cid, oid2); // this will trigger stray per-pool omap
+ int r = queue_transaction(store, ch, std::move(t));
+ ASSERT_EQ(r, 0);
+ }
+ bstore->umount();
+ // check omap's been fixed.
+ ASSERT_EQ(bstore->fsck(false), 0); // this will fail without fix for #43824
+
+ // leave the store mounted for any teardown done by the fixture
+ bstore->mount();
+}
+
TEST_P(StoreTestSpecificAUSize, BluestoreTinyDevFailure) {
if (string(GetParam()) != "bluestore")
return;