| **ceph-bluestore-tool** show-sharding --path *osd path*
| **ceph-bluestore-tool** trim --path *osd path*
| **ceph-bluestore-tool** zap-device --dev *dev path*
-| **ceph-bluestore-tool** downgrade-wal-to-v1 --path *osd path*
+| **ceph-bluestore-tool** revert-wal-to-plain --path *osd path*
Description
Zeros all device label locations. This effectively makes device appear empty.
-:command: `downgrade-wal-to-v1` --path *osd path*
+:command: `revert-wal-to-plain` --path *osd path*
- Changes WAL disk format from the new version to the legacy one. Useful for downgrades, or if you
- might want to disable this new feature (bluefs_wal_v2).
+ Changes WAL files from envelope mode to the legacy plain mode.
+ Useful for downgrades, or if you might want to disable this new feature (bluefs_wal_envelope_mode).
Options
=======
level: advanced
default: false
with_legacy: true
+- name: bluefs_wal_envelope_mode
+ type: bool
+ level: advanced
+ desc: Enables a faster backend in BlueFS for WAL writes.
+ long_desc: In envelope mode BlueFS files do not need to update metadata. When applied to RocksDB WAL files,
+ it reduces the number of fdatasync syscalls by ~50%.
+ Downgrading from envelope mode to legacy mode requires `ceph-bluestore-tool --command revert-wal-to-plain`.
+ default: true
+ with_legacy: false
- name: bluefs_allocator
type: str
level: dev
desc: Enables exception throwing instead of process abort on transaction submission error.
default: false
with_legacy: false
-- name: bluefs_wal_envelope_mode
- type: bool
- level: advanced
- desc: Enables a faster backend in BlueFS for WAL writes.
- long_desc: In envelope mode BlueFS files do not need to update metadata. When applied to RocksDB WAL files,
- it reduces by ~50% the amount of fdatasync syscalls.
- Downgrading from an envelope mode to legacy mode requires `ceph-bluestore-tool --command downgrade-wal-to-v1`.
- default: true
- with_legacy: false
return 0;
}
-int BlueFS::downgrade_wal_to_v1(
+int BlueFS::revert_wal_to_plain(
const std::string& dir,
const std::string& name)
{
FileReader* reader = nullptr;
// we use dir for wals and name like wal; should get proper hint
r = open_for_write(dir, tmp_name, &writer, false);
- // use normal v1 write path by marking node type to legacy
+ // use normal legacy write path by marking node type to plain
writer->file->fnode.encoding = bluefs_node_encoding::PLAIN;
ceph_assert(r == 0);
r = open_for_read(dir, name, &reader);
return 0;
}
-int BlueFS::downgrade_wal_to_v1()
+int BlueFS::revert_wal_to_plain()
{
string wal_dir("db.wal");
auto dir_it = nodes.dir_map.find(wal_dir);
auto dir_copy = dir_it->second->file_map;
for (const auto& [file_name, file] : dir_copy) {
if(file->envelope_mode()) {
- downgrade_wal_to_v1(wal_dir, file_name);
+ revert_wal_to_plain(wal_dir, file_name);
sync_metadata(true);
- dout(10) << __func__ << fmt::format(" {} v2=>v1", file_name) << dendl;
+ dout(10) << __func__ << fmt::format(" {} envelope mode=>plain mode", file_name) << dendl;
} else {
- dout(10) << __func__ << fmt::format(" {} in v1", file_name) << dendl;
+ dout(10) << __func__ << fmt::format(" {} in plain mode", file_name) << dendl;
}
}
const std::set<int>& devs_source,
int dev_target,
const bluefs_layout_t& layout);
- int downgrade_wal_to_v1();
+ int revert_wal_to_plain();
uint64_t get_used();
uint64_t get_block_device_size(unsigned id);
size_t read_len,
bufferlist* bl);
void _check_vselector_LNF();
- int downgrade_wal_to_v1(
+ int revert_wal_to_plain(
const std::string& dir,
const std::string& name
);
return _fsck_on_open(depth, repair);
}
-int BlueStore::downgrade_wal_to_v1() {
+int BlueStore::revert_wal_to_plain() {
int r = cold_open();
if (r != 0) {
dout(1) << __func__ << "failed to open db / allocator" << dendl;
goto out;
}
- bluefs->downgrade_wal_to_v1();
+ bluefs->revert_wal_to_plain();
cold_close();
out:
return r;
int repair(bool deep) override {
return _fsck(deep ? FSCK_DEEP : FSCK_REGULAR, true);
}
- int downgrade_wal_to_v1();
+ int revert_wal_to_plain();
int quick_fix() override {
return _fsck(FSCK_SHALLOW, true);
}
"show-sharding, "
"trim, "
"zap-device, "
- "downgrade-wal-to-v1"
+ "revert-wal-to-plain"
);
po::options_description po_all("All options");
po_all.add(po_options).add(po_positional);
}
}
- if (action == "fsck" || action == "repair" || action == "quick-fix" || action == "allocmap" || action == "qfsck" || action == "restore_cfb" || action == "migrate-wal-to-v1") {
+ if (action == "fsck" || action == "repair" ||
+ action == "quick-fix" || action == "allocmap" ||
+ action == "qfsck" || action == "restore_cfb" ||
+ action == "revert-wal-to-plain") {
if (path.empty()) {
cerr << "must specify bluestore path" << std::endl;
exit(EXIT_FAILURE);
else if (action == "fsck" ||
action == "repair" ||
action == "quick-fix" ||
- action == "downgrade-wal-to-v1") {
+ action == "revert-wal-to-plain") {
validate_path(cct.get(), path, false);
BlueStore bluestore(cct.get(), path);
int r;
r = bluestore.fsck(fsck_deep);
} else if (action == "repair") {
r = bluestore.repair(fsck_deep);
- } else if (action == "downgrade-wal-to-v1") {
- r = bluestore.downgrade_wal_to_v1();
+ } else if (action == "revert-wal-to-plain") {
+ r = bluestore.revert_wal_to_plain();
} else {
r = bluestore.quick_fix();
}
// WAL files don't update internal extents while writing to save memory, only on _replay
fs.umount();
fs.mount();
- fs.downgrade_wal_to_v1();
+ fs.revert_wal_to_plain();
BlueFS::FileReader *reader;
ASSERT_EQ(0, fs.open_for_read(dir_db, wal_file, &reader));