From: Sage Weil Date: Wed, 22 Jun 2016 19:52:18 +0000 (-0400) Subject: os/bluestore/BlueFS: move metadata dump (compaction) into a helper X-Git-Tag: v11.0.1~439^2~9 X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=9d412644d1bd1c0fe1c4b8e51f71ed42d75184f3;p=ceph-ci.git os/bluestore/BlueFS: move metadata dump (compaction) into a helper Signed-off-by: Sage Weil --- diff --git a/src/os/bluestore/BlueFS.cc b/src/os/bluestore/BlueFS.cc index 6665060c2b6..919e07e5fc5 100644 --- a/src/os/bluestore/BlueFS.cc +++ b/src/os/bluestore/BlueFS.cc @@ -984,47 +984,54 @@ bool BlueFS::_should_compact_log() return true; } -void BlueFS::_compact_log_sync() +void BlueFS::_compact_log_dump_metadata(bluefs_transaction_t *t) { - // FIXME: we currently hold the lock while writing out the compacted log, - // which may mean a latency spike. we could drop the lock while writing out - // the big compacted log, while continuing to log at the end of the old log - // file, and once it's done swap out the old log extents for the new ones. - dout(10) << __func__ << dendl; - File *log_file = log_writer->file.get(); - - // clear out log (be careful who calls us!!!)
- log_t.clear(); - - bluefs_transaction_t t; - t.seq = 1; - t.uuid = super.uuid; + t->seq = 1; + t->uuid = super.uuid; dout(20) << __func__ << " op_init" << dendl; - t.op_init(); + + t->op_init(); for (unsigned bdev = 0; bdev < MAX_BDEV; ++bdev) { interval_set<uint64_t>& p = block_all[bdev]; for (interval_set<uint64_t>::iterator q = p.begin(); q != p.end(); ++q) { dout(20) << __func__ << " op_alloc_add " << bdev << " 0x" << std::hex << q.get_start() << "~" << q.get_len() << std::dec << dendl; - t.op_alloc_add(bdev, q.get_start(), q.get_len()); + t->op_alloc_add(bdev, q.get_start(), q.get_len()); } } for (auto& p : file_map) { if (p.first == 1) continue; dout(20) << __func__ << " op_file_update " << p.second->fnode << dendl; - t.op_file_update(p.second->fnode); + t->op_file_update(p.second->fnode); } for (auto& p : dir_map) { dout(20) << __func__ << " op_dir_create " << p.first << dendl; - t.op_dir_create(p.first); + t->op_dir_create(p.first); for (auto& q : p.second->file_map) { dout(20) << __func__ << " op_dir_link " << p.first << "/" << q.first << " to " << q.second->fnode.ino << dendl; - t.op_dir_link(p.first, q.first, q.second->fnode.ino); + t->op_dir_link(p.first, q.first, q.second->fnode.ino); } } +} + +void BlueFS::_compact_log_sync() +{ + // FIXME: we currently hold the lock while writing out the compacted log, + // which may mean a latency spike. we could drop the lock while writing out + // the big compacted log, while continuing to log at the end of the old log + // file, and once it's done swap out the old log extents for the new ones. + dout(10) << __func__ << dendl; + File *log_file = log_writer->file.get(); + + // clear out log (be careful who calls us!!!)
+ log_t.clear(); + + bluefs_transaction_t t; + _compact_log_dump_metadata(&t); + dout(20) << __func__ << " op_jump_seq " << log_seq << dendl; t.op_jump_seq(log_seq); diff --git a/src/os/bluestore/BlueFS.h b/src/os/bluestore/BlueFS.h index 9bb82e63493..f3bf5700ac8 100644 --- a/src/os/bluestore/BlueFS.h +++ b/src/os/bluestore/BlueFS.h @@ -243,6 +243,7 @@ private: uint64_t jump_to = 0); uint64_t _estimate_log_size(); bool _should_compact_log(); + void _compact_log_dump_metadata(bluefs_transaction_t *t); void _compact_log_sync(); //void _aio_finish(void *priv);