We recently added a flush on activate, but at that point the transaction
is still being built (the caller queues it after we return), so calling
osr.flush() there is a no-op: there is nothing in the queue to flush yet.
Instead, set a flag 'need_flush', and do the flush the next time we receive
some work.
This has the added benefit of doing the flush in the worker thread, outside
of osd_lock.
Signed-off-by: Sage Weil <sage@newdream.net>
update_stats();
}
- // flush this all out (e.g., deletions from clean_up_local) to avoid
- // subsequent races.
- osr.flush();
+ // we need to flush this all out before doing anything else..
+ need_flush = true;
// waiters
if (!is_replay()) {
on_activate();
}
+// Perform the flush that was deferred by activation: if need_flush was
+// set, flush the ObjectStore sequencer (osr) so previously queued
+// transactions (e.g., deletions from clean_up_local) are applied before
+// any new work is processed.  Must be called with the PG lock held
+// (asserted below); per the commit intent this runs from the worker
+// thread, outside osd_lock.
+void PG::do_pending_flush()
+{
+  assert(is_locked());   // caller must hold the PG lock
+  if (need_flush) {
+    dout(10) << "do_pending_flush doing pending flush" << dendl;
+    osr.flush();         // blocks until queued transactions are applied
+    need_flush = false;  // flush satisfied; clear until next activate
+    dout(10) << "do_pending_flush done" << dendl;
+  }
+}
+
void PG::do_request(OpRequest *op)
{
// do any pending flush
// primary-only, recovery-only state
set<int> might_have_unfound; // These osds might have objects on them
// which are unfound on the primary
+ bool need_flush; // need to flush before any new activity
epoch_t last_peering_reset;
role(0),
state(0),
need_up_thru(false),
+ need_flush(false),
last_peering_reset(0),
backfill_target(-1),
pg_stats_lock("PG::pg_stats_lock"),
bool is_empty() const { return info.last_update == eversion_t(0,0); }
// pg on-disk state
+ void do_pending_flush();
+
void write_info(ObjectStore::Transaction& t);
void write_log(ObjectStore::Transaction& t);