replay_queue.clear();
requeue_ops(replay);
requeue_ops(waiting_for_active);
+ assert(waiting_for_peered.empty());
publish_stats_to_osd();
}
state_set(PG_STATE_ACTIVE);
// waiters
if (flushes_in_progress == 0) {
- requeue_ops(waiting_for_active);
+ requeue_ops(waiting_for_peered);
}
}
assert(waiting_for_degraded_object.empty());
assert(waiting_for_ack.empty());
assert(waiting_for_ondisk.empty());
+ assert(waiting_for_active.empty());
split_replay_queue(&replay_queue, &(child->replay_queue), match, split_bits);
- osd->dequeue_pg(this, &waiting_for_active);
+ osd->dequeue_pg(this, &waiting_for_peered);
OSD::split_list(
- &waiting_for_active, &(child->waiting_for_active), match, split_bits);
+ &waiting_for_peered, &(child->waiting_for_peered), match, split_bits);
{
Mutex::Locker l(map_lock); // to avoid a race with the osd dispatch
OSD::split_list(
on_role_change();
- // take active waiters
- requeue_ops(waiting_for_active);
+ // take waiters blocked on peered
+ requeue_ops(waiting_for_peered);
} else {
// no role change.
// waiters
if (!pg->is_replay() && pg->flushes_in_progress == 0) {
- pg->requeue_ops(pg->waiting_for_active);
+ pg->requeue_ops(pg->waiting_for_peered);
}
pg->on_activate();
// pg waiters
unsigned flushes_in_progress;
+ // ops waiting on peered
+ list<OpRequestRef> waiting_for_peered;
+
+ // ops waiting on active (require peered as well)
list<OpRequestRef> waiting_for_active;
+
list<OpRequestRef> waiting_for_cache_not_full;
list<OpRequestRef> waiting_for_all_missing;
map<hobject_t, list<OpRequestRef> > waiting_for_unreadable_object,
dout(20) << flushes_in_progress
<< " flushes_in_progress pending "
- << "waiting for peered on " << op << dendl;
+ << "waiting for peered on " << op << dendl;
- waiting_for_active.push_back(op);
- op->mark_delayed("waiting for flushes");
+ waiting_for_peered.push_back(op);
+ op->mark_delayed("waiting for flushes");
return;
}
- if (!is_active()) {
+ if (!is_peered()) {
// Delay unless PGBackend says it's ok
if (pgbackend->can_handle_while_inactive(op)) {
bool handled = pgbackend->handle_message(op);
assert(handled);
return;
} else {
- waiting_for_active.push_back(op);
- op->mark_delayed("waiting for active");
+ waiting_for_peered.push_back(op);
+ op->mark_delayed("waiting for peered");
return;
}
}
switch (op->get_req()->get_type()) {
case CEPH_MSG_OSD_OP:
+ if (!is_active()) {
+ dout(20) << " peered, not active, waiting for active on " << op << dendl;
+ waiting_for_active.push_back(op);
+ op->mark_delayed("waiting for active");
+ return;
+ }
if (is_replay()) {
dout(20) << " replay, waiting for active on " << op << dendl;
waiting_for_active.push_back(op);
first = &m->ops[0];
}
- if (!is_active()) {
- waiting_for_active.push_back(op);
+ if (!is_peered()) {
+ waiting_for_peered.push_back(op);
- op->mark_delayed("waiting for active");
+ op->mark_delayed("waiting for peered");
return;
}
assert(flushes_in_progress > 0);
flushes_in_progress--;
if (flushes_in_progress == 0) {
- requeue_ops(waiting_for_active);
+ requeue_ops(waiting_for_peered);
}
if (!is_active() || !is_primary()) {
pair<hobject_t, ObjectContextRef> i;
// requeue everything in the reverse order they should be
// reexamined.
+ requeue_ops(waiting_for_peered);
+
clear_scrub_reserved();
+
+ // requeues waiting_for_active
scrub_clear_state();
context_registry_on_change();