return;
}
+ // order this op as a write?
+ bool write_ordered =
+ op->may_write() ||
+ op->may_cache() ||
+ m->has_flag(CEPH_OSD_FLAG_RWORDERED);
+
// discard due to cluster full transition? (we discard any op that
// originates before the cluster or pool is marked full; the client
// will resend after the full flag is removed or if they expect the
// op to succeed despite being full). The except is FULL_FORCE ops,
// which there is no reason to discard because they bypass all full
// checks anyway.
+ // Skip these full checks entirely for ops that are not write-ordered;
+ // only write-ordered ops (may_write/may_cache/RWORDERED) can be affected by fullness.
// FIXME: we exclude mds writes for now.
- if (!(m->get_source().is_mds() || m->has_flag(CEPH_OSD_FLAG_FULL_FORCE)) &&
+ if (write_ordered && !( m->get_source().is_mds() || m->has_flag(CEPH_OSD_FLAG_FULL_FORCE)) &&
info.history.last_epoch_marked_full > m->get_map_epoch()) {
dout(10) << __func__ << " discarding op sent before full " << m << " "
<< *m << dendl;
return;
}
- if (!m->get_source().is_mds() && osd->check_failsafe_full()) {
+ if (!(m->get_source().is_mds()) && osd->check_failsafe_full() && write_ordered) {
dout(10) << __func__ << " fail-safe full check failed, dropping request"
<< dendl;
return;
}
}
- // order this op as a write?
- bool write_ordered =
- op->may_write() ||
- op->may_cache() ||
- m->has_flag(CEPH_OSD_FLAG_RWORDERED);
-
dout(10) << "do_op " << *m
<< (op->may_write() ? " may_write" : "")
<< (op->may_read() ? " may_read" : "")