// set bits based on op codes and called methods.
for (iter = m->ops.begin(); iter != m->ops.end(); ++iter) {
- if (!(iter->op.op == CEPH_OSD_OP_WATCH &&
- iter->op.watch.op == CEPH_OSD_WATCH_OP_PING)) {
+ if ((iter->op.op == CEPH_OSD_OP_WATCH &&
+ iter->op.watch.op == CEPH_OSD_WATCH_OP_PING)) {
/* This is a bit odd. PING isn't actually a write. It can't
* result in an update to the object_info. PINGs also aren't
* replayed, so there's no reason to write out a log entry.
+ *
+ * However, we pipeline them behind writes, so let's force
+ * the write_ordered flag.
*/
+ op->set_force_rwordered();
+ } else {
if (ceph_osd_op_mode_modify(iter->op.op))
op->set_write();
}
return need_write_cap() || check_rmw(CEPH_OSD_RMW_FLAG_CLASS_WRITE);
}
bool OpRequest::may_cache() { return check_rmw(CEPH_OSD_RMW_FLAG_CACHE); }
+bool OpRequest::rwordered_forced() { return check_rmw(CEPH_OSD_RMW_FLAG_RWORDERED); }
+bool OpRequest::rwordered() {
+ return may_write() || may_cache() || rwordered_forced();
+}
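
// Intended effect, sketched as assertions (illustrative, assuming `op` was run
// through the flag-setting loop above with a single WATCH/PING sub-op):
//
//   assert(!op->may_write());        // PING never modifies object_info
//   assert(op->rwordered_forced());  // the RWORDERED bit was set explicitly
//   assert(op->rwordered());         // so the op is still write-ordered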
+
bool OpRequest::includes_pg_op() { return check_rmw(CEPH_OSD_RMW_FLAG_PGOP); }
bool OpRequest::need_read_cap() {
return check_rmw(CEPH_OSD_RMW_FLAG_READ);
}
void OpRequest::set_promote() { set_rmw_flags(CEPH_OSD_RMW_FLAG_FORCE_PROMOTE); }
void OpRequest::set_skip_handle_cache() { set_rmw_flags(CEPH_OSD_RMW_FLAG_SKIP_HANDLE_CACHE); }
void OpRequest::set_skip_promote() { set_rmw_flags(CEPH_OSD_RMW_FLAG_SKIP_PROMOTE); }
+void OpRequest::set_force_rwordered() { set_rmw_flags(CEPH_OSD_RMW_FLAG_RWORDERED); }
void OpRequest::mark_flag_point(uint8_t flag, const string& s) {
#ifdef WITH_LTTNG
bool may_read();
bool may_write();
bool may_cache();
+ bool rwordered_forced();
+ bool rwordered();
bool includes_pg_op();
bool need_read_cap();
bool need_write_cap();
void set_promote();
void set_skip_handle_cache();
void set_skip_promote();
+ void set_force_rwordered();
struct ClassInfo {
ClassInfo(const std::string& name, bool read, bool write,
}
// order this op as a write?
- bool write_ordered =
- op->may_write() ||
- op->may_cache() ||
- m->has_flag(CEPH_OSD_FLAG_RWORDERED);
+ bool write_ordered = op->rwordered();
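
// Note: the old expression also honored the client-supplied
// CEPH_OSD_FLAG_RWORDERED message flag directly. With op->rwordered(), that
// flag presumably has to be folded into the rmw bits when the op's flags are
// first computed, along the lines of (sketch, not shown in this excerpt):
//
//   if (m->has_flag(CEPH_OSD_FLAG_RWORDERED))
//     op->set_force_rwordered();
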
// discard due to cluster full transition? (we discard any op that
// originates before the cluster or pool is marked full; the client
CEPH_OSD_RMW_FLAG_FORCE_PROMOTE = (1 << 7),
CEPH_OSD_RMW_FLAG_SKIP_HANDLE_CACHE = (1 << 8),
CEPH_OSD_RMW_FLAG_SKIP_PROMOTE = (1 << 9),
+ CEPH_OSD_RMW_FLAG_RWORDERED = (1 << 10),
};
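
// A minimal model of the flag plumbing this change relies on, assuming
// set_rmw_flags()/check_rmw() are simple bitmask helpers over an integer
// field (illustrative; the real OpRequest implementation may differ):
struct RmwFlagsModel {
  int rmw_flags = 0;
  void set_rmw_flags(int flags) { rmw_flags |= flags; }               // OR in new bits
  bool check_rmw(int flag) const { return (rmw_flags & flag) != 0; }  // test one bit
};
// Under this model, set_force_rwordered() sets CEPH_OSD_RMW_FLAG_RWORDERED
// (1 << 10) and rwordered_forced() reads the same bit back, leaving the
// WRITE and CACHE bits untouched.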