if ((iter->op.op == CEPH_OSD_OP_WATCH &&
iter->op.watch.op == CEPH_OSD_WATCH_OP_PING)) {
- /* This a bit odd. PING isn't actually a write. It can't
- * result in an update to the object_info. PINGs also aren'ty
- * resent, so there's no reason to write out a log entry
+ /* This is a bit odd. PING isn't actually a write. It can't
+ * result in an update to the object_info. PINGs also aren't
+ * resent, so there's no reason to write out a log entry.
*
* However, we pipeline them behind writes, so let's force
* the write_ordered flag.
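The rule this comment defends is easy to state in isolation: a watch PING mutates nothing, yet it must still be serialized behind earlier writes it was pipelined with. A minimal self-contained sketch of that classification, using hypothetical `OpKind`/`Op`/`classify` names rather than Ceph's actual types:

```cpp
#include <cstdint>

enum class OpKind : std::uint8_t { Read, Write, WatchPing };

struct Op {
  OpKind kind;
  bool write_ordered = false;  // must run behind prior writes?
};

void classify(Op& op) {
  // Normal rule: only writes are write-ordered.
  op.write_ordered = (op.kind == OpKind::Write);
  // Exception: a PING is not a write (no object_info update, no
  // log entry, never resent), but it is pipelined behind writes,
  // so force the ordering flag anyway.
  if (op.kind == OpKind::WatchPing)
    op.write_ordered = true;
}
```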
OSDMapRef osdmap = sdata->waiting_for_pg_osdmap;
if (osdmap->is_up_acting_osd_shard(item.first, osd->whoami)) {
dout(20) << __func__ << " " << item.first
- << " no pg, should exist, will wait" << " on " << *qi << dendl;
+ << " no pg, should exist, will wait on " << *qi << dendl;
slot.to_process.push_front(*qi);
slot.waiting_for_pg = true;
} else if (qi->get_map_epoch() > osdmap->get_epoch()) {
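For context, this hunk sits in the path where an op targets a PG that the map says should exist on this OSD but is not instantiated yet. The pattern, park the op at the front of the slot and flag the slot so it waits for the PG, can be sketched as below; `PGSlot` and `handle_missing_pg` are simplified stand-ins, not the real OSDShard structures:

```cpp
#include <deque>

struct OpItem { unsigned map_epoch; };

struct PGSlot {
  std::deque<OpItem> to_process;  // ops pending for this PG
  bool waiting_for_pg = false;    // parked until PG instantiated
};

// Decide what to do with an op whose PG is not instantiated.
void handle_missing_pg(PGSlot& slot, OpItem op,
                       bool osd_should_host_pg,
                       unsigned osdmap_epoch) {
  if (osd_should_host_pg) {
    // The map says the PG belongs here: park the op at the front
    // so ordering is preserved when the PG appears and the slot
    // is requeued as a whole.
    slot.to_process.push_front(op);
    slot.waiting_for_pg = true;
  } else if (op.map_epoch > osdmap_epoch) {
    // The op carries a newer map than we have; wait for the map
    // to catch up before judging placement (handling elided).
  }
}
```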
*
* Multiple worker threads can operate on each shard.
*
- * Under normal circumstances, num_running == to_proces.size(). There are
+ * Under normal circumstances, num_running == to_process.size(). There are
* two times when that is not true: (1) when waiting_for_pg == true and
* to_process is accumulating requests that are waiting for the pg to be
* instantiated; in that case they will all get requeued together by
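The corrected comment encodes an invariant: outside the exceptional windows, every queued item has a worker on it, so `num_running == to_process.size()`. A toy model of that bookkeeping, with illustrative field names rather than the actual pg_slot layout, showing how exception (1) lets the counts diverge:

```cpp
#include <deque>

struct Item {};

struct Slot {
  std::deque<Item> to_process;  // queued and in-flight items
  int num_running = 0;          // workers currently on this slot
  bool waiting_for_pg = false;  // pg not instantiated yet
};

// Worker-side claim/finish pair that keeps the counts in step:
// a claimed item stays in to_process while it runs, so each
// running worker is matched by a queued item.
bool claim(Slot& s) {
  if (s.waiting_for_pg || s.to_process.empty())
    return false;      // exception (1): items accumulate with no
                       // worker, so num_running falls behind
  ++s.num_running;     // item is now counted on both sides
  return true;
}

void finish(Slot& s) {
  s.to_process.pop_front();  // both counts drop together
  --s.num_running;
}
```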
/// wake any pg waiters after a PG is created/instantiated
void wake_pg_waiters(spg_t pgid);
- /// prune ops (and possiblye pg_slots) for pgs that shouldn't be here
+ /// prune ops (and possibly pg_slots) for pgs that shouldn't be here
void prune_pg_waiters(OSDMapRef osdmap, int whoami);
/// clear cached PGRef on pg deletion