* maintain a queue of in progress reads (@see in_progress_client_reads)
* to ensure that we always call the completion callback in order.
*
- * Another subtely is that while we may read a degraded object, we will
 + * Another subtlety is that while we may read a degraded object, we will
* still only perform a client read from shards in the acting set. This
* ensures that we won't ever have to restart a client initiated read in
* check_recovery_sources.
pi->recovery_progress, &new_progress, reply,
&(pi->stat));
// Handle the case of a read error right after we wrote, which is
- // hopefuilly extremely rare.
+ // hopefully extremely rare.
if (r < 0) {
dout(5) << __func__ << ": oid " << soid << " error " << r << dendl;
map<hobject_t, PullInfo> pulling;
- // Reverse mapping from osd peer to objects beging pulled from that peer
+ // Reverse mapping from osd peer to objects being pulled from that peer
map<pg_shard_t, set<hobject_t> > pull_from_peer;
void clear_pull(
map<hobject_t, PullInfo>::iterator piter,
entity_addr_t addr;
bool will_ping; ///< is client new enough to ping the watch
- utime_t last_ping; ///< last cilent ping
+ utime_t last_ping; ///< last client ping
entity_name_t entity;
bool discarded;
// This class exists to bridge the ceph code, which treats the class
// as the client, and the queue, where the class is
- // osd_op_type_t. So this adpater class will transform calls
+ // osd_op_type_t. So this adapter class will transform calls
// appropriately.
class mClockClientQueue : public OpQueue<Request, Client> {
// This class exists to bridge the ceph code, which treats the class
// as the client, and the queue, where the class is
- // osd_op_type_t. So this adpater class will transform calls
+ // osd_op_type_t. So this adapter class will transform calls
// appropriately.
class mClockOpClassQueue : public OpQueue<Request, Client> {
* PastIntervals only needs to be able to answer two questions:
* 1) Where should the primary look for unfound objects?
* 2) List a set of subsets of the OSDs such that contacting at least
- * one from each subset guarrantees we speak to at least one witness
+ * one from each subset guarantees we speak to at least one witness
* of any completed write.
*
* Crucially, 2) does not require keeping *all* past intervals. Certainly,
/**
* pg_hit_set_info_t - information about a single recorded HitSet
*
- * Track basic metadata about a HitSet, like the nubmer of insertions
+ * Track basic metadata about a HitSet, like the number of insertions
* and the time range it covers.
*/
struct pg_hit_set_info_t {
int Objecter::_normalize_watch_error(int r)
{
// translate ENOENT -> ENOTCONN so that a delete->disconnection
- // notification and a failure to reconnect becuase we raced with
+ // notification and a failure to reconnect because we raced with
// the delete appear the same to the user.
if (r == -ENOENT)
r = -ENOTCONN;
}
}
- // Make sure we don't resechedule if we wake up after shutdown
+ // Make sure we don't reschedule if we wake up after shutdown
if (initialized) {
tick_event = timer.reschedule_me(ceph::make_timespan(
cct->_conf->objecter_tick_interval));
::decode(*ptruncated, p);
} else {
// the OSD did not provide this. since old OSDs do not
- // enfoce omap result limits either, we can infer it from
+ // enforce omap result limits either, we can infer it from
// the size of the result
*ptruncated = (pattrs->size() == max_entries);
}