#define rdout(x) lgeneric_subdout(cct,reserver,x)
/**
- * Manages a configurable number of asyncronous reservations.
+ * Manages a configurable number of asynchronous reservations.
*
* Memory usage is linear with the number of items queued and
* linear with respect to the total number of priorities used
/**
*
- * TODO: normalize value based on some fucntion of half_life,
+ * TODO: normalize value based on some function of half_life,
* so that it can be interpreted as an approximation of a
* moving average of N seconds. currently, changing half-life
* skews the scale of the value, even at steady state.
* Abstract class for all Op Queues
*
* In order to provide optimized code, be sure to declare all
- * virutal functions as final in the derived class.
+ * virtual functions as final in the derived class.
*/
template <typename T, typename K>
* Throttles the maximum number of active requests and completes them in order
*
* Operations can complete out-of-order but their associated Context callback
- * will completed in-order during invokation of start_op() and wait_for_ret()
+ * will be completed in-order during invocation of start_op() and wait_for_ret()
*/
class OrderedThrottle {
public:
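For illustration, here is a minimal sketch of the ordering rule documented above; it is not the OrderedThrottle implementation, and all names are invented:

```cpp
#include <cstdint>
#include <functional>
#include <map>

// Invented sketch: operations may complete out of order, but their
// callbacks fire strictly in start order, each only after every
// earlier operation has also completed.
class InOrderCompleter {
  struct Slot { bool done = false; int r = 0; std::function<void(int)> cb; };
  std::map<uint64_t, Slot> slots;  // ordered by start tid
  uint64_t next_tid = 0;

public:
  uint64_t start(std::function<void(int)> cb) {
    slots[next_tid].cb = std::move(cb);
    return next_tid++;
  }
  void complete(uint64_t tid, int r) {  // may arrive out of order
    auto it = slots.find(tid);
    it->second.done = true;
    it->second.r = r;
    // drain from the front: only a contiguous finished prefix may fire
    while (!slots.empty() && slots.begin()->second.done) {
      slots.begin()->second.cb(slots.begin()->second.r);
      slots.erase(slots.begin());
    }
  }
};
```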
* Call with the event_lock LOCKED
*
* Returns true if the callback was cancelled.
- * Returns false if you never addded the callback in the first place.
+ * Returns false if you never added the callback in the first place.
*/
bool cancel_event(Context *callback);
// Pick a new priority out of the total priority.
unsigned prio = rand() % total_prio + 1;
unsigned tp = total_prio - i->key;
- // Find the priority coresponding to the picked number.
+ // Find the priority corresponding to the picked number.
// Subtract high priorities to low priorities until the picked number
// is more than the total and try to dequeue that priority.
// Reverse the direction from previous implementation because there is a higher
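A hedged sketch of the weighted pick these comments describe; the container shape and function name are invented, and only `prio`/`tp` mirror the snippet above:

```cpp
#include <cstdlib>
#include <map>
#include <queue>

// Each key in `queues` is a priority whose value doubles as its weight.
// Draw a number in [1, total_prio], then walk from the highest priority
// down, subtracting weights until the drawn number exceeds what remains.
template <typename T>
unsigned pick_priority(const std::map<unsigned, std::queue<T>>& queues,
                       unsigned total_prio) {
  unsigned prio = std::rand() % total_prio + 1;  // 1..total_prio
  unsigned tp = total_prio;
  for (auto i = queues.rbegin(); i != queues.rend(); ++i) {
    tp -= i->first;            // drop this priority's weight
    if (prio > tp)             // the draw landed in this bucket
      return i->first;         // try to dequeue from this priority
  }
  return queues.begin()->first;  // not reached if total_prio sums the keys
}
```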
* registered, and an incoming command is 'foo bar baz', it is
* matched with 'foo bar', while 'foo fud' will match 'foo'.
*
- * The entire incoming command string is passed to the registred
+ * The entire incoming command string is passed to the registered
* hook.
*
* @param command command string
* @param cmddesc command syntax descriptor
- * @param hook implementaiton
+ * @param hook implementation
* @param help help text. if empty, command will not be included in 'help' output.
*
* @return 0 for success, -EEXIST if command already registered.
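A sketch of the longest-prefix matching described above; the registry map and helper are invented stand-ins, not the actual AdminSocket internals:

```cpp
#include <map>
#include <string>

// Among registered command strings, pick the longest one that prefixes
// the incoming command line.
std::string match_command(const std::map<std::string, std::string>& hooks,
                          const std::string& incoming) {
  std::string best;
  for (const auto& [cmd, help] : hooks) {
    if (incoming.compare(0, cmd.size(), cmd) == 0 && cmd.size() > best.size())
      best = cmd;  // e.g. "foo bar baz" prefers "foo bar" over "foo"
  }
  return best;  // empty if nothing registered matches
}
```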
void drain(std::function<void(T*)> uref,
uint32_t flags = FLAG_NONE) {
/* clear a table, call supplied function on
- * each element found (e.g., retuns sentinel
+ * each element found (e.g., returns sentinel
* references) */
Object::Queue2 drain_q;
for (int t_ix = 0; t_ix < n_part; ++t_ix) {
// don't drop privileges
CINIT_FLAG_DEFER_DROP_PRIVILEGES = 0x10,
- // do'nt contact mons for config
+ // don't contact mons for config
CINIT_FLAG_NO_MON_CONFIG = 0x20,
};
#include <boost/utility/string_view.hpp>
/* Returns the length of a buffer that would be needed to escape 'buf'
- * as an XML attrribute
+ * as an XML attribute
*/
size_t escape_xml_attr_len(const char *buf);
void escape_xml_attr(const char *buf, char *out);
/* Returns the length of a buffer that would be needed to escape 'buf'
- * as an JSON attrribute
+ * as a JSON attribute
*/
size_t escape_json_attr_len(const char *buf, size_t src_len);
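A hedged usage sketch for the two-pass escape API declared above (assumes those declarations are in scope; whether the reported length covers a trailing NUL is an assumption):

```cpp
#include <cstddef>
#include <vector>

// Query the required buffer length first, then escape into a buffer of
// exactly that size.
void escape_attr_example(const char *raw) {
  size_t need = escape_xml_attr_len(raw);
  std::vector<char> out(need);
  escape_xml_attr(raw, out.data());  // writes the escaped attribute into out
}
```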
OPTION(ms_bind_port_min, OPT_INT)
OPTION(ms_bind_port_max, OPT_INT)
OPTION(ms_bind_retry_count, OPT_INT) // If binding fails, how many times do we retry to bind
-OPTION(ms_bind_retry_delay, OPT_INT) // Delay between attemps to bind
+OPTION(ms_bind_retry_delay, OPT_INT) // Delay between attempts to bind
OPTION(ms_bind_before_connect, OPT_BOOL)
OPTION(ms_tcp_listen_backlog, OPT_INT)
OPTION(ms_rwthread_stack_bytes, OPT_U64)
OPTION(mds_op_log_threshold, OPT_INT) // how many op log messages to show in one go
OPTION(mds_snap_min_uid, OPT_U32) // The minimum UID required to create a snapshot
OPTION(mds_snap_max_uid, OPT_U32) // The maximum UID allowed to create a snapshot
-OPTION(mds_snap_rstat, OPT_BOOL) // enable/disbale nested stat for snapshot
+OPTION(mds_snap_rstat, OPT_BOOL) // enable/disable nested stat for snapshot
OPTION(mds_verify_backtrace, OPT_U32)
// detect clients which aren't trimming completed requests
OPTION(mds_max_completed_flushes, OPT_U32)
OPTION(osd_recovery_cost, OPT_U32)
/**
- * osd_recovery_op_warn_multiple scales the normal warning threshhold,
+ * osd_recovery_op_warn_multiple scales the normal warning threshold,
* osd_op_complaint_time, so that slow recovery ops won't cause noise
*/
OPTION(osd_recovery_op_warn_multiple, OPT_U32)
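An illustrative reading of the relationship stated above; the identifiers are just the option names, not real variables:

```cpp
// A recovery op is only reported as slow once the scaled complaint
// window has elapsed.
double recovery_complaint_time =
    osd_op_complaint_time * osd_recovery_op_warn_multiple;
```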
// (try to) use extsize for alloc hint NOTE: extsize seems to trigger
// data corruption in xfs prior to kernel 3.5. filestore will
-// implicity disable this if it cannot confirm the kernel is newer
+// implicitly disable this if it cannot confirm the kernel is newer
// than that.
// NOTE: This option involves a tradeoff: When disabled, fragmentation is
// worse, but large sequential writes are faster. When enabled, large
PERFCOUNTER_TIME = 0x1, // float (measuring seconds)
PERFCOUNTER_U64 = 0x2, // integer (note: either TIME or U64 *must* be set)
PERFCOUNTER_LONGRUNAVG = 0x4, // paired counter + sum (time)
- PERFCOUNTER_COUNTER = 0x8, // counter (vs guage)
+ PERFCOUNTER_COUNTER = 0x8, // counter (vs gauge)
PERFCOUNTER_HISTOGRAM = 0x10, // histogram (vector) of values
};
* non-NULL if the mode argument is equal to CONT or END
* p_buf - the packet buffer where crc computations are being performed
* length - the length of p_buf in bytes
- * init_bytes - the number of initial bytes that need to be procesed before
+ * init_bytes - the number of initial bytes that need to be processed before
* aligning p_buf to multiples of 4 bytes
* mode - can be any of the following: BEGIN, CONT, END, BODY, ALIGN
*
#elif defined(__linux__)
error = fsetxattr(fd, name, value, size, 0);
#elif defined(__APPLE__)
- error = fsetxattr(fd, name, value, size, 0, 0 /* no options should be indentical to Linux */ );
+ error = fsetxattr(fd, name, value, size, 0, 0 /* no options; should be identical to Linux */ );
#endif
return (error);
* @param n_threads the number of threads in this thread pool.
* @param cpu the CPU core to which this thread pool is assigned
- * @note each @c Task has its own ceph::thread::Condition, which possesses
- * possesses an fd, so we should keep the size of queue under a resonable
+ * @note each @c Task has its own ceph::thread::Condition, which possesses
+ * an fd, so we should keep the size of the queue under a reasonable
* limit.
*/
ThreadPool(size_t n_threads, size_t queue_sz, unsigned cpu);
/**
* returns (type_id, type) of all parent buckets between id and
- * default, can be used to check for anomolous CRUSH maps
+ * default, can be used to check for anomalous CRUSH maps
*/
map<int, string> get_parent_hierarchy(int id) const;
* next item or set of items. or why pow() is used the way it is.
*
* note that the original version 1 of this function made special
- * accomodation for the case where straw lengths were identical. this
+ * accommodation for the case where straw lengths were identical. this
* is also flawed in a non-obvious way; version 2 drops the special
* handling and appears to work just as well.
*
* to another bucket.
*
* @param[in] map the crush_map
- * @param[in] bucketno the bucket unique identifer or 0
+ * @param[in] bucketno the bucket unique identifier or 0
* @param[in] bucket the bucket to add to the __map__
* @param[out] idout a pointer to the bucket identifier
*
*
* Remove __item__ from __bucket__ and subtract the item weight from
* the bucket weight. If the weight of the item is greater than the
- * weight of the bucket, silentely set the bucket weight to zero.
+ * weight of the bucket, silently set the bucket weight to zero.
*
* - return -ENOMEM if the __bucket__ cannot be sized down with __realloc(3)__.
* - return -1 if the value of __bucket->alg__ is unknown.
unsigned char**
ErasureCodeIsaTableCache::getEncodingCoefficientNoLock(int matrix, int k, int m)
{
- // create a pointer to store an encoding coefficients adddress
+ // create a pointer to store the address of the encoding coefficients
if (!encoding_coefficient[matrix][k][m]) {
encoding_coefficient[matrix][k][m] = new (unsigned char*);
*encoding_coefficient[matrix][k][m] = 0;
//
bool ErasureCodeJerasureBlaumRoth::check_w(ostream *ss) const
{
- // back in Firefly, w = 7 was the default and produced useable
+ // back in Firefly, w = 7 was the default and produced usable
// chunks. Tolerate this value for backward compatibility.
if (w == 7)
return true;
DEFINE_CEPH_FEATURE(60, 1, OSD_RECOVERY_DELETES) // *do not share this bit*
DEFINE_CEPH_FEATURE(61, 1, CEPHX_V2) // *do not share this bit*
-DEFINE_CEPH_FEATURE(62, 1, RESERVED) // do not use; used as a sentinal
+DEFINE_CEPH_FEATURE(62, 1, RESERVED) // do not use; used as a sentinel
DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facing
} __attribute__ ((packed));
/*
- * Note that this is embedded wthin ceph_mds_request_head. Also, compatability
+ * Note that this is embedded within ceph_mds_request_head. Also, compatibility
* with the ceph_mds_request_args_legacy must be maintained!
*/
union ceph_mds_request_args {
#define CEPH_STATX_ALL_STATS 0x00001fffU /* All supported stats */
/*
- * Compatability macros until these defines make their way into glibc
+ * Compatibility macros until these defines make their way into glibc
*/
#ifndef AT_NO_ATTR_SYNC
#define AT_NO_ATTR_SYNC 0x4000 /* Don't sync attributes with the server */
// are included before this file. Redefinition might not help in this
// case since already parsed code has evaluated to the wrong value.
-// This would warrrant for d definition that would actually be evaluated
-// at the location of usage and report a possible confict.
+// This would warrant a definition that would actually be evaluated
+// at the location of usage and report a possible conflict.
// This is left up to a future improvement
#elif (ENODATA != 87)
-// #warning ENODATA already defined to a value different from 87 (ENOATRR), refining to fix
+// #warning ENODATA already defined to a value different from 87 (ENOATTR), redefining to fix
/* Defined if you have libzfs enabled */
#cmakedefine HAVE_LIBZFS
-/* Define if the C complier supports __func__ */
+/* Define if the C compiler supports __func__ */
#cmakedefine HAVE_FUNC
-/* Define if the C complier supports __PRETTY_FUNCTION__ */
+/* Define if the C compiler supports __PRETTY_FUNCTION__ */
#cmakedefine HAVE_PRETTY_FUNC
/* F_SETPIPE_SZ is supported */
// otype == external type, i.e., the type on the wire
// NOTE: the overload resolution ensures that the legacy encode/decode methods
-// defined for int types is prefered to the ones defined using the specialized
-// template, and hence get selected. This machinary prevents these these from
+// defined for int types are preferred to the ones defined using the specialized
+// template, and hence get selected. This machinery prevents these from
// getting glued into the legacy encode/decode methods; the overhead of setting
// up a contiguous_appender etc is likely to be slower.
namespace _denc {
* - get_first_child(), next_sibling(int parent_bits) to make (possibly partial)
* iteration efficient (see, e.g., try_assimilate_children()
* - rework frag_t so that we mask the left-most (most significant) bits instead of
- * the right-most (least significant) bits. just because it's more intutive, and
+ * the right-most (least significant) bits. just because it's more intuitive, and
* matches the network/netmask concept.
*/
* @post If this succeeds, any function in librados may be used
*
* @param cluster The cluster to connect to.
- * @returns 0 on sucess, negative error code on failure
+ * @returns 0 on success, negative error code on failure
*/
CEPH_RADOS_API int rados_connect(rados_t cluster);
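A minimal connect sketch against the C API declared above, following its "0 on success, negative error code" convention; the "admin" user id and default config search path are illustrative choices:

```cpp
#include <rados/librados.h>
#include <cstdio>
#include <cstring>

int connect_example() {
  rados_t cluster;
  int r = rados_create(&cluster, "admin");       // client.admin identity
  if (r == 0)
    r = rados_conf_read_file(cluster, nullptr);  // default config locations
  if (r == 0)
    r = rados_connect(cluster);
  if (r < 0)
    fprintf(stderr, "connect failed: %s\n", strerror(-r));
  return r;
}
```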
* Get/wait for the most recent osdmap
*
* @param cluster the cluster to shutdown
- * @returns 0 on sucess, negative error code on failure
+ * @returns 0 on success, negative error code on failure
*/
CEPH_RADOS_API int rados_wait_for_latest_osdmap(rados_t cluster);
* based upon the current features
* @param[out] require_min_compat_client required minimum client version
* based upon explicit setting
- * @returns 0 on sucess, negative error code on failure
+ * @returns 0 on success, negative error code on failure
*/
CEPH_RADOS_API int rados_get_min_compatible_client(rados_t cluster,
int8_t* min_compat_client,
* @param init_value the init value for the algorithm
* @param init_value_len the length of the init value
* @param len the number of bytes to checksum
- * @param off the offset to start checksuming in the object
+ * @param off the offset to start checksumming in the object
* @param chunk_size optional length-aligned chunk size for checksums
* @param pchecksum where to store the checksum result
* @param checksum_len the number of bytes available for the result
* - N bytes = key name
*
* Restrictions:
- * - The HDR update must preceed any key/value updates.
+ * - The HDR update must precede any key/value updates.
* - All key/value updates must be in lexicographically sorted order
* in cmdbuf.
* - You can read/write to a tmap object via the regular APIs, but
const char *buf, size_t len, uint64_t off);
/**
- * Asychronously append data to an object
+ * Asynchronously append data to an object
*
* Queues the append and returns.
*
const char *buf, size_t len);
/**
- * Asychronously write an entire object
+ * Asynchronously write an entire object
*
* The object is filled with the provided data. If the object exists,
* it is atomically truncated and then written.
const char *buf, size_t len);
/**
- * Asychronously write the same buffer multiple times
+ * Asynchronously write the same buffer multiple times
*
* Queues the writesame and returns.
*
size_t write_len, uint64_t off);
/**
- * Asychronously remove an object
+ * Asynchronously remove an object
*
* Queues the remove and returns.
*
rados_completion_t completion);
/**
- * Asychronously read data from an object
+ * Asynchronously read data from an object
*
* The io context determines the snapshot to read from, if any was set
* by rados_ioctx_snap_set_read().
* @name Object Operations
*
* A single rados operation can do multiple operations on one object
- * atomicly. The whole operation will suceed or fail, and no partial
+ * atomically. The whole operation will succeed or fail, and no partial
* results will be visible.
*
* Operations may be either reads, which can return data, or writes,
* @param init_value the init value for the algorithm
* @param init_value_len the length of the init value
* @param len the number of bytes to checksum
- * @param off the offset to start checksuming in the object
+ * @param off the offset to start checksumming in the object
* @param chunk_size optional length-aligned chunk size for checksums
* @param pchecksum where to store the checksum result for this action
* @param checksum_len the number of bytes available for the result
* @param keys_len number of bytes in keys buffer
* @param values buffer in which to store values
* @param vals_len number of bytes in values buffer
- * @returns 0 on succcess, negative error code on failure
+ * @returns 0 on success, negative error code on failure
* @returns -ERANGE if either buffer is too short
*/
CEPH_RADOS_API int rados_application_metadata_list(rados_ioctx_t io,
void assert_version(uint64_t ver);
/**
- * Guard operatation with a check that the object already exists
+ * Guard operation with a check that the object already exists
*/
void assert_exists();
size_t write_len, uint64_t off);
/**
- * Asychronously remove an object
+ * Asynchronously remove an object
*
* Queues the remove and returns.
*
* This layout will be used when new objects are created (by writing to them)
* Already existing objects will be opened with their own layout.
*
- * @param striper the targetted striper
+ * @param striper the targeted striper
* @param stripe_unit the stripe_unit value of the new object layout
* @returns 0 on success, negative error code on failure
*/
* This layout will be used when new objects are created (by writing to them)
* Already existing objects will be opened with their own layout.
*
- * @param striper the targetted striper
+ * @param striper the targeted striper
* @param stripe_count the stripe_count value of the new object layout
* @returns 0 on success, negative error code on failure
*/
* This layout will be used when new objects are created (by writing to them)
* Already existing objects will be opened with their own layout.
*
- * @param striper the targetted striper
+ * @param striper the targeted striper
* @param object_size the object_size value of the new object layout
* @returns 0 on success, negative error code on failure
*/
inline __u32 cap_to_u32_max(__u64 t) {
return std::min(t, (__u64)std::numeric_limits<uint32_t>::max());
}
-/* WARNING: If add member in utime_t, please make sure the encode/decode funtion
- * work well. For little-endian machine, we should make sure there is no padding
- * in 32-bit machine and 64-bit machine.
+/* WARNING: If you add a member to utime_t, please make sure the encode/decode functions
+ * work well. For little-endian machines, we should make sure there is no padding
+ * on either 32-bit or 64-bit machines.
* You should also modify the padding_check function.
throw reason;\r
}\r
\r
- // the spirit grammer \r
+ // the spirit grammar \r
//\r
template< class Value_type, class Iter_type >\r
class Json_grammer : public spirit_namespace::grammar< Json_grammer< Value_type, Iter_type > >\r
Int_action new_int ( boost::bind( &Semantic_actions_t::new_int, &self.actions_, _1 ) );\r
Uint64_action new_uint64 ( boost::bind( &Semantic_actions_t::new_uint64, &self.actions_, _1 ) );\r
\r
- // actual grammer\r
+ // actual grammar\r
\r
json_\r
= value_ | eps_p[ &throw_not_value ]\r
/**
* set the method that gets called before each ObjectWriteOperation.
- * If waite_time is set and the method passed involves waiting, it will wait
- * for that many miliseconds.
+ * If wait_time is set and the method passed involves waiting, it will wait
+ * for that many milliseconds.
*/
virtual void set_inject(injection_t inject, int wait_time) = 0;
/**
* sets this kvba to call inject before every ObjectWriteOperation.
* If inject is wait and wait_time is set, wait will have a 10% chance of
- * sleeping for waite_time miliseconds.
+ * sleeping for wait_time milliseconds.
*/
void set_inject(injection_t inject, int wait_time) override;
}
// This superclass is used both by kv iterators *and* by the ObjectMap
- // omap iterator. The class hiearchies are unfortunatley tied together
- // by the legacy DBOjectMap implementation :(.
+ // omap iterator. The class hierarchies are unfortunately tied together
+ // by the legacy DBObjectMap implementation :(.
class SimplestIteratorImpl {
public:
// not threadsafe
size_t TEST_GetLRUSize();
- // Retrives high pri pool ratio
+ // Retrieves high pri pool ratio
double GetHighPriPoolRatio() const;
- // Retrives high pri pool usage
+ // Retrieves high pri pool usage
size_t GetHighPriPoolUsage() const;
private:
size_t TEST_GetLRUSize();
// Sets the high pri pool ratio
void SetHighPriPoolRatio(double high_pri_pool_ratio);
- // Retrives high pri pool ratio
+ // Retrieves high pri pool ratio
double GetHighPriPoolRatio() const;
// Retrieves high pri pool usage
size_t GetHighPriPoolUsage() const;
namespace librados {
/**
- * iterator object used in implementation of the extrenal
+ * iterator object used in implementation of the external
* attributes part of the C interface of librados
*/
struct RadosXattrsIter {
*
* Still the writing to a striped object is not atomic. This means in particular that
* the size of an object may not be in sync with its content at all times.
- * As the size is always garanteed to be updated first and in an atomic way, and as
+ * As the size is always guaranteed to be updated first and in an atomic way, and as
* sparse striped objects are supported (see below), what will typically happen is
* that a reader that comes too soon after a write will read 0s instead of the actual
* data.
// return
return rcr;
} catch (ErrorCode &e) {
- // errror caught when trying to take the exclusive lock
+ // error caught when trying to take the exclusive lock
return e.m_code;
}
hobject_t hobj;
spg_t pgid;
bufferlist::const_iterator p;
- // Decoding flags. Decoding is only needed for messages catched by pipe reader.
+ // Decoding flags. Decoding is only needed for messages caught by pipe reader.
// Transition from true -> false without locks being held
// Can never see final_decode_needed == false and partial_decode_needed == true
atomic<bool> partial_decode_needed;
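A small sketch of the only lock-free clearing order that preserves the invariant stated above; the struct is invented for illustration:

```cpp
#include <atomic>

// Flags only flip true -> false, and a reader must never observe
// final_decode_needed == false together with partial_decode_needed == true.
// Clearing partial first preserves that without holding a lock.
struct DecodeFlags {
  std::atomic<bool> partial_decode_needed{true};
  std::atomic<bool> final_decode_needed{true};

  void finish_partial_decode() {
    partial_decode_needed = false;  // (true,true) -> (true,false)
  }
  void finish_final_decode() {
    // safe only once partial is already clear; the reverse order would
    // briefly expose the forbidden (final=false, partial=true) state
    final_decode_needed = false;    // (true,false) -> (false,false)
  }
};
```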
encode((uint32_t)s, payload);
if (s) {
// this should be big enough for normal min_message padding sizes. since
- // we are targetting jumbo ethernet frames around 9000 bytes, 16k should
+ // we are targeting jumbo ethernet frames around 9000 bytes, 16k should
// be more than sufficient! the compiler will statically zero this so
// that at runtime we are only adding a bufferptr reference to it.
static char zeros[16384] = {};
spg_t pgid;
bufferlist::const_iterator p;
- // Decoding flags. Decoding is only needed for messages catched by pipe reader.
+ // Decoding flags. Decoding is only needed for messages caught by pipe reader.
bool final_decode_needed;
// subop
void decode_payload() override {
p = payload.cbegin();
- // splitted to partial and final
+ // split into partial and final
decode(map_epoch, p);
if (header.version >= 2) {
decode(min_epoch, p);
eversion_t last_complete_ondisk;
bufferlist::const_iterator p;
- // Decoding flags. Decoding is only needed for messages catched by pipe reader.
+ // Decoding flags. Decoding is only needed for messages caught by pipe reader.
bool final_decode_needed;
epoch_t get_map_epoch() const override {
public:
Connection(CephContext *cct, Messenger *m)
- // we are managed exlusively by ConnectionRef; make it so you can
+ // we are managed exclusively by ConnectionRef; make it so you can
// ConnectionRef foo = new Connection;
: RefCountedObject(cct, 0),
lock("Connection::lock"),
}
/**
- * Notify each Dispatcher of a new incomming Connection. Call
+ * Notify each Dispatcher of a new incoming Connection. Call
* this function whenever a new Connection is accepted.
*
* @param con Pointer to the new Connection.
ceph::unordered_map<entity_addrvec_t, AsyncConnectionRef> conns;
/**
- * list of connection are in teh process of accepting
+ * list of connections that are in the process of accepting
*
* These are not yet in the conns map.
*/
DPDKQueuePair& qp, rte_mbuf*& m, char* va, size_t buf_len) {
static constexpr size_t max_frag_len = 15 * 1024; // 15K
- // FIXME: current all tx buf is alloced without rte_malloc
+ // FIXME: currently all tx bufs are allocated without rte_malloc
return copy_one_data_buf(qp, m, va, buf_len);
//
// Currently we break a buffer on a 15K boundary because 82599
bool insert = true;
auto beg = offset;
auto end = beg + p.len();
- // Fisrt, try to merge the packet with existing segment
+ // First, try to merge the packet with existing segment
for (auto it = map.begin(); it != map.end();) {
auto& seg_pkt = it->second;
auto seg_beg = it->first;
}
// Second, merge adjacent segments after this packet has been merged,
- // becasue this packet might fill a "whole" and make two adjacent
- // segments mergable
+ // because this packet might fill a "hole" and make two adjacent
+ // segments mergeable
for (auto it = map.begin(); it != map.end();) {
// The first segment
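An illustrative sketch (not the DPDK code itself) of the two-phase merge these comments describe:

```cpp
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <map>

// Fold the new range [beg, end) into `segs` (offset -> length), then
// coalesce neighbours that now touch because the new data filled a hole.
void insert_range(std::map<uint64_t, uint64_t>& segs,
                  uint64_t beg, uint64_t end) {
  uint64_t& len = segs[beg];
  len = std::max(len, end - beg);                 // phase 1: insert/extend
  for (auto it = segs.begin();
       it != segs.end() && std::next(it) != segs.end();) {
    auto nxt = std::next(it);
    if (it->first + it->second >= nxt->first) {   // adjacent or overlapping
      uint64_t merged_end = std::max(it->first + it->second,
                                     nxt->first + nxt->second);
      it->second = merged_end - it->first;        // phase 2: coalesce
      segs.erase(nxt);
    } else {
      ++it;
    }
  }
}
```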
// Max number of TCP payloads we can pass to NIC
uint32_t len;
if (_tcp.get_hw_features().tx_tso) {
- // FIXME: Info tap device the size of the splitted packet
+ // FIXME: Inform the tap device of the size of the split packet
len = _tcp.get_hw_features().max_packet_len - tcp_hdr_len_min - InetTraits::ip_hdr_len_min;
} else {
len = std::min(uint16_t(_tcp.get_hw_features().mtu - tcp_hdr_len_min - InetTraits::ip_hdr_len_min), _snd.mss);
*/
ceph::unordered_map<entity_addr_t, Pipe*> rank_pipe;
/**
- * list of pipes are in teh process of accepting
+ * list of pipes that are in the process of accepting
*
* These are not yet in the rank_pipe map.
*/
* 1) collection is currently idle: the method returns true. c is
* not touched.
* 2) collection is not idle: the method returns false and c is
- * called asyncronously with a value of 0 once all transactions
+ * called asynchronously with a value of 0 once all transactions
* queued on this collection prior to the call have been applied
* and committed.
*/
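A hedged call pattern for the contract above, assuming a flush_commit(Context*) entry point and the in-tree C_SaferCond helper:

```cpp
#include "common/Cond.h"  // C_SaferCond; assumed available in-tree

// `Collection` stands in for whatever type exposes the documented
// flush_commit(Context*) method.
template <typename Collection>
void wait_for_commit(Collection& coll) {
  C_SaferCond flushed;
  if (!coll.flush_commit(&flushed)) {
    // not idle: the callback fires with 0 once every transaction queued
    // on this collection before the call has been applied and committed
    flushed.wait();
  }
}
```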
* sobject_encoding detects an older/simpler version of oid
* present in pre-bobtail versions of ceph. use_pool_override
* also detects a situation where the pool of an oid can be
- * overriden for legacy operations/buffers. For non-legacy
+ * overridden for legacy operations/buffers. For non-legacy
* implementations of ObjectStore, neither of these fields are
* relevant.
*
* applies independently to each transaction element. For example,
* if a transaction contains two mutating elements "create A" and
* "delete B". And an enumeration operation is performed while this
- * transaction is pending. It is permissable for ObjectStore to
+ * transaction is pending. It is permissible for ObjectStore to
* report any of the four possible combinations of the existence of
* A and B.
*
bool empty() {
return !data.ops;
}
- /// Number of operations in the transation
+ /// Number of operations in the transaction
int get_num_ops() {
return data.ops;
}
uint64_t osd_memory_target = 0; ///< OSD memory target when autotuning cache
uint64_t osd_memory_base = 0; ///< OSD base memory when autotuning cache
double osd_memory_expected_fragmentation = 0; ///< expected memory fragmentation
- uint64_t osd_memory_cache_min = 0; ///< Min memory to assign when autotuning cahce
+ uint64_t osd_memory_cache_min = 0; ///< Min memory to assign when autotuning cache
double osd_memory_cache_resize_interval = 0; ///< Time to wait between cache resizing
std::mutex vstatfs_lock;
volatile_statfs vstatfs;
const coll_t& cid, const ghobject_t& oid) {
ceph_assert(granularity); // initialized
- // can't call this func after filter_out has been apllied
+ // can't call this func after filter_out has been applied
ceph_assert(!was_filtered_out);
if (!len) {
return;
OP_NONE = 0,
OP_INIT, ///< initial (empty) file system marker
OP_ALLOC_ADD, ///< add extent to available block storage (extent)
- OP_ALLOC_RM, ///< remove extent from availabe block storage (extent)
+ OP_ALLOC_RM, ///< remove extent from available block storage (extent)
OP_DIR_LINK, ///< (re)set a dir entry (dirname, filename, ino)
OP_DIR_UNLINK, ///< remove a dir entry (dirname, filename)
OP_DIR_CREATE, ///< create a dir (dirname)
/**
* Cleanup before replaying journal
*
- * Index implemenations may need to perform compound operations
- * which may leave the collection unstable if interupted. cleanup
+ * Index implementations may need to perform compound operations
+ * which may leave the collection unstable if interrupted. cleanup
* is called on mount to allow the CollectionIndex implementation
* to stabilize.
*
/**
* Generate new header for c oid with new seq number
*
- * Has the side effect of syncronously saving the new DBObjectMap state
+ * Has the side effect of synchronously saving the new DBObjectMap state
*/
Header _generate_new_header(const ghobject_t &oid, Header parent);
Header generate_new_header(const ghobject_t &oid, Header parent) {
CephContext* cct,
coll_t collection, ///< [in] Collection
const char *base_path, ///< [in] Path to the index root.
- int merge_at, ///< [in] Merge threshhold.
- int split_multiple, ///< [in] Split threshhold.
+ int merge_at, ///< [in] Merge threshold.
+ int split_multiple, ///< [in] Split threshold.
uint32_t index_version,///< [in] Index version
double retry_probability=0) ///< [in] retry probability
: LFNIndex(cct, collection, base_path, index_version, retry_probability),
* Encapsulates mutual exclusion for CollectionIndexes.
*
* Allowing a modification (removal or addition of an object) to occur
- * while a read is occuring (lookup of an object's path and use of
+ * while a read is occurring (lookup of an object's path and use of
* that path) may result in the path becoming invalid. Thus, during
* the lifetime of a CollectionIndex object and any paths returned
* by it, no other concurrent accesses may be allowed.
*
* User must call created when an object is created.
*
- * Syncronization: Calling code must ensure that there are no object
+ * Synchronization: Calling code must ensure that there are no object
* creations or deletions during the lifetime of a Path object (except
* of an object at that path).
*
- * Unless otherwise noted, methods which return an int return 0 on sucess
+ * Unless otherwise noted, methods which return an int return 0 on success
* and a negative error code on failure.
*/
#define WRAP_RETRY(x) { \
/**
* Moves contents of from into to.
*
- * Invalidates mangled names in to. If interupted, all objects will be
+ * Invalidates mangled names in to. If interrupted, all objects will be
* present in to before objects are removed from from. Ignores EEXIST
* while linking into to.
* @return Error Code, 0 on success
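A sketch of the crash-safe ordering this comment describes; list_objects, link_object, and remove_object are hypothetical helpers, not the LFNIndex API:

```cpp
#include <cerrno>
#include <string>
#include <vector>

// Hypothetical helpers, declared only for illustration.
std::vector<std::string> list_objects(const std::string& dir);
int link_object(const std::string& dir, const std::string& obj);
int remove_object(const std::string& dir, const std::string& obj);

// Every object is linked into `to` (EEXIST tolerated so an interrupted
// run can be replayed) before anything at all is removed from `from`.
int move_all(const std::string& from, const std::string& to) {
  for (const auto& obj : list_objects(from)) {
    int r = link_object(to, obj);
    if (r < 0 && r != -EEXIST)
      return r;                   // real failure; `from` is still intact
  }
  for (const auto& obj : list_objects(from)) {
    int r = remove_object(from, obj);
    if (r < 0)
      return r;
  }
  return 0;
}
```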
bufferlist &attr_value ///< [in] Value to save.
);
- /// Read into attr_value atribute attr_name on path.
+ /// Read attribute attr_name on path into attr_value.
int get_attr_path(
const vector<string> &path, ///< [in] Path to read.
const string &attr_name, ///< [in] Attribute to read.
}
/**
- * Gets the filename corresponsing to oid in path.
+ * Gets the filename corresponding to oid in path.
*
* @param [in] path Path in which to get filename for oid.
* @param [in] oid Object for which to get filename.
int lfn_parse_object_name_keyless(
const string &long_name, ///< [in] Name to parse
ghobject_t *out ///< [out] Resulting Object
- ); ///< @return True if successfull, False otherwise.
+ ); ///< @return True if successful, False otherwise.
/// Parse object name
int lfn_parse_object_name_poolless(
const string &long_name, ///< [in] Name to parse
ghobject_t *out ///< [out] Resulting Object
- ); ///< @return True if successfull, False otherwise.
+ ); ///< @return True if successful, False otherwise.
/// Parse object name
int lfn_parse_object_name(
const string &long_name, ///< [in] Name to parse
ghobject_t *out ///< [out] Resulting Object
- ); ///< @return True if successfull, False otherwise.
+ ); ///< @return True if successful, False otherwise.
/// Checks whether short_name is a hashed filename.
bool lfn_is_hashed_filename(
new_erasure_code_profiles[name] = profile;
}
- /// propage update pools' snap metadata to any of their tiers
+ /// propagate pools' updated snap metadata to any of their tiers
int propagate_snaps_to_tiers(CephContext *cct, const OSDMap &base);
/// filter out osds with any pending state changing