From: Josh Soref <2119212+jsoref@users.noreply.github.com>
Date: Thu, 23 Jun 2022 01:24:12 +0000 (-0400)
Subject: rgw: fix spelling errors
X-Git-Tag: v19.0.0~34^2
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=ed70d843df4d4077efd053bd8065bdf26635d239;p=ceph.git

rgw: fix spelling errors

* access
* acquire
* across
* algorithm
* alternatively
* ambiguous
* attributes
* authenticated
* automatically
* based
* between
* beware
* bootstrap
* bucket
* cache
* character
* check
* cleanup
* combinations
* committed
* comparison
* compatibility
* concurrent
* conditions
* conjunction
* connections
* considered
* contains
* correct
* creates
* crypto
* dependencies
* dequeued
* dequeuing
* derivative
* described
* determine
* deterministic
* detritus
* discrepancy
* display
* either way
* elements
* entirely
* entries
* error
* everything
* expected
* explicitly
* extend
* false
* fclose
* feature
* following
* foreign
* format
* forward
* functionality
* functions
* generated
* getting
* gracefully
* happened
* hashicorp
* healthcheck
* hierarchy
* however
* http
* identify
* idleness
* ignored
* incremental
* information
* initialization
* length
* locally
* macros
* malformed
* means
* mechanism
* multipart
* multiple
* must
* namespace
* necessary
* nonexistent
* notifications
* occurred
* official
* outputs
* outputted
* overridden
* ownership
* parallelism
* partition
* passthrough
* pause
* penguin
* perform
* permission
* populated
* position
* preferably
* prefixes
* presumably
* previous
* print
* quadruple
* reference
* regardless
* registry
* renewed
* request
* requires
* reshard
* resource
* result to
* satisfy
* secret
* separate
* separator
* setting
* shouldn't
* skipping
* some
* somehow
* something
* specific
* struct
* succeeded
* success
* successful
* successfully
* syntax
* testing
* truncated
* unambiguously
* unittest
* unmodified
* unnecessary
* valid_action
* verifying
* versioning
* visible
* whatever
* whenever

Signed-off-by: Josh Soref <2119212+jsoref@users.noreply.github.com>
---
diff --git a/examples/rgw/golang/object-upload/README.md b/examples/rgw/golang/object-upload/README.md index 31ea00006d61a..a859667009e3b 100644 --- a/examples/rgw/golang/object-upload/README.md +++ b/examples/rgw/golang/object-upload/README.md @@ -2,7 +2,7 @@ This directory contains Golang code examples on how to upload an object to an S3 bucket running on a Ceph RGW cluster. # Prerequisite -Linux machine running an RGW Ceph cluster. Preferrably started with the ``OSD=1 MON=1 MDS=0 MGR=0 RGW=1 ../src/vstart.sh --debug --new `` command. +Linux machine running an RGW Ceph cluster. Preferably started with the ``OSD=1 MON=1 MDS=0 MGR=0 RGW=1 ../src/vstart.sh --debug --new `` command. Go installed on the Linux machine. ## Workflow Procedure diff --git a/examples/rgw/java/ceph-s3-upload/README.md b/examples/rgw/java/ceph-s3-upload/README.md index 235d43666d1c1..fbd7d749b02b3 100644 --- a/examples/rgw/java/ceph-s3-upload/README.md +++ b/examples/rgw/java/ceph-s3-upload/README.md @@ -2,7 +2,7 @@ This directory contains Java code examples on how to upload an object to an S3 bucket running on a Ceph RGW cluster. # Prerequisites -Linux machine running an RGW Ceph cluster. Preferrably started with the ``OSD=1 MON=1 MDS=0 MGR=0 RGW=1 ../src/vstart.sh --debug --new `` command. +Linux machine running an RGW Ceph cluster. Preferably started with the ``OSD=1 MON=1 MDS=0 MGR=0 RGW=1 ../src/vstart.sh --debug --new `` command. Java and Maven installed on the Linux machine.
## Workflow Procedure diff --git a/qa/tasks/pykmip.py b/qa/tasks/pykmip.py index 45a5af6891b5c..3ab08a301aa0a 100644 --- a/qa/tasks/pykmip.py +++ b/qa/tasks/pykmip.py @@ -106,7 +106,7 @@ def install_packages(ctx, config): passed in to the main task. """ assert isinstance(config, dict) - log.info('Installing system dependenies for PyKMIP...') + log.info('Installing system dependencies for PyKMIP...') packages = {} for (client, _) in config.items(): diff --git a/qa/tasks/s3tests_java.py b/qa/tasks/s3tests_java.py index dbe03921c6016..78a1edb6f613d 100644 --- a/qa/tasks/s3tests_java.py +++ b/qa/tasks/s3tests_java.py @@ -182,7 +182,7 @@ class S3tests_java(Task): Configuration is read from a skelethon config file s3tests.teuth.config.yaml in the java-s3tests repository and missing information is added from the task. - Existing values are NOT overriden unless they are empty! + Existing values are NOT overridden unless they are empty! """ log.info("S3 Tests Java: Creating S3 users...") testdir = teuthology.get_testdir(self.ctx) diff --git a/qa/tasks/vault.py b/qa/tasks/vault.py index 2ff008c4dbef7..ae874eb55e35e 100644 --- a/qa/tasks/vault.py +++ b/qa/tasks/vault.py @@ -38,7 +38,7 @@ def assign_ports(ctx, config, initial_port): @contextlib.contextmanager def download(ctx, config): """ - Download Vault Release from Hashicopr website. + Download Vault Release from Hashicorp website. Remove downloaded file upon exit. """ assert isinstance(config, dict) diff --git a/qa/workunits/rgw/s3_bucket_quota.pl b/qa/workunits/rgw/s3_bucket_quota.pl index 7f5476ef676c7..6f0552ab7018c 100755 --- a/qa/workunits/rgw/s3_bucket_quota.pl +++ b/qa/workunits/rgw/s3_bucket_quota.pl @@ -16,7 +16,7 @@ Examples: =head1 DESCRIPTION -This script intends to test the rgw bucket quota funcionality using s3 interface +This script intends to test the rgw bucket quota functionality using s3 interface and reports the test results =head1 ARGUMENTS diff --git a/qa/workunits/rgw/s3_user_quota.pl b/qa/workunits/rgw/s3_user_quota.pl index 6d5c02a9a0152..04546eac84045 100755 --- a/qa/workunits/rgw/s3_user_quota.pl +++ b/qa/workunits/rgw/s3_user_quota.pl @@ -16,7 +16,7 @@ Examples: =head1 DESCRIPTION -This script intends to test the rgw user quota funcionality using s3 interface +This script intends to test the rgw user quota functionality using s3 interface and reports the test results =head1 ARGUMENTS diff --git a/qa/workunits/rgw/test_librgw_file.sh b/qa/workunits/rgw/test_librgw_file.sh index 1371ff7110755..4b9dfeb41210d 100755 --- a/qa/workunits/rgw/test_librgw_file.sh +++ b/qa/workunits/rgw/test_librgw_file.sh @@ -19,7 +19,7 @@ fi # nfsns is the main suite -# create herarchy, and then list it +# create hierarchy, and then list it echo "phase 1.1" ceph_test_librgw_file_nfsns ${K} --hier1 --dirs1 --create --rename --verbose diff --git a/qa/workunits/rgw/test_rgw_datacache.py b/qa/workunits/rgw/test_rgw_datacache.py index f070ec0f17998..2af2a0d3aa3d6 100755 --- a/qa/workunits/rgw/test_rgw_datacache.py +++ b/qa/workunits/rgw/test_rgw_datacache.py @@ -202,7 +202,7 @@ def main(): # remove datacache dir #cmd = exec_cmd('rm -rf %s' % (cache_dir)) #log.debug("RGW Datacache dir deleted") - #^ commenting for future refrence - the work unit will continue running tests and if the cache_dir is removed + #^ commenting for future reference - the work unit will continue running tests and if the cache_dir is removed # all the writes to cache will fail with errno 2 ENOENT No such file or directory. 
main() diff --git a/src/cls/2pc_queue/cls_2pc_queue.cc b/src/cls/2pc_queue/cls_2pc_queue.cc index 1e99503943a35..759d360b01484 100644 --- a/src/cls/2pc_queue/cls_2pc_queue.cc +++ b/src/cls/2pc_queue/cls_2pc_queue.cc @@ -135,7 +135,7 @@ static int cls_2pc_queue_reserve(cls_method_context_t hctx, bufferlist *in, buff } urgent_data.reserved_size += res_op.size + overhead; - // note that last id is incremented regadless of failures + // note that last id is incremented regardless of failures // to avoid "old reservation" issues below ++urgent_data.last_id; bool result; diff --git a/src/cls/refcount/cls_refcount_client.h b/src/cls/refcount/cls_refcount_client.h index 73a23a7ee28db..17af7a78ef3e0 100644 --- a/src/cls/refcount/cls_refcount_client.h +++ b/src/cls/refcount/cls_refcount_client.h @@ -19,7 +19,7 @@ * So, the regular usage would be to create an object, to increase the refcount. Then, when * wanting to have another reference to it, increase the refcount using a different tag. When * removing a reference it is required to drop the refcount (using the same tag that was used - * for that reference). When the refcount drops to zero, the object is removed automaticfally. + * for that reference). When the refcount drops to zero, the object is removed automatically. * * In order to maintain backwards compatibility with objects that were created without having * their refcount increased, the implicit_ref was added. Any object that was created without diff --git a/src/cls/rgw/cls_rgw.cc b/src/cls/rgw/cls_rgw.cc index 75a37bad63428..034660a11ed12 100644 --- a/src/cls/rgw/cls_rgw.cc +++ b/src/cls/rgw/cls_rgw.cc @@ -518,7 +518,7 @@ int rgw_bucket_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out) std::string start_after_omap_key; encode_list_index_key(hctx, op.start_obj, &start_after_omap_key); - // this is set whenenver start_after_omap_key is set to keep them in + // this is set whenever start_after_omap_key is set to keep them in // sync since this will be the returned marker when a marker is // returned cls_rgw_obj_key start_after_entry_key; @@ -3181,7 +3181,7 @@ int rgw_bucket_check_index(cls_method_context_t hctx, bufferlist *in, bufferlist * Additionally, each of the three segment functions, if successful, * is expected to return the number of entries added to the output * list as a non-negative value. As per usual, negative return values - * indicate error condtions. + * indicate error conditions. 
*/ static int rgw_bi_list_op(cls_method_context_t hctx, bufferlist *in, @@ -4457,7 +4457,7 @@ static int rgw_reshard_list(cls_method_context_t hctx, bufferlist *in, bufferlis try { decode(op, in_iter); } catch (ceph::buffer::error& err) { - CLS_LOG(1, "ERROR: rgw_cls_rehard_list(): failed to decode entry\n"); + CLS_LOG(1, "ERROR: rgw_cls_reshard_list(): failed to decode entry\n"); return -EINVAL; } cls_rgw_reshard_list_ret op_ret; @@ -4476,7 +4476,7 @@ static int rgw_reshard_list(cls_method_context_t hctx, bufferlist *in, bufferlis try { decode(entry, iter); } catch (ceph::buffer::error& err) { - CLS_LOG(1, "ERROR: rgw_cls_rehard_list(): failed to decode entry\n"); + CLS_LOG(1, "ERROR: rgw_cls_reshard_list(): failed to decode entry\n"); return -EIO; } op_ret.entries.push_back(entry); @@ -4521,7 +4521,7 @@ static int rgw_reshard_remove(cls_method_context_t hctx, bufferlist *in, bufferl try { decode(op, in_iter); } catch (ceph::buffer::error& err) { - CLS_LOG(1, "ERROR: rgw_cls_rehard_remove: failed to decode entry\n"); + CLS_LOG(1, "ERROR: rgw_cls_reshard_remove: failed to decode entry\n"); return -EINVAL; } diff --git a/src/cls/rgw/cls_rgw_client.cc b/src/cls/rgw/cls_rgw_client.cc index 5e7fba88f24a1..ed2be322da545 100644 --- a/src/cls/rgw/cls_rgw_client.cc +++ b/src/cls/rgw/cls_rgw_client.cc @@ -79,7 +79,7 @@ int CLSRGWConcurrentIO::operator()() { cleanup(); } return ret; -} // CLSRGWConcurrintIO::operator()() +} // CLSRGWConcurrentIO::operator()() /** @@ -186,7 +186,7 @@ bool BucketIndexAioManager::wait_for_completions(int valid_ret_code, return true; } -// note: currently only called by tesing code +// note: currently only called by testing code void cls_rgw_bucket_init_index(ObjectWriteOperation& o) { bufferlist in; diff --git a/src/cls/rgw/cls_rgw_client.h b/src/cls/rgw/cls_rgw_client.h index 1ae49c877bb4d..3db7306517129 100644 --- a/src/cls/rgw/cls_rgw_client.h +++ b/src/cls/rgw/cls_rgw_client.h @@ -405,7 +405,7 @@ int cls_rgw_usage_log_trim(librados::IoCtx& io_ctx, const std::string& oid, cons /** * Std::list the bucket with the starting object and filter prefix. * NOTE: this method do listing requests for each bucket index shards identified by - * the keys of the *list_results* std::map, which means the std::map should be popludated + * the keys of the *list_results* std::map, which means the std::map should be populated * by the caller to fill with each bucket index object id. * * io_ctx - IO context for rados. diff --git a/src/cls/rgw/cls_rgw_const.h b/src/cls/rgw/cls_rgw_const.h index 8595db3c9e8b9..a3398ebd0164a 100644 --- a/src/cls/rgw/cls_rgw_const.h +++ b/src/cls/rgw/cls_rgw_const.h @@ -6,7 +6,7 @@ #define RGW_CLASS "rgw" /* Special error code returned by cls bucket list operation if it was - * unable to skip past enough not visibile entries to return any + * unable to skip past enough not visible entries to return any * entries in the call. 
*/ constexpr int RGWBIAdvanceAndRetryError = -EFBIG; diff --git a/src/cls/rgw/cls_rgw_ops.h b/src/cls/rgw/cls_rgw_ops.h index 4d58909a76708..a4d76de108ba0 100644 --- a/src/cls/rgw/cls_rgw_ops.h +++ b/src/cls/rgw/cls_rgw_ops.h @@ -430,7 +430,7 @@ struct rgw_cls_list_ret { // if is_truncated is true, starting marker for next iteration; this // is necessary as it's possible after maximum number of tries we // still might have zero entries to return, in which case we have to - // at least move the ball foward + // at least move the ball forward cls_rgw_obj_key marker; // cls_filtered is not transmitted; it is assumed true for versions @@ -758,7 +758,7 @@ WRITE_CLASS_ENCODER(rgw_cls_bi_put_op) struct rgw_cls_bi_list_op { uint32_t max; - std::string name_filter; // limit resultto one object and its instances + std::string name_filter; // limit result to one object and its instances std::string marker; rgw_cls_bi_list_op() : max(0) {} diff --git a/src/cls/rgw/cls_rgw_types.h b/src/cls/rgw/cls_rgw_types.h index 5f94b9918fa46..7b6b46101ccc0 100644 --- a/src/cls/rgw/cls_rgw_types.h +++ b/src/cls/rgw/cls_rgw_types.h @@ -132,7 +132,7 @@ inline uint64_t cls_rgw_get_rounded_size(uint64_t size) { * path that ends with a delimiter and appends a new character to the * end such that when a we request bucket-index entries *after* this, * we'll get the next object after the "subdirectory". This works - * because we append a '\xFF' charater, and no valid UTF-8 character + * because we append a '\xFF' character, and no valid UTF-8 character * can contain that byte, so no valid entries can be skipped. */ inline std::string cls_rgw_after_delim(const std::string& path) { @@ -181,7 +181,7 @@ enum class RGWObjCategory : uint8_t { Main = 1, // b-i entries for standard objs - Shadow = 2, // presumfably intended for multipart shadow + Shadow = 2, // presumably intended for multipart shadow // uploads; not currently used in the codebase MultiMeta = 3, // b-i entries for multipart upload metadata objs diff --git a/src/rgw/driver/dbstore/common/dbstore.h b/src/rgw/driver/dbstore/common/dbstore.h index d3bdf88053817..fc436d55df800 100644 --- a/src/rgw/driver/dbstore/common/dbstore.h +++ b/src/rgw/driver/dbstore/common/dbstore.h @@ -156,7 +156,7 @@ struct DBOpParams { * Difference with above structure is that all * the fields are strings here to accommodate any * style identifiers used by backend db. By default - * initialized with sqlitedb style, can be overriden + * initialized with sqlitedb style, can be overridden * using InitPrepareParams() * * These identifiers are used in prepare and bind statements @@ -605,7 +605,7 @@ class DBOp { REFERENCES '{}' (BucketName) ON DELETE CASCADE ON UPDATE CASCADE \n);"; static constexpr std::string_view CreateObjectViewQ = - /* This query creats temporary view with entries from ObjectData table which have + /* This query creates temporary view with entries from ObjectData table which have * corresponding head object (i.e, with same ObjName, ObjInstance, ObjNS, ObjID) * in the Object table. * @@ -711,8 +711,8 @@ class InsertUserOp : virtual public DBOp { private: /* For existing entires, - * (1) INSERT or REPLACE - it will delete previous entry and then - * inserts new one. Since it deletes previos enties, it will - * trigger all foriegn key cascade deletes or other triggers. + * inserts new one. Since it deletes previous entries, it will + * trigger all foreign key cascade deletes or other triggers. * (2) INSERT or UPDATE - this will set NULL values to unassigned * fields. 
* more info: https://code-examples.net/en/q/377728 diff --git a/src/rgw/driver/dbstore/sqlite/sqliteDB.cc b/src/rgw/driver/dbstore/sqlite/sqliteDB.cc index dc244c07b3437..81c716c27f184 100644 --- a/src/rgw/driver/dbstore/sqlite/sqliteDB.cc +++ b/src/rgw/driver/dbstore/sqlite/sqliteDB.cc @@ -758,7 +758,7 @@ int SQLiteDB::createUserTable(const DoutPrefixProvider *dpp, DBOpParams *params) if (ret) ldpp_dout(dpp, 0)<<"CreateUserTable failed" << dendl; - ldpp_dout(dpp, 20)<<"CreateUserTable suceeded" << dendl; + ldpp_dout(dpp, 20)<<"CreateUserTable succeeded" << dendl; return ret; } @@ -774,7 +774,7 @@ int SQLiteDB::createBucketTable(const DoutPrefixProvider *dpp, DBOpParams *param if (ret) ldpp_dout(dpp, 0)<<"CreateBucketTable failed " << dendl; - ldpp_dout(dpp, 20)<<"CreateBucketTable suceeded " << dendl; + ldpp_dout(dpp, 20)<<"CreateBucketTable succeeded " << dendl; return ret; } @@ -790,7 +790,7 @@ int SQLiteDB::createObjectTable(const DoutPrefixProvider *dpp, DBOpParams *param if (ret) ldpp_dout(dpp, 0)<<"CreateObjectTable failed " << dendl; - ldpp_dout(dpp, 20)<<"CreateObjectTable suceeded " << dendl; + ldpp_dout(dpp, 20)<<"CreateObjectTable succeeded " << dendl; return ret; } @@ -806,7 +806,7 @@ int SQLiteDB::createObjectTableTrigger(const DoutPrefixProvider *dpp, DBOpParams if (ret) ldpp_dout(dpp, 0)<<"CreateObjectTableTrigger failed " << dendl; - ldpp_dout(dpp, 20)<<"CreateObjectTableTrigger suceeded " << dendl; + ldpp_dout(dpp, 20)<<"CreateObjectTableTrigger succeeded " << dendl; return ret; } @@ -822,7 +822,7 @@ int SQLiteDB::createObjectView(const DoutPrefixProvider *dpp, DBOpParams *params if (ret) ldpp_dout(dpp, 0)<<"CreateObjectView failed " << dendl; - ldpp_dout(dpp, 20)<<"CreateObjectView suceeded " << dendl; + ldpp_dout(dpp, 20)<<"CreateObjectView succeeded " << dendl; return ret; } @@ -838,7 +838,7 @@ int SQLiteDB::createQuotaTable(const DoutPrefixProvider *dpp, DBOpParams *params if (ret) ldpp_dout(dpp, 0)<<"CreateQuotaTable failed " << dendl; - ldpp_dout(dpp, 20)<<"CreateQuotaTable suceeded " << dendl; + ldpp_dout(dpp, 20)<<"CreateQuotaTable succeeded " << dendl; return ret; } @@ -854,7 +854,7 @@ int SQLiteDB::createObjectDataTable(const DoutPrefixProvider *dpp, DBOpParams *p if (ret) ldpp_dout(dpp, 0)<<"CreateObjectDataTable failed " << dendl; - ldpp_dout(dpp, 20)<<"CreateObjectDataTable suceeded " << dendl; + ldpp_dout(dpp, 20)<<"CreateObjectDataTable succeeded " << dendl; return ret; } @@ -875,7 +875,7 @@ int SQLiteDB::createLCTables(const DoutPrefixProvider *dpp) ldpp_dout(dpp, 0)<<"CreateLCEntryTable failed" << dendl; return ret; } - ldpp_dout(dpp, 20)<<"CreateLCEntryTable suceeded" << dendl; + ldpp_dout(dpp, 20)<<"CreateLCEntryTable succeeded" << dendl; schema = CreateTableSchema("LCHead", ¶ms); ret = exec(dpp, schema.c_str(), NULL); @@ -883,7 +883,7 @@ int SQLiteDB::createLCTables(const DoutPrefixProvider *dpp) ldpp_dout(dpp, 0)<<"CreateLCHeadTable failed" << dendl; (void)DeleteLCEntryTable(dpp, ¶ms); } - ldpp_dout(dpp, 20)<<"CreateLCHeadTable suceeded" << dendl; + ldpp_dout(dpp, 20)<<"CreateLCHeadTable succeeded" << dendl; return ret; } @@ -899,7 +899,7 @@ int SQLiteDB::DeleteUserTable(const DoutPrefixProvider *dpp, DBOpParams *params) if (ret) ldpp_dout(dpp, 0)<<"DeleteUserTable failed " << dendl; - ldpp_dout(dpp, 20)<<"DeleteUserTable suceeded " << dendl; + ldpp_dout(dpp, 20)<<"DeleteUserTable succeeded " << dendl; return ret; } @@ -915,7 +915,7 @@ int SQLiteDB::DeleteBucketTable(const DoutPrefixProvider *dpp, DBOpParams *param if (ret) ldpp_dout(dpp, 
0)<<"DeletebucketTable failed " << dendl; - ldpp_dout(dpp, 20)<<"DeletebucketTable suceeded " << dendl; + ldpp_dout(dpp, 20)<<"DeletebucketTable succeeded " << dendl; return ret; } @@ -931,7 +931,7 @@ int SQLiteDB::DeleteObjectTable(const DoutPrefixProvider *dpp, DBOpParams *param if (ret) ldpp_dout(dpp, 0)<<"DeleteObjectTable failed " << dendl; - ldpp_dout(dpp, 20)<<"DeleteObjectTable suceeded " << dendl; + ldpp_dout(dpp, 20)<<"DeleteObjectTable succeeded " << dendl; return ret; } @@ -947,7 +947,7 @@ int SQLiteDB::DeleteObjectDataTable(const DoutPrefixProvider *dpp, DBOpParams *p if (ret) ldpp_dout(dpp, 0)<<"DeleteObjectDataTable failed " << dendl; - ldpp_dout(dpp, 20)<<"DeleteObjectDataTable suceeded " << dendl; + ldpp_dout(dpp, 20)<<"DeleteObjectDataTable succeeded " << dendl; return ret; } @@ -963,7 +963,7 @@ int SQLiteDB::DeleteQuotaTable(const DoutPrefixProvider *dpp, DBOpParams *params if (ret) ldpp_dout(dpp, 0)<<"DeleteQuotaTable failed " << dendl; - ldpp_dout(dpp, 20)<<"DeleteQuotaTable suceeded " << dendl; + ldpp_dout(dpp, 20)<<"DeleteQuotaTable succeeded " << dendl; return ret; } @@ -977,7 +977,7 @@ int SQLiteDB::DeleteLCEntryTable(const DoutPrefixProvider *dpp, DBOpParams *para ret = exec(dpp, schema.c_str(), NULL); if (ret) ldpp_dout(dpp, 0)<<"DeleteLCEntryTable failed " << dendl; - ldpp_dout(dpp, 20)<<"DeleteLCEntryTable suceeded " << dendl; + ldpp_dout(dpp, 20)<<"DeleteLCEntryTable succeeded " << dendl; return ret; } @@ -991,7 +991,7 @@ int SQLiteDB::DeleteLCHeadTable(const DoutPrefixProvider *dpp, DBOpParams *param ret = exec(dpp, schema.c_str(), NULL); if (ret) ldpp_dout(dpp, 0)<<"DeleteLCHeadTable failed " << dendl; - ldpp_dout(dpp, 20)<<"DeleteLCHeadTable suceeded " << dendl; + ldpp_dout(dpp, 20)<<"DeleteLCHeadTable succeeded " << dendl; return ret; } @@ -1006,7 +1006,7 @@ int SQLiteDB::ListAllUsers(const DoutPrefixProvider *dpp, DBOpParams *params) if (ret) ldpp_dout(dpp, 0)<<"GetUsertable failed " << dendl; - ldpp_dout(dpp, 20)<<"GetUserTable suceeded " << dendl; + ldpp_dout(dpp, 20)<<"GetUserTable succeeded " << dendl; return ret; } @@ -1022,7 +1022,7 @@ int SQLiteDB::ListAllBuckets(const DoutPrefixProvider *dpp, DBOpParams *params) if (ret) ldpp_dout(dpp, 0)<<"Listbuckettable failed " << dendl; - ldpp_dout(dpp, 20)<<"ListbucketTable suceeded " << dendl; + ldpp_dout(dpp, 20)<<"ListbucketTable succeeded " << dendl; return ret; } @@ -1049,7 +1049,7 @@ int SQLiteDB::ListAllObjects(const DoutPrefixProvider *dpp, DBOpParams *params) if (ret) ldpp_dout(dpp, 0)<<"ListObjecttable failed " << dendl; - ldpp_dout(dpp, 20)<<"ListObjectTable suceeded " << dendl; + ldpp_dout(dpp, 20)<<"ListObjectTable succeeded " << dendl; } return ret; diff --git a/src/rgw/driver/motr/rgw_sal_motr.cc b/src/rgw/driver/motr/rgw_sal_motr.cc index 8cc8df987d2bb..3f5a46e4bc9e0 100644 --- a/src/rgw/driver/motr/rgw_sal_motr.cc +++ b/src/rgw/driver/motr/rgw_sal_motr.cc @@ -386,7 +386,7 @@ int MotrUser::store_user(const DoutPrefixProvider* dpp, orig_info.user_id = info.user_id; // XXX: we open and close motr idx 2 times in this method: // 1) on load_user_from_idx() here and 2) on do_idx_op_by_name(PUT) below. - // Maybe this can be optimised later somewhow. + // Maybe this can be optimised later somehow. 
int rc = load_user_from_idx(dpp, store, orig_info, nullptr, &objv_tr); ldpp_dout(dpp, 10) << "Get user: rc = " << rc << dendl; @@ -874,7 +874,7 @@ int MotrBucket::trim_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, int MotrBucket::remove_objs_from_index(const DoutPrefixProvider *dpp, std::list& objs_to_unlink) { - /* XXX: CHECK: Unlike RadosStore, there is no seperate bucket index table. + /* XXX: CHECK: Unlike RadosStore, there is no separate bucket index table. * Delete all the object in the list from the object table of this * bucket */ @@ -1014,7 +1014,7 @@ int MotrBucket::list_multiparts(const DoutPrefixProvider *dpp, if (prefix.size() && (0 != ent.key.name.compare(0, prefix.size(), prefix))) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << - ": skippping \"" << ent.key << + ": skipping \"" << ent.key << "\" because doesn't match prefix" << dendl; continue; } @@ -2338,7 +2338,7 @@ int MotrAtomicWriter::complete(size_t accounted_size, const std::string& etag, bufferlist bl; rgw_bucket_dir_entry ent; - // Set rgw_bucet_dir_entry. Some of the member of this structure may not + // Set rgw_bucket_dir_entry. Some of the member of this structure may not // apply to motr. For example the storage_class. // // Checkout AtomicObjectProcessor::complete() in rgw_putobj_processor.cc @@ -2838,7 +2838,7 @@ int MotrMultipartUpload::complete(const DoutPrefixProvider *dpp, // Update the dir entry and insert it to the bucket index so // the object will be seen when listing the bucket. bufferlist update_bl; - target_obj->get_key().get_index_key(&ent.key); // Change to offical name :) + target_obj->get_key().get_index_key(&ent.key); // Change to official name :) ent.meta.size = off; ent.meta.accounted_size = accounted_size; ldpp_dout(dpp, 20) << "MotrMultipartUpload::complete(): obj size=" << ent.meta.size @@ -3667,7 +3667,7 @@ int MotrStore::open_motr_idx(struct m0_uint128 *id, struct m0_idx *idx) return 0; } -// The following marcos are from dix/fid_convert.h which are not exposed. +// The following macros are from dix/fid_convert.h which are not exposed. enum { M0_DIX_FID_DEVICE_ID_OFFSET = 32, M0_DIX_FID_DIX_CONTAINER_MASK = (1ULL << M0_DIX_FID_DEVICE_ID_OFFSET) diff --git a/src/rgw/driver/motr/rgw_sal_motr.h b/src/rgw/driver/motr/rgw_sal_motr.h index 3a110e36f173a..ecfabfc151814 100644 --- a/src/rgw/driver/motr/rgw_sal_motr.h +++ b/src/rgw/driver/motr/rgw_sal_motr.h @@ -70,11 +70,11 @@ protected: // of RGW instances under heavy use. If you would like to turn off cache expiry, // set this value to zero. // - // Currently POC hasn't implemented the watch-notify menchanism yet. So the + // Currently POC hasn't implemented the watch-notify mechanism yet. So the // current implementation is similar to cortx-s3server which is based on expiry // time. TODO: see comments on distribute_cache). // - // Beaware: Motr object data is not cached in current POC as RGW! + // Beware: Motr object data is not cached in current POC as RGW! // RGW caches the first chunk (4MB by default). ObjectCache cache; @@ -877,7 +877,7 @@ public: // object part index and for each part an entry is created in extended index. // The entry for the object is created in bucket (object list) index. The part // index is deleted and an entry removed from bucket_nnn_multipart_index. Like -// bucket multipart index, bucket part extened metadata index is created during +// bucket multipart index, bucket part extended metadata index is created during // bucket creation.
// // The extended metadata index is used mainly due to fault tolerant diff --git a/src/rgw/driver/rados/rgw_bucket.h b/src/rgw/driver/rados/rgw_bucket.h index 857d530aab50f..706683e6bd102 100644 --- a/src/rgw/driver/rados/rgw_bucket.h +++ b/src/rgw/driver/rados/rgw_bucket.h @@ -110,7 +110,7 @@ public: }; /** - * store a list of the user's buckets, with associated functinos. + * store a list of the user's buckets, with associated functions. */ class RGWUserBuckets { std::map buckets; diff --git a/src/rgw/driver/rados/rgw_cr_rados.cc b/src/rgw/driver/rados/rgw_cr_rados.cc index 1baff16ff42e2..e7174908dd773 100644 --- a/src/rgw/driver/rados/rgw_cr_rados.cc +++ b/src/rgw/driver/rados/rgw_cr_rados.cc @@ -795,7 +795,7 @@ int RGWAsyncFetchRemoteObj::_send_request(const DoutPrefixProvider *dpp) } else { // r >= 0 if (bytes_transferred) { - // send notification that object was succesfully synced + // send notification that object was successfully synced std::string user_id = "rgw sync"; std::string req_id = "0"; diff --git a/src/rgw/driver/rados/rgw_d3n_datacache.cc b/src/rgw/driver/rados/rgw_d3n_datacache.cc index e07b5f4bd6bf5..18d213cf6c342 100644 --- a/src/rgw/driver/rados/rgw_d3n_datacache.cc +++ b/src/rgw/driver/rados/rgw_d3n_datacache.cc @@ -143,11 +143,11 @@ int D3nDataCache::d3n_io_write(bufferlist& bl, unsigned int len, std::string oid // Check whether fclose returned an error if (r != 0) { - ldout(cct, 0) << "ERROR: D3nDataCache::fclsoe file has return error, errno=" << errno << dendl; + ldout(cct, 0) << "ERROR: D3nDataCache::fclose file has return error, errno=" << errno << dendl; return -errno; } - { // update cahce_map entries for new chunk in cache + { // update cache_map entries for new chunk in cache const std::lock_guard l(d3n_cache_lock); chunk_info = new D3nChunkDataInfo; chunk_info->oid = oid; @@ -294,7 +294,7 @@ bool D3nDataCache::get(const string& oid, const off_t len) struct D3nChunkDataInfo* chdo = iter->second; struct stat st; int r = stat(location.c_str(), &st); - if ( r != -1 && st.st_size == len) { // file exists and containes required data range length + if ( r != -1 && st.st_size == len) { // file exists and contains required data range length exist = true; /*LRU*/ /*get D3nChunkDataInfo*/ diff --git a/src/rgw/driver/rados/rgw_data_sync.cc b/src/rgw/driver/rados/rgw_data_sync.cc index bbd3aaf448dad..02965f9073d65 100644 --- a/src/rgw/driver/rados/rgw_data_sync.cc +++ b/src/rgw/driver/rados/rgw_data_sync.cc @@ -3515,7 +3515,7 @@ class CheckBucketShardStatusIsIncremental : public RGWReadBucketPipeSyncStatusCo class CheckAllBucketShardStatusIsIncremental : public RGWShardCollectCR { // start with 1 shard, and only spawn more if we detect an existing shard. - // this makes the backward compatilibility check far less expensive in the + // this makes the backward compatibility check far less expensive in the // general case where no shards exist static constexpr int initial_concurrent_shards = 1; static constexpr int max_concurrent_shards = 16; @@ -4320,7 +4320,7 @@ public: * create index from key -> , and from marker -> key * this is useful so that we can insure that we only have one * entry for any key that is used. This is needed when doing - * incremenatl sync of data, and we don't want to run multiple + * incremental sync of data, and we don't want to run multiple * concurrent sync operations for the same bucket shard * Also, we should make sure that we don't run concurrent operations on the same key with * different ops. 
@@ -5911,7 +5911,7 @@ int RGWSyncBucketCR::operate(const DoutPrefixProvider *dpp) return set_cr_error(retcode); } if (bucket_status.state != BucketSyncState::Stopped) { - // make sure that state is changed to stopped localy + // make sure that state is changed to stopped locally bucket_status.state = BucketSyncState::Stopped; yield call(new WriteCR(dpp, env->driver, status_obj, bucket_status, &objv, false)); diff --git a/src/rgw/driver/rados/rgw_datalog.cc b/src/rgw/driver/rados/rgw_datalog.cc index 167cbcdba4a89..9274dc2ae9be1 100644 --- a/src/rgw/driver/rados/rgw_datalog.cc +++ b/src/rgw/driver/rados/rgw_datalog.cc @@ -618,7 +618,7 @@ void RGWDataChangesLog::update_renewed(const rgw_bucket_shard& bs, auto status = _get_change(bs, gen); l.unlock(); - ldout(cct, 20) << "RGWDataChangesLog::update_renewd() bucket_name=" + ldout(cct, 20) << "RGWDataChangesLog::update_renewed() bucket_name=" << bs.bucket.name << " shard_id=" << bs.shard_id << " expiration=" << expiration << dendl; diff --git a/src/rgw/driver/rados/rgw_lc_tier.cc b/src/rgw/driver/rados/rgw_lc_tier.cc index c52acef654f37..2dd8a6625fcc1 100644 --- a/src/rgw/driver/rados/rgw_lc_tier.cc +++ b/src/rgw/driver/rados/rgw_lc_tier.cc @@ -454,7 +454,7 @@ int RGWLCStreamRead::init_rest_obj() { rest_obj.content_len = m_part_size; } - /* For mulitpart attrs are sent as part of InitMultipartCR itself */ + /* For multipart attrs are sent as part of InitMultipartCR itself */ if (multipart) { return 0; } diff --git a/src/rgw/driver/rados/rgw_notify.cc b/src/rgw/driver/rados/rgw_notify.cc index 3935957bc5c3e..ecbccc8b220b0 100644 --- a/src/rgw/driver/rados/rgw_notify.cc +++ b/src/rgw/driver/rados/rgw_notify.cc @@ -199,7 +199,7 @@ private: std::vector entryProcessingResultString = {"Failure", "Successful", "Sleeping", "Expired", "Migrating"}; // processing of a specific entry - // return whether processing was successfull (true) or not (false) + // return whether processing was successful (true) or not (false) EntryProcessingResult process_entry(const ConfigProxy& conf, persistency_tracker& entry_persistency_tracker, const cls_queue_entry& entry, yield_context yield) { event_entry_t event_entry; @@ -577,7 +577,7 @@ private: for (const auto& queue_name : queues) { // try to lock the queue to check if it is owned by this rgw - // or if ownershif needs to be taken + // or if ownership needs to be taken librados::ObjectWriteOperation op; op.assert_exists(); rados::cls::lock::lock(&op, queue_name+"_lock", @@ -612,7 +612,7 @@ private: // start processing this queue spawn::spawn(io_context, [this, &queue_gc, &queue_gc_lock, queue_name](yield_context yield) { process_queue(queue_name, yield); - // if queue processing ended, it measn that the queue was removed or not owned anymore + // if queue processing ended, it means that the queue was removed or not owned anymore // mark it for deletion std::lock_guard lock_guard(queue_gc_lock); queue_gc.push_back(queue_name); @@ -790,7 +790,7 @@ int remove_persistent_topic(const std::string& topic_name, optional_yield y) { return remove_persistent_topic(s_manager, s_manager->rados_store.getRados()->get_notif_pool_ctx(), topic_name, y); } -rgw::sal::Object* get_object_with_atttributes( +rgw::sal::Object* get_object_with_attributes( const reservation_t& res, rgw::sal::Object* obj) { // in case of copy obj, the tags and metadata are taken from source const auto src_obj = res.src_object ? 
res.src_object : obj; @@ -820,7 +820,7 @@ static inline void filter_amz_meta(meta_map_t& dest, const meta_map_t& src) { static inline void metadata_from_attributes( reservation_t& res, rgw::sal::Object* obj) { auto& metadata = res.x_meta_map; - const auto src_obj = get_object_with_atttributes(res, obj); + const auto src_obj = get_object_with_attributes(res, obj); if (!src_obj) { return; } @@ -838,7 +838,7 @@ static inline void metadata_from_attributes( static inline void tags_from_attributes( const reservation_t& res, rgw::sal::Object* obj, KeyMultiValueMap& tags) { - const auto src_obj = get_object_with_atttributes(res, obj); + const auto src_obj = get_object_with_attributes(res, obj); if (!src_obj) { return; } diff --git a/src/rgw/driver/rados/rgw_notify.h b/src/rgw/driver/rados/rgw_notify.h index 460a7bacb5dac..20f0c17e5bb5b 100644 --- a/src/rgw/driver/rados/rgw_notify.h +++ b/src/rgw/driver/rados/rgw_notify.h @@ -23,7 +23,7 @@ struct rgw_obj_key; namespace rgw::notify { // initialize the notification manager -// notification manager is dequeing the 2-phase-commit queues +// notification manager is dequeuing the 2-phase-commit queues // and send the notifications to the endpoints bool init(CephContext* cct, rgw::sal::RadosStore* store, const DoutPrefixProvider *dpp); diff --git a/src/rgw/driver/rados/rgw_putobj_processor.cc b/src/rgw/driver/rados/rgw_putobj_processor.cc index b6d08e5c7f383..091adb9830c0c 100644 --- a/src/rgw/driver/rados/rgw_putobj_processor.cc +++ b/src/rgw/driver/rados/rgw_putobj_processor.cc @@ -189,7 +189,7 @@ RadosWriter::~RadosWriter() * Such race condition is caused by the fact that the multipart object is the gatekeeper of a multipart * upload, when it is deleted, a second upload would start with the same suffix("2/"), therefore, objects * written by the second upload may be deleted by the first upload. 
- * details is describled on #11749 + * details are described on #11749 * * The above comment still stands, but instead of searching for a specific object in the multipart * namespace, we just make sure that we remove the object that is marked as the head object after diff --git a/src/rgw/driver/rados/rgw_rados.cc b/src/rgw/driver/rados/rgw_rados.cc index 3ee43dd91a89c..2b5f7c4e7a66f 100644 --- a/src/rgw/driver/rados/rgw_rados.cc +++ b/src/rgw/driver/rados/rgw_rados.cc @@ -1911,7 +1911,7 @@ int RGWRados::Bucket::List::list_objects_ordered( if (cur_end_marker_valid && cur_end_marker <= index_key) { truncated = false; ldpp_dout(dpp, 10) << __func__ << - ": finished due to gitting end marker of \"" << cur_end_marker << + ": finished due to getting end marker of \"" << cur_end_marker << "\" with \"" << entry.key << "\"" << dendl; goto done; } @@ -2194,14 +2194,14 @@ int RGWRados::Bucket::List::list_objects_unordered(const DoutPrefixProvider *dpp if (!params.list_versions && !entry.is_visible()) { ldpp_dout(dpp, 20) << __func__ << - ": skippping \"" << index_key << - "\" because not listing versions and entry not visibile" << dendl; + ": skipping \"" << index_key << + "\" because not listing versions and entry not visible" << dendl; continue; } if (params.enforce_ns && obj.ns != params.ns) { ldpp_dout(dpp, 20) << __func__ << - ": skippping \"" << index_key << + ": skipping \"" << index_key << "\" because namespace does not match" << dendl; continue; } @@ -2210,7 +2210,7 @@ int RGWRados::Bucket::List::list_objects_unordered(const DoutPrefixProvider *dpp // we're not guaranteed items will come in order, so we have // to loop through all ldpp_dout(dpp, 20) << __func__ << - ": skippping \"" << index_key << + ": skipping \"" << index_key << "\" because after end_marker" << dendl; continue; } @@ -2218,7 +2218,7 @@ int RGWRados::Bucket::List::list_objects_unordered(const DoutPrefixProvider *dpp if (params.access_list_filter && !params.access_list_filter->filter(obj.name, index_key.name)) { ldpp_dout(dpp, 20) << __func__ << - ": skippping \"" << index_key << + ": skipping \"" << index_key << "\" because doesn't match filter" << dendl; continue; } @@ -2226,7 +2226,7 @@ int RGWRados::Bucket::List::list_objects_unordered(const DoutPrefixProvider *dpp if (params.prefix.size() && (0 != obj.name.compare(0, params.prefix.size(), params.prefix))) { ldpp_dout(dpp, 20) << __func__ << - ": skippping \"" << index_key << + ": skipping \"" << index_key << "\" because doesn't match prefix" << dendl; continue; } @@ -3297,7 +3297,7 @@ done_cancel: /* we lost in a race. There are a few options: * - existing object was rewritten (ECANCELED) - * - non existing object was created (EEXIST) + * - nonexistent object was created (EEXIST) * - object was removed (ENOENT) * should treat it as a success */ @@ -4627,7 +4627,7 @@ int RGWRados::copy_obj(RGWObjectCtx& obj_ctx, if (src_attrs.count(RGW_ATTR_CRYPT_MODE)) { // Current implementation does not follow S3 spec and even // may result in data corruption silently when copying - // multipart objects acorss pools. So reject COPY operations +// multipart objects across pools. So reject COPY operations //on encrypted objects before it is fully functional. ldpp_dout(dpp, 0) << "ERROR: copy op for encrypted object " << src_obj << " has not been implemented."
<< dendl; @@ -9451,7 +9451,7 @@ int RGWRados::cls_bucket_list_ordered(const DoutPrefixProvider *dpp, for (auto& r : shard_list_results) { results_trackers.emplace_back(r.first, r.second, shard_oids[r.first]); - // if any *one* shard's result is trucated, the entire result is + // if any *one* shard's result is truncated, the entire result is // truncated *is_truncated = *is_truncated || r.second.is_truncated; diff --git a/src/rgw/driver/rados/rgw_rest_log.h b/src/rgw/driver/rados/rgw_rest_log.h index 02b1d133fc5b2..b3a8e49d1b295 100644 --- a/src/rgw/driver/rados/rgw_rest_log.h +++ b/src/rgw/driver/rados/rgw_rest_log.h @@ -331,7 +331,7 @@ public: RGWHandler_REST* get_handler(rgw::sal::Driver* driver, req_state* const, const rgw::auth::StrategyRegistry& auth_registry, - const std::string& frontend_prefixs) override { + const std::string& frontend_prefixes) override { return new RGWHandler_Log(auth_registry); } }; diff --git a/src/rgw/driver/rados/rgw_sal_rados.cc b/src/rgw/driver/rados/rgw_sal_rados.cc index b60ddd7a4d18b..478d7a5b7067d 100644 --- a/src/rgw/driver/rados/rgw_sal_rados.cc +++ b/src/rgw/driver/rados/rgw_sal_rados.cc @@ -330,7 +330,7 @@ int RadosBucket::remove(const DoutPrefixProvider* dpp, RGWObjVersionTracker ot; // if we deleted children above we will force delete, as any that - // remain is detrius from a prior bug + // remain is detritus from a prior bug ret = store->getRados()->delete_bucket(info, ot, y, dpp, !delete_children); if (ret < 0) { ldpp_dout(dpp, -1) << "ERROR: could not remove bucket " << @@ -3141,7 +3141,7 @@ int RadosLuaManager::remove_package(const DoutPrefixProvider *dpp, optional_yiel librados::ObjectWriteOperation op; size_t pos = package_name.find(" "); if (pos != package_name.npos) { - // remove specfic version of the the package + // remove specific version of the package op.omap_rm_keys(std::set({package_name})); auto ret = rgw_rados_operate(dpp, ioctx, PACKAGE_LIST_OBJECT_NAME, &op, y); diff --git a/src/rgw/driver/rados/rgw_sync.cc b/src/rgw/driver/rados/rgw_sync.cc index c973a039efc84..251e9b395e0c7 100644 --- a/src/rgw/driver/rados/rgw_sync.cc +++ b/src/rgw/driver/rados/rgw_sync.cc @@ -1767,7 +1767,7 @@ public: sync_marker, tn)); /* - * mdlog_marker: the remote sync marker positiion + * mdlog_marker: the remote sync marker position * sync_marker: the local sync marker position * max_marker: the max mdlog position that we fetched * marker: the current position we try to sync diff --git a/src/rgw/driver/rados/rgw_sync_module_aws.cc b/src/rgw/driver/rados/rgw_sync_module_aws.cc index cdcd831e98923..46b99968a5069 100644 --- a/src/rgw/driver/rados/rgw_sync_module_aws.cc +++ b/src/rgw/driver/rados/rgw_sync_module_aws.cc @@ -60,7 +60,7 @@ static string obj_to_aws_path(const rgw_obj& obj) # anything below here is for non trivial configuration - # can be used in conjuction with the above + # can be used in conjunction with the above "default": { "connection": { @@ -107,7 +107,7 @@ static string obj_to_aws_path(const rgw_obj& obj) target path optional variables: (evaluated at init) -sid: sync instance id, randomly generated by sync process on first sync initalization +sid: sync instance id, randomly generated by sync process on first sync initialization zonegroup: zonegroup name zonegroup_id: zonegroup name zone: zone name diff --git a/src/rgw/driver/rados/rgw_sync_module_es.cc b/src/rgw/driver/rados/rgw_sync_module_es.cc index 4e8eb6201db4a..13c2057bf7793 100644 --- a/src/rgw/driver/rados/rgw_sync_module_es.cc +++
b/src/rgw/driver/rados/rgw_sync_module_es.cc @@ -608,7 +608,7 @@ struct es_obj_metadata { f->open_array_section("custom-date"); for (auto i : custom_date) { /* - * try to exlicitly parse date field, otherwise elasticsearch could reject the whole doc, + * try to explicitly parse date field, otherwise elasticsearch could reject the whole doc, * which will end up with failed sync */ real_time t; diff --git a/src/rgw/driver/rados/rgw_zone.h b/src/rgw/driver/rados/rgw_zone.h index 1f837e312c098..446f1efda91a6 100644 --- a/src/rgw/driver/rados/rgw_zone.h +++ b/src/rgw/driver/rados/rgw_zone.h @@ -318,7 +318,7 @@ struct RGWZoneGroup : public RGWSystemMetaObj { // TODO: Maybe convert hostnames to a map> for // endpoint_type->hostnames /* -20:05 < _robbat21irssi> maybe I do someting like: if (hostname_map.empty()) { populate all map keys from hostnames; }; +20:05 < _robbat21irssi> maybe I do something like: if (hostname_map.empty()) { populate all map keys from hostnames; }; 20:05 < _robbat21irssi> but that's a later compatability migration planning bit 20:06 < yehudasa> more like if (!hostnames.empty()) { 20:06 < yehudasa> for (std::list::iterator iter = hostnames.begin(); iter != hostnames.end(); ++iter) { diff --git a/src/rgw/jwt-cpp/jwt.h b/src/rgw/jwt-cpp/jwt.h index b86fb57b032f6..352b80010b866 100644 --- a/src/rgw/jwt-cpp/jwt.h +++ b/src/rgw/jwt-cpp/jwt.h @@ -74,7 +74,7 @@ namespace jwt { namespace helper { inline std::string extract_pubkey_from_cert(const std::string& certstr, const std::string& pw = "") { - // TODO: Cannot find the exact version this change happended + // TODO: Cannot find the exact version this change happened #if OPENSSL_VERSION_NUMBER <= 0x1000114fL std::unique_ptr certbio(BIO_new_mem_buf(const_cast(certstr.data()), certstr.size()), BIO_free_all); #else @@ -203,7 +203,7 @@ namespace jwt { return alg_name; } private: - /// HMAC secrect + /// HMAC secret const std::string secret; /// HMAC hash generator const EVP_MD*(*md)(); @@ -821,7 +821,7 @@ namespace jwt { /** * Get type of contained object * \return Type - * \throws std::logic_error An internal error occured + * \throws std::logic_error An internal error occurred */ type get_type() const { if (val.is()) return type::null; @@ -1045,7 +1045,7 @@ namespace jwt { std::unordered_map header_claims; public: /** - * Check if algortihm is present ("alg") + * Check if algorithm is present ("alg") * \return true if present, false otherwise */ bool has_algorithm() const noexcept { return has_header_claim("alg"); } @@ -1119,7 +1119,7 @@ namespace jwt { */ class decoded_jwt : public header, public payload { protected: - /// Unmodifed token, as passed to constructor + /// Unmodified token, as passed to constructor const std::string token; /// Header part decoded from base64 std::string header; diff --git a/src/rgw/rgw-gap-list b/src/rgw/rgw-gap-list index 5018cedd7cb65..983e893057790 100755 --- a/src/rgw/rgw-gap-list +++ b/src/rgw/rgw-gap-list @@ -2,7 +2,7 @@ # Last revision 2023-01-13 -# NOTE: This script based based on rgw-orphan-list but doing the +# NOTE: This script based on rgw-orphan-list but doing the # reverse calculation. # NOTE: The awk included in this script replaces the 'ceph-diff-sorted' @@ -276,7 +276,7 @@ for myfile in $rados_out $rgwadmin_out; do fi done -# Create an awk script in a file for parsing the two command outoputs. +# Create an awk script in a file for parsing the two command outputs. 
log "Creating awk script for comparing outputs: ${incremental_grep_awk}" cat <<"EOF" >$incremental_grep_awk @@ -292,7 +292,7 @@ cat <<"EOF" >$incremental_grep_awk # indicates a possible deleted tail object and the accompanying # bucket / user object name is output, assuming it had not been # previously identified. -# - A map of outputed bucket / user object is maintained in memory +# - A map of outputted bucket / user object is maintained in memory # * If a value appears in $rados_out, but not in $rgwadmin_out, the # $rados_out file is iterated until the $rados_out line is equal # or > (alphabetically) the value from the $rgwadmin_out file. diff --git a/src/rgw/rgw-orphan-list b/src/rgw/rgw-orphan-list index c8856e8eeec58..70557947e5df9 100755 --- a/src/rgw/rgw-orphan-list +++ b/src/rgw/rgw-orphan-list @@ -159,7 +159,7 @@ rados_ls() { namespace_found=1 fi - # check for locators (w/o namespace); we idenitfy them by skipping + # check for locators (w/o namespace); we identify them by skipping # past the empty namespace (i.e., one TAB), skipping past the oid, # then looking for a TAB; note we use egrep to get the '+' character # and the $ in front of the ' allows the \t to be interpreted as a TAB diff --git a/src/rgw/rgw_acl_swift.cc b/src/rgw/rgw_acl_swift.cc index f1ca68d637d75..c6f06bbd37cc0 100644 --- a/src/rgw/rgw_acl_swift.cc +++ b/src/rgw/rgw_acl_swift.cc @@ -384,7 +384,7 @@ boost::optional RGWAccessControlPolicy_SWIFTAcct::to_str() const std::vector readwrite; std::vector readonly; - /* Parition the grant map into three not-overlapping groups. */ + /* Partition the grant map into three not-overlapping groups. */ for (const auto& item : get_acl().get_grant_map()) { const ACLGrant& grant = item.second; const uint32_t perm = grant.get_permission().get_permissions(); diff --git a/src/rgw/rgw_admin.cc b/src/rgw/rgw_admin.cc index 04d085daee594..c19d44c9e5366 100644 --- a/src/rgw/rgw_admin.cc +++ b/src/rgw/rgw_admin.cc @@ -2870,11 +2870,11 @@ static int bucket_sync_status(rgw::sal::Driver* driver, const RGWBucketInfo& inf for (auto& zone_id : zone_ids) { auto z = static_cast(driver)->svc()->zone->get_zonegroup().zones.find(zone_id.id); - if (z == static_cast(driver)->svc()->zone->get_zonegroup().zones.end()) { /* should't happen */ + if (z == static_cast(driver)->svc()->zone->get_zonegroup().zones.end()) { /* shouldn't happen */ continue; } auto c = zone_conn_map.find(zone_id.id); - if (c == zone_conn_map.end()) { /* should't happen */ + if (c == zone_conn_map.end()) { /* shouldn't happen */ continue; } @@ -7641,7 +7641,7 @@ next: do { entries.clear(); - // if object is specified, we use that as a filter to only retrieve some some entries + // if object is specified, we use that as a filter to only retrieve some entries ret = static_cast(driver)->getRados()->bi_list(bs, object, marker, max_entries, &entries, &is_truncated, null_yield); if (ret < 0) { ldpp_dout(dpp(), 0) << "ERROR: bi_list(): " << cpp_strerror(-ret) << dendl; diff --git a/src/rgw/rgw_amqp.cc b/src/rgw/rgw_amqp.cc index 3014edd1db09d..67cbbe5c67715 100644 --- a/src/rgw/rgw_amqp.cc +++ b/src/rgw/rgw_amqp.cc @@ -301,7 +301,7 @@ std::string to_string(amqp_status_enum s) { } } -// TODO: add status_to_string on the connection object to prinf full status +// TODO: add status_to_string on the connection object to print full status // convert int status to string - including RGW specific values std::string status_to_string(int s) { @@ -688,7 +688,7 @@ private: // TODO: add exponential backoff for retries conn->next_reconnect = now + 
reconnect_time; } else { - ldout(cct, 10) << "AMQP run: connection '" << to_string(conn_id) << "' retry successfull" << dendl; + ldout(cct, 10) << "AMQP run: connection '" << to_string(conn_id) << "' retry successful" << dendl; } } INCREMENT_AND_CONTINUE(conn_it); diff --git a/src/rgw/rgw_asio_client.cc b/src/rgw/rgw_asio_client.cc index 51d43140ba56d..145e16139d7e9 100644 --- a/src/rgw/rgw_asio_client.cc +++ b/src/rgw/rgw_asio_client.cc @@ -119,9 +119,9 @@ size_t ClientIO::send_status(int status, const char* status_name) size_t ClientIO::send_100_continue() { - const char HTTTP_100_CONTINUE[] = "HTTP/1.1 100 CONTINUE\r\n\r\n"; - const size_t sent = txbuf.sputn(HTTTP_100_CONTINUE, - sizeof(HTTTP_100_CONTINUE) - 1); + const char HTTP_100_CONTINUE[] = "HTTP/1.1 100 CONTINUE\r\n\r\n"; + const size_t sent = txbuf.sputn(HTTP_100_CONTINUE, + sizeof(HTTP_100_CONTINUE) - 1); flush(); sent100continue = true; return sent; diff --git a/src/rgw/rgw_asio_frontend.cc b/src/rgw/rgw_asio_frontend.cc index e7d39d7cece61..23ccfcafeb086 100644 --- a/src/rgw/rgw_asio_frontend.cc +++ b/src/rgw/rgw_asio_frontend.cc @@ -513,7 +513,7 @@ tcp::endpoint parse_endpoint(boost::asio::string_view input, return endpoint; } if (addr_end + 1 < input.size()) { - // :port must must follow [ipv6] + // :port must follow [ipv6] if (input[addr_end + 1] != ':') { ec = boost::asio::error::invalid_argument; return endpoint; diff --git a/src/rgw/rgw_auth.h b/src/rgw/rgw_auth.h index 82e0d0c9755a4..2668af3a3c1d8 100644 --- a/src/rgw/rgw_auth.h +++ b/src/rgw/rgw_auth.h @@ -220,7 +220,7 @@ public: reason(reason) { } - /* Allow only the reasonable combintations - returning just Completer + /* Allow only the reasonable combinations - returning just Completer * without accompanying IdentityApplier is strictly prohibited! */ explicit AuthResult(IdentityApplier::aplptr_t&& applier) : result_pair(std::move(applier), nullptr) { @@ -236,7 +236,7 @@ public: /* Engine doesn't grant the access but also doesn't reject it. */ DENIED, - /* Engine successfully authenicated requester. */ + /* Engine successfully authenticated requester. */ GRANTED, /* Engine strictly indicates that a request should be rejected @@ -311,7 +311,7 @@ public: /* Abstract class for stacking sub-engines to expose them as a single * Engine. It is responsible for ordering its sub-engines and managing - * fall-backs between them. Derivatee is supposed to encapsulate engine + * fall-backs between them. Derivative is supposed to encapsulate engine * instances and add them using the add_engine() method in the order it * wants to be tried during the call to authenticate(). * diff --git a/src/rgw/rgw_auth_keystone.cc b/src/rgw/rgw_auth_keystone.cc index a1d76c3aaf323..552159823bb4b 100644 --- a/src/rgw/rgw_auth_keystone.cc +++ b/src/rgw/rgw_auth_keystone.cc @@ -662,7 +662,7 @@ rgw::auth::Engine::result_t EC2Engine::authenticate( const string_to_sign_t& string_to_sign, const signature_factory_t& signature_factory, const completer_factory_t& completer_factory, - /* Passthorugh only! */ + /* Passthrough only! 
*/ const req_state* s, optional_yield y) const { diff --git a/src/rgw/rgw_basic_types.h b/src/rgw/rgw_basic_types.h index 4cffb333288ea..a8190aa35adab 100644 --- a/src/rgw/rgw_basic_types.h +++ b/src/rgw/rgw_basic_types.h @@ -66,12 +66,12 @@ struct rgw_zone_id { rgw_zone_id(std::string&& _id) : id(std::move(_id)) {} void encode(ceph::buffer::list& bl) const { - /* backward compatiblity, not using ENCODE_{START,END} macros */ + /* backward compatibility, not using ENCODE_{START,END} macros */ ceph::encode(id, bl); } void decode(ceph::buffer::list::const_iterator& bl) { - /* backward compatiblity, not using DECODE_{START,END} macros */ + /* backward compatibility, not using DECODE_{START,END} macros */ ceph::decode(id, bl); } diff --git a/src/rgw/rgw_common.cc b/src/rgw/rgw_common.cc index 26557afd8d695..6c07e32324358 100644 --- a/src/rgw/rgw_common.cc +++ b/src/rgw/rgw_common.cc @@ -201,10 +201,10 @@ is_err() const // S3 authorization and some other processes depending on the requestURI // The absoluteURI can start with "http://", "https://", "ws://" or "wss://" static string get_abs_path(const string& request_uri) { - const static string ABS_PREFIXS[] = {"http://", "https://", "ws://", "wss://"}; + const static string ABS_PREFIXES[] = {"http://", "https://", "ws://", "wss://"}; bool isAbs = false; for (int i = 0; i < 4; ++i) { - if (boost::algorithm::starts_with(request_uri, ABS_PREFIXS[i])) { + if (boost::algorithm::starts_with(request_uri, ABS_PREFIXES[i])) { isAbs = true; break; } @@ -351,7 +351,7 @@ void set_req_state_err(req_state* s, int err_no, const string& err_msg) /* TODO(rzarzynski): there never ever should be a check like this one. * It's here only for the sake of the patch's backportability. Further * commits will move the logic to a per-RGWHandler replacement of - * the end_header() function. Alternativaly, we might consider making + * the end_header() function. Alternatively, we might consider making * that just for the dump(). Please take a look on @cbodley's comments * in PR #10690 (https://github.com/ceph/ceph/pull/10690). 
*/ s->err.err_code = err_msg; @@ -2456,7 +2456,7 @@ void RGWBucketEnt::dump(Formatter *f) const encode_json("size", size, f); encode_json("size_rounded", size_rounded, f); utime_t ut(creation_time); - encode_json("mtime", ut, f); /* mtime / creation time discrepency needed for backward compatibility */ + encode_json("mtime", ut, f); /* mtime / creation time discrepancy needed for backward compatibility */ encode_json("count", count, f); encode_json("placement_rule", placement_rule.to_str(), f); } diff --git a/src/rgw/rgw_common.h b/src/rgw/rgw_common.h index ee17e2c8d43e8..85d5ab2c155be 100644 --- a/src/rgw/rgw_common.h +++ b/src/rgw/rgw_common.h @@ -1531,14 +1531,14 @@ struct perm_state_base { const RGWBucketInfo& _bucket_info, int _perm_mask, bool _defer_to_bucket_acls, - boost::optional _bucket_acess_conf = boost::none) : + boost::optional _bucket_access_conf = boost::none) : cct(_cct), env(_env), identity(_identity), bucket_info(_bucket_info), perm_mask(_perm_mask), defer_to_bucket_acls(_defer_to_bucket_acls), - bucket_access_conf(_bucket_acess_conf) + bucket_access_conf(_bucket_access_conf) {} virtual ~perm_state_base() {} @@ -1684,7 +1684,7 @@ extern std::string url_decode(const std::string_view& src_str, extern void url_encode(const std::string& src, std::string& dst, bool encode_slash = true); extern std::string url_encode(const std::string& src, bool encode_slash = true); -extern std::string url_remove_prefix(const std::string& url); // Removes hhtp, https and www from url +extern std::string url_remove_prefix(const std::string& url); // Removes http, https and www from url /* destination should be CEPH_CRYPTO_HMACSHA1_DIGESTSIZE bytes long */ extern void calc_hmac_sha1(const char *key, int key_len, const char *msg, int msg_len, char *dest); diff --git a/src/rgw/rgw_crypt.cc b/src/rgw/rgw_crypt.cc index 6bc4bb9c16750..085fda0a44bed 100644 --- a/src/rgw/rgw_crypt.cc +++ b/src/rgw/rgw_crypt.cc @@ -1535,7 +1535,7 @@ int rgw_remove_sse_s3_bucket_key(req_state *s, optional_yield y) * I've left some commented out lines above. They are there for * a reason, which I will explain. The "canonical" json constructed * by the code above as a crypto context must take a json object and -* turn it into a unique determinstic fixed form. For most json +* turn it into a unique deterministic fixed form. For most json * types this is easy. The hardest problem that is handled above is * detailing with unicode strings; they must be turned into * NFC form and sorted in a fixed order. Numbers, however, diff --git a/src/rgw/rgw_env.cc b/src/rgw/rgw_env.cc index d528f0e6d479a..2415a93c34fb2 100644 --- a/src/rgw/rgw_env.cc +++ b/src/rgw/rgw_env.cc @@ -113,7 +113,7 @@ size_t RGWEnv::get_size(const char *name, size_t def_val) const sz = stoull(iter->second); } catch(...){ /* it is very unlikely that we'll ever encounter out_of_range, but let's - return the default eitherway */ + return the default either way */ sz = def_val; } diff --git a/src/rgw/rgw_file.cc b/src/rgw/rgw_file.cc index 6130019df032c..e1217173feda9 100644 --- a/src/rgw/rgw_file.cc +++ b/src/rgw/rgw_file.cc @@ -182,7 +182,7 @@ namespace rgw { LookupFHResult fhr{nullptr, 0}; /* XXX the need for two round-trip operations to identify file or - * directory leaf objects is unecessary--the current proposed + * directory leaf objects is unnecessary--the current proposed * mechanism to avoid this is to store leaf object names with an * object locator w/o trailing slash */ @@ -415,7 +415,7 @@ namespace rgw { * leaf object */ if (! 
-      /* XXX for now, peform a hard lookup to deduce the type of
+      /* XXX for now, perform a hard lookup to deduce the type of
       * object to be deleted ("foo" vs. "foo/")--also, ensures
       * atomicity at this endpoint */
       struct rgw_file_handle *fh;
@@ -1510,7 +1510,7 @@ namespace rgw {
       if (factory == nullptr) {
         return false;
       }
-      /* make sure the reclaiming object is the same partiton with newobject factory,
+      /* make sure the reclaiming object is the same partition with newobject factory,
        * then we can recycle the object, and replace with newobject */
       if (!fs->fh_cache.is_same_partition(factory->fhk.fh_hk.object, fh.fh_hk.object)) {
         return false;
diff --git a/src/rgw/rgw_iam_policy.cc b/src/rgw/rgw_iam_policy.cc
index eb8662d582174..3a118ed78d243 100644
--- a/src/rgw/rgw_iam_policy.cc
+++ b/src/rgw/rgw_iam_policy.cc
@@ -541,7 +541,7 @@ bool ParseState::do_string(CephContext* cct, const char* s, size_t l) {
   auto k = pp->tokens.lookup(s, l);
   Policy& p = pp->policy;
   bool is_action = false;
-  bool is_validaction = false;
+  bool is_valid_action = false;
   Statement* t = p.statements.empty() ? nullptr : &(p.statements.back());
   // Top level!
@@ -579,13 +579,13 @@ bool ParseState::do_string(CephContext* cct, const char* s, size_t l) {
       (w->id == TokenID::NotAction)) {
     is_action = true;
     if (*s == '*') {
-      is_validaction = true;
+      is_valid_action = true;
       (w->id == TokenID::Action ? t->action = allValue : t->notaction = allValue);
     } else {
       for (auto& p : actpairs) {
         if (match_policy({s, l}, p.name, MATCH_POLICY_ACTION)) {
-          is_validaction = true;
+          is_valid_action = true;
           (w->id == TokenID::Action ? t->action[p.bit] = 1 : t->notaction[p.bit] = 1);
         }
         if ((t->action & s3AllValue) == s3AllValue) {
@@ -687,7 +687,7 @@ bool ParseState::do_string(CephContext* cct, const char* s, size_t l) {
     pp->s.pop_back();
   }
-  if (is_action && !is_validaction) {
+  if (is_action && !is_valid_action) {
     annotate(fmt::format("`{}` is not a valid action.",
                          std::string_view{s, l}));
     return false;
diff --git a/src/rgw/rgw_kafka.cc b/src/rgw/rgw_kafka.cc
index 832c4667b94e1..eb6b85071a780 100644
--- a/src/rgw/rgw_kafka.cc
+++ b/src/rgw/rgw_kafka.cc
@@ -23,7 +23,7 @@
 // (2) check performance of emptying queue to local list, and go over the list and publish
 // (3) use std::shared_mutex (c++17) or equivalent for the connections lock
-// cmparisson operator between topic pointer and name
+// comparison operator between topic pointer and name
 bool operator==(const rd_kafka_topic_t* rkt, const std::string& name) {
     return name == std::string_view(rd_kafka_topic_name(rkt));
 }
@@ -69,7 +69,7 @@ struct connection_t {
   CallbackList callbacks;
   const std::string broker;
   const bool use_ssl;
-  const bool verify_ssl; // TODO currently iognored, not supported in librdkafka v0.11.6
+  const bool verify_ssl; // TODO currently ignored, not supported in librdkafka v0.11.6
   const boost::optional ca_location;
   const std::string user;
   const std::string password;
@@ -209,7 +209,7 @@ bool new_producer(connection_t* conn) {
     return false;
   }
-  // get list of brokers based on the bootsrap broker
+  // get list of brokers based on the bootstrap broker
   if (rd_kafka_conf_set(conn->temp_conf, "bootstrap.servers", conn->broker.c_str(), errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) goto conf_error;
   if (conn->use_ssl) {
@@ -459,7 +459,7 @@ private:
       auto& conn = conn_it->second;
-      // Checking the connection idlesness
+      // Checking the connection idleness
       if(conn->timestamp.sec() + max_idle_time < ceph_clock_now()) {
         ldout(conn->cct, 20) << "kafka run: deleting a connection due to idle behaviour: " << ceph_clock_now() << dendl;
         std::lock_guard lock(connections_lock);
@@ -478,7 +478,7 @@ private:
         // TODO: add error counter for failed retries
         // TODO: add exponential backoff for retries
       } else {
-        ldout(conn->cct, 10) << "Kafka run: connection (" << broker << ") retry successfull" << dendl;
+        ldout(conn->cct, 10) << "Kafka run: connection (" << broker << ") retry successful" << dendl;
       }
       ++conn_it;
       continue;
@@ -570,7 +570,7 @@ public:
     std::lock_guard lock(connections_lock);
     const auto it = connections.find(broker);
-    // note that ssl vs. non-ssl connection to the same host are two separate conenctions
+    // note that ssl vs. non-ssl connection to the same host are two separate connections
     if (it != connections.end()) {
       // connection found - return even if non-ok
       ldout(cct, 20) << "Kafka connect: connection found" << dendl;
diff --git a/src/rgw/rgw_keystone.cc b/src/rgw/rgw_keystone.cc
index d0aba2f7832cb..7d5264f980eee 100644
--- a/src/rgw/rgw_keystone.cc
+++ b/src/rgw/rgw_keystone.cc
@@ -142,7 +142,7 @@ int Service::get_admin_token(const DoutPrefixProvider *dpp,
                              optional_yield y,
                              std::string& token)
 {
-  /* Let's check whether someone uses the deprecated "admin token" feauture
+  /* Let's check whether someone uses the deprecated "admin token" feature
    * based on a shared secret from keystone.conf file. */
   const auto& admin_token = config.get_admin_token();
   if (! admin_token.empty()) {
@@ -345,7 +345,7 @@ int TokenEnvelope::parse(const DoutPrefixProvider *dpp,
        * speaks in v2 disregarding the promise to go with v3. */
       decode_v3(*token_iter);
-      /* Identity v3 conveys the token inforamtion not as a part of JSON but
+      /* Identity v3 conveys the token information not as a part of JSON but
        * in the X-Subject-Token HTTP header we're getting from caller. */
       token.id = token_str;
     } else {
@@ -354,7 +354,7 @@ int TokenEnvelope::parse(const DoutPrefixProvider *dpp,
   } else if (version == rgw::keystone::ApiVersion::VER_3) {
     if (! token_iter.end()) {
       decode_v3(*token_iter);
-      /* v3 suceeded. We have to fill token.id from external input as it
+      /* v3 succeeded. We have to fill token.id from external input as it
        * isn't a part of the JSON response anymore. It has been moved
        * to X-Subject-Token HTTP header instead. */
       token.id = token_str;
diff --git a/src/rgw/rgw_kmip_client_impl.cc b/src/rgw/rgw_kmip_client_impl.cc
index 7f0160748a903..e25ce95a82813 100644
--- a/src/rgw/rgw_kmip_client_impl.cc
+++ b/src/rgw/rgw_kmip_client_impl.cc
@@ -650,7 +650,7 @@ RGWKmipHandles::do_one_entry(RGWKMIPTransceiver &element)
     KeyBlock *kp = static_cast(pld->object)->key_block;
     ByteString *bp;
     if (kp->key_format_type != KMIP_KEYFORMAT_RAW) {
-      lderr(cct) << "get: expected raw key fromat got " << kp->key_format_type << dendl;
+      lderr(cct) << "get: expected raw key format got " << kp->key_format_type << dendl;
       element.ret = -EINVAL;
       goto Done;
     }
diff --git a/src/rgw/rgw_lc.cc b/src/rgw/rgw_lc.cc
index 4ae3a950ade63..79b81270457d4 100644
--- a/src/rgw/rgw_lc.cc
+++ b/src/rgw/rgw_lc.cc
@@ -2146,7 +2146,7 @@ int RGWLC::process(int index, int max_lock_secs, LCWorker* worker,
   SimpleBackoff shard_lock(5 /* max retries */, 50ms);
   if (! shard_lock.wait_backoff(lock_lambda)) {
-    ldpp_dout(this, 0) << "RGWLC::process(): failed to aquire lock on "
+    ldpp_dout(this, 0) << "RGWLC::process(): failed to acquire lock on "
                        << lc_shard << " after " << shard_lock.get_retries()
                        << dendl;
     return 0;
@@ -2314,7 +2314,7 @@ int RGWLC::process(int index, int max_lock_secs, LCWorker* worker,
     /* postamble */
     //bucket_lc_post(index, max_lock_secs, entry, ret, worker);
     if (! shard_lock.wait_backoff(lock_lambda)) {
-      ldpp_dout(this, 0) << "RGWLC::process(): failed to aquire lock on "
+      ldpp_dout(this, 0) << "RGWLC::process(): failed to acquire lock on "
                          << lc_shard << " after " << shard_lock.get_retries()
                          << dendl;
       return 0;
diff --git a/src/rgw/rgw_lib.cc b/src/rgw/rgw_lib.cc
index f449cce21c026..5b0a3c728490b 100644
--- a/src/rgw/rgw_lib.cc
+++ b/src/rgw/rgw_lib.cc
@@ -114,7 +114,7 @@ namespace rgw {
   void RGWLibProcess::handle_request(const DoutPrefixProvider *dpp, RGWRequest* r)
   {
     /*
-     * invariant: valid requests are derived from RGWLibRequst
+     * invariant: valid requests are derived from RGWLibRequest
      */
     RGWLibRequest* req = static_cast(r);
diff --git a/src/rgw/rgw_lib.h b/src/rgw/rgw_lib.h
index 1ad54b49b4857..30234eebcdd33 100644
--- a/src/rgw/rgw_lib.h
+++ b/src/rgw/rgw_lib.h
@@ -125,7 +125,7 @@ namespace rgw {
   public:
     CephContext* cct;
-    /* unambiguiously return req_state */
+    /* unambiguously return req_state */
     inline req_state* get_state() { return this->RGWRequest::s; }
     RGWLibRequest(CephContext* _cct, std::unique_ptr _user)
diff --git a/src/rgw/rgw_lua_background.h b/src/rgw/rgw_lua_background.h
index e2f290213b5cf..7b8d12599f4e8 100644
--- a/src/rgw/rgw_lua_background.h
+++ b/src/rgw/rgw_lua_background.h
@@ -180,5 +180,5 @@ public:
   void resume(rgw::sal::Driver* _driver) override;
 };
-} //namepsace rgw::lua
+} //namespace rgw::lua
diff --git a/src/rgw/rgw_lua_utils.cc b/src/rgw/rgw_lua_utils.cc
index 4118bed42e087..35f9529746139 100644
--- a/src/rgw/rgw_lua_utils.cc
+++ b/src/rgw/rgw_lua_utils.cc
@@ -9,7 +9,7 @@
 namespace rgw::lua {
-// TODO - add the folowing generic functions
+// TODO - add the following generic functions
 // lua_push(lua_State* L, const std::string& str)
 // template lua_push(lua_State* L, const std::optional& val)
 // lua_push(lua_State* L, const ceph::real_time& tp)
diff --git a/src/rgw/rgw_lua_utils.h b/src/rgw/rgw_lua_utils.h
index 0e8dfb2bf6ba9..79ebb09fac07a 100644
--- a/src/rgw/rgw_lua_utils.h
+++ b/src/rgw/rgw_lua_utils.h
@@ -222,7 +222,7 @@ void create_metatable(lua_State* L, const std::string_view parent_name, const st
 // following struct may be used as a base class for other MetaTable classes
 // note, however, this is not mandatory to use it as a base
 struct EmptyMetaTable {
-  // by default everythinmg is "readonly"
+  // by default everything is "readonly"
   // to change, overload this function in the derived
   static int NewIndexClosure(lua_State* L) {
     return luaL_error(L, "trying to write to readonly field");
diff --git a/src/rgw/rgw_op.cc b/src/rgw/rgw_op.cc
index 6a7a7313e1171..7363db728a496 100644
--- a/src/rgw/rgw_op.cc
+++ b/src/rgw/rgw_op.cc
@@ -3321,7 +3321,7 @@ static int filter_out_quota_info(std::map& add_attrs,
     }
   }
-  /* Swift requries checking on raw usage instead of the 4 KiB rounded one. */
+  /* Swift requires checking on raw usage instead of the 4 KiB rounded one. */
   quota.check_on_raw = true;
   quota.enabled = quota.max_size > 0 || quota.max_objects > 0;
@@ -7601,7 +7601,7 @@ RGWBulkUploadOp::parse_path(const std::string_view& path)
   const size_t start_pos = path.find_first_not_of('/');
   if (std::string_view::npos != start_pos) {
-    /* Seperator is the first slash after the leading ones. */
+    /* Separator is the first slash after the leading ones. */
     const size_t sep_pos = path.substr(start_pos).find('/');
     if (std::string_view::npos != sep_pos) {
@@ -8074,10 +8074,10 @@ void RGWBulkUploadOp::execute(optional_yield y)
 RGWBulkUploadOp::AlignedStreamGetter::~AlignedStreamGetter()
 {
-  const size_t aligned_legnth = length + (-length % alignment);
+  const size_t aligned_length = length + (-length % alignment);
   ceph::bufferlist junk;
-  DecoratedStreamGetter::get_exactly(aligned_legnth - position, junk);
+  DecoratedStreamGetter::get_exactly(aligned_length - position, junk);
 }
 ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_at_most(const size_t want,
diff --git a/src/rgw/rgw_op.h b/src/rgw/rgw_op.h
index dc5bc8be55ecd..be7232ae392da 100644
--- a/src/rgw/rgw_op.h
+++ b/src/rgw/rgw_op.h
@@ -2198,7 +2198,7 @@ inline int rgw_get_request_metadata(const DoutPrefixProvider *dpp,
       return -ENAMETOOLONG;
     }
-    /* Similar remarks apply to the check for value size. We're veryfing
+    /* Similar remarks apply to the check for value size. We're verifying
      * it early at the RGW's side as it's being claimed in /info. */
     const auto max_attr_size = cct->_conf->rgw_max_attr_size;
     if (max_attr_size && xattr.length() > max_attr_size) {
diff --git a/src/rgw/rgw_perf_counters.cc b/src/rgw/rgw_perf_counters.cc
index 8cbda840c5ad8..125bace144506 100644
--- a/src/rgw/rgw_perf_counters.cc
+++ b/src/rgw/rgw_perf_counters.cc
@@ -56,7 +56,7 @@ void add_rgw_frontend_counters(PerfCountersBuilder *pcb) {
   pcb->add_u64(l_rgw_pubsub_push_pending, "pubsub_push_pending", "Pubsub events pending reply from endpoint");
   pcb->add_u64_counter(l_rgw_pubsub_missing_conf, "pubsub_missing_conf", "Pubsub events could not be handled because of missing configuration");
-  pcb->add_u64_counter(l_rgw_lua_script_ok, "lua_script_ok", "Successfull executions of Lua scripts");
+  pcb->add_u64_counter(l_rgw_lua_script_ok, "lua_script_ok", "Successful executions of Lua scripts");
   pcb->add_u64_counter(l_rgw_lua_script_fail, "lua_script_fail", "Failed executions of Lua scripts");
   pcb->add_u64(l_rgw_lua_current_vms, "lua_current_vms", "Number of Lua VMs currently being executed");
 }
diff --git a/src/rgw/rgw_period_history.cc b/src/rgw/rgw_period_history.cc
index 40a0daf3b4465..66ad7151d0a3b 100644
--- a/src/rgw/rgw_period_history.cc
+++ b/src/rgw/rgw_period_history.cc
@@ -106,7 +106,7 @@ class RGWPeriodHistory::Impl final {
   /// and return an iterator to the merged history
   Set::iterator merge(Set::iterator dst, Set::iterator src);
-  /// construct a Cursor object using Cursor's private constuctor
+  /// construct a Cursor object using Cursor's private constructor
   Cursor make_cursor(Set::const_iterator history, epoch_t epoch);
   CephContext *const cct;
diff --git a/src/rgw/rgw_pubsub.cc b/src/rgw/rgw_pubsub.cc
index 14d85abe3b62d..7031c2363f0b8 100644
--- a/src/rgw/rgw_pubsub.cc
+++ b/src/rgw/rgw_pubsub.cc
@@ -675,7 +675,7 @@ int RGWPubSub::Bucket::remove_notifications(const DoutPrefixProvider *dpp, optio
     return ret ;
   }
-  // remove all auto-genrated topics
+  // remove all auto-generated topics
   for (const auto& topic : bucket_topics.topics) {
     const auto& topic_name = topic.first;
     ret = ps.remove_topic(dpp, topic_name, y);
diff --git a/src/rgw/rgw_realm_reloader.cc b/src/rgw/rgw_realm_reloader.cc
index 745dac7fefe47..b3a511becb685 100644
--- a/src/rgw/rgw_realm_reloader.cc
+++ b/src/rgw/rgw_realm_reloader.cc
@@ -22,7 +22,7 @@
 #define dout_prefix (*_dout << "rgw realm reloader: ")
-// safe callbacks from SafeTimer are unneccessary. reload() can take a long
+// safe callbacks from SafeTimer are unnecessary. reload() can take a long
 // time, so we don't want to hold the mutex and block handle_notify() for the
 // duration
 static constexpr bool USE_SAFE_TIMER_CALLBACKS = false;
diff --git a/src/rgw/rgw_rest.cc b/src/rgw/rgw_rest.cc
index 47272515d3488..6b827f983fb62 100644
--- a/src/rgw/rgw_rest.cc
+++ b/src/rgw/rgw_rest.cc
@@ -222,13 +222,13 @@ void rgw_rest_init(CephContext *cct, const rgw::sal::ZoneGroup& zone_group)
   hostnames_set.erase(""); // filter out empty hostnames
   ldout(cct, 20) << "RGW hostnames: " << hostnames_set << dendl;
   /* TODO: We should have a sanity check that no hostname matches the end of
-   * any other hostname, otherwise we will get ambigious results from
+   * any other hostname, otherwise we will get ambiguous results from
    * rgw_find_host_in_domains.
    * Eg:
    * Hostnames: [A, B.A]
   * Inputs: [Z.A, X.B.A]
   * Z.A clearly splits to subdomain=Z, domain=Z
-   * X.B.A ambigously splits to both {X, B.A} and {X.B, A}
+   * X.B.A ambiguously splits to both {X, B.A} and {X.B, A}
   */
   zone_group.get_s3website_hostnames(names);
@@ -646,7 +646,7 @@ static void build_redirect_url(req_state *s, const string& redirect_base,
   string dest_uri = redirect_base;
   /*
-   * reqest_uri is always start with slash, so we need to remove
+   * request_uri is always start with slash, so we need to remove
    * the unnecessary slash at the end of dest_uri.
    */
   if (dest_uri[dest_uri.size() - 1] == '/') {
@@ -766,11 +766,11 @@ int dump_body(req_state* const s,
               const char* const buf,
               const size_t len)
 {
-  bool healthchk = false;
+  bool healthcheck = false;
   // we dont want to limit health checks
   if(s->op_type == RGW_OP_GET_HEALTH_CHECK)
-    healthchk = true;
-  if(len > 0 && !healthchk) {
+    healthcheck = true;
+  if(len > 0 && !healthcheck) {
     const char *method = s->info.method;
     s->ratelimit_data->decrease_bytes(method, s->ratelimit_user_name, len, &s->user_ratelimit);
     if(!rgw::sal::Bucket::empty(s->bucket.get()))
@@ -803,11 +803,11 @@ int recv_body(req_state* const s,
   } catch (rgw::io::Exception& e) {
     return -e.code().value();
   }
-  bool healthchk = false;
+  bool healthcheck = false;
   // we dont want to limit health checks
   if(s->op_type == RGW_OP_GET_HEALTH_CHECK)
-    healthchk = true;
-  if(len > 0 && !healthchk) {
+    healthcheck = true;
+  if(len > 0 && !healthcheck) {
     const char *method = s->info.method;
     s->ratelimit_data->decrease_bytes(method, s->ratelimit_user_name, len, &s->user_ratelimit);
     if(!rgw::sal::Bucket::empty(s->bucket.get()))
diff --git a/src/rgw/rgw_rest_pubsub.cc b/src/rgw/rgw_rest_pubsub.cc
index 04f21d9b87d90..843b7eb7d0848 100644
--- a/src/rgw/rgw_rest_pubsub.cc
+++ b/src/rgw/rgw_rest_pubsub.cc
@@ -1035,7 +1035,7 @@ void RGWPSCreateNotifOp::execute(optional_yield y) {
   // create unique topic name. this has 2 reasons:
   // (1) topics cannot be shared between different S3 notifications because they hold the filter information
-  // (2) make topic clneaup easier, when notification is removed
+  // (2) make topic cleanup easier, when notification is removed
   const auto unique_topic_name = topic_to_unique(topic_name, notif_name);
   // generate the internal topic. destination is stored here for the "push-only" case
   // when no subscription exists
diff --git a/src/rgw/rgw_rest_ratelimit.cc b/src/rgw/rgw_rest_ratelimit.cc
index 3de8ca2618dc8..128e44d9a5ae0 100644
--- a/src/rgw/rgw_rest_ratelimit.cc
+++ b/src/rgw/rgw_rest_ratelimit.cc
@@ -221,7 +221,7 @@ void RGWOp_Ratelimit_Set::execute(optional_yield y)
   sval = s->info.args.get("global", &exists);
   if (exists) {
     if (!boost::iequals(sval,"true") && !boost::iequals(sval,"false")) {
-      ldpp_dout(this, 20) << "global is not equal to true or faslse" << dendl;
+      ldpp_dout(this, 20) << "global is not equal to true or false" << dendl;
       op_ret = -EINVAL;
       return;
     }
diff --git a/src/rgw/rgw_rest_s3.cc b/src/rgw/rgw_rest_s3.cc
index 5425406b1eb95..f12d5e733007a 100644
--- a/src/rgw/rgw_rest_s3.cc
+++ b/src/rgw/rgw_rest_s3.cc
@@ -215,7 +215,7 @@ ldpp_dout(s, 20) << "get_encryption_defaults: found kms_attr " << kms_attr << "
     }
     kms_attr_seen = true;
   } else if (!rest_only && kms_master_key_id != "") {
-ldpp_dout(s, 20) << "get_encryption_defaults: no kms_attr, but kms_master_key_id = " << kms_master_key_id << ", settig kms_attr_seen" << dendl;
+ldpp_dout(s, 20) << "get_encryption_defaults: no kms_attr, but kms_master_key_id = " << kms_master_key_id << ", setting kms_attr_seen" << dendl;
     kms_attr_seen = true;
     rgw_set_amz_meta_header(s->info.crypt_attribute_map, kms_attr, kms_master_key_id, OVERWRITE);
   }
@@ -469,7 +469,7 @@ int RGWGetObj_ObjStore_S3::send_response_data(bufferlist& bl, off_t bl_ofs,
   if (! op_ret) {
     if (! lo_etag.empty()) {
-      /* Handle etag of Swift API's large objects (DLO/SLO). It's entirerly
+      /* Handle etag of Swift API's large objects (DLO/SLO). It's entirely
       * legit to perform GET on them through S3 API. In such situation,
       * a client should receive the composited content with corresponding
       * etag value. */
@@ -3303,7 +3303,7 @@ void RGWPostObj_ObjStore_S3::send_response()
     * What we really would like is to quaily the bucket name, so
     * that the client could simply copy it and paste into next request.
     * Unfortunately, in S3 we cannot know if the client will decide
-     * to come through DNS, with "bucket.tenant" sytanx, or through
+     * to come through DNS, with "bucket.tenant" syntax, or through
     * URL with "tenant\bucket" syntax. Therefore, we provide the
     * tenant separately.
     */
diff --git a/src/rgw/rgw_rest_sts.cc b/src/rgw/rgw_rest_sts.cc
index c5bd8b1b7077e..d94181f4e6342 100644
--- a/src/rgw/rgw_rest_sts.cc
+++ b/src/rgw/rgw_rest_sts.cc
@@ -577,7 +577,7 @@ int RGWSTSGetSessionToken::verify_permission(optional_yield y)
                               s,
                               rgw::ARN(partition, service, "", s->user->get_tenant(), ""),
                               rgw::IAM::stsGetSessionToken)) {
-    ldpp_dout(this, 0) << "User does not have permssion to perform GetSessionToken" << dendl;
+    ldpp_dout(this, 0) << "User does not have permission to perform GetSessionToken" << dendl;
     return -EACCES;
   }
diff --git a/src/rgw/rgw_rest_swift.cc b/src/rgw/rgw_rest_swift.cc
index 5f0b1f7722263..25ba61841613f 100644
--- a/src/rgw/rgw_rest_swift.cc
+++ b/src/rgw/rgw_rest_swift.cc
@@ -1620,7 +1620,7 @@ int RGWBulkDelete_ObjStore_SWIFT::get_data(
   const size_t start_pos = path_str.find_first_not_of('/');
   if (string::npos != start_pos) {
-    /* Seperator is the first slash after the leading ones. */
+    /* Separator is the first slash after the leading ones. */
     const size_t sep_pos = path_str.find('/', start_pos);
     if (string::npos != sep_pos) {
diff --git a/src/rgw/rgw_s3select.cc b/src/rgw/rgw_s3select.cc
index c7eaa69842f3d..cfcba2a63e100 100644
--- a/src/rgw/rgw_s3select.cc
+++ b/src/rgw/rgw_s3select.cc
@@ -370,7 +370,7 @@ int RGWSelectObj_ObjStore_S3::run_s3select_on_csv(const char* query, const char*
   uint32_t length_before_processing, length_post_processing;
   csv_object::csv_defintions csv;
   const char* s3select_syntax_error = "s3select-Syntax-Error";
-  const char* s3select_resource_id = "resourcse-id";
+  const char* s3select_resource_id = "resource-id";
   const char* s3select_processTime_error = "s3select-ProcessingTime-Error";
   s3select_syntax.parse_query(query);
diff --git a/src/rgw/rgw_s3select_private.h b/src/rgw/rgw_s3select_private.h
index fa595b0da599f..f4fd59a29ac8e 100644
--- a/src/rgw/rgw_s3select_private.h
+++ b/src/rgw/rgw_s3select_private.h
@@ -201,7 +201,7 @@ private:
 #ifdef _ARROW_EXIST
   s3selectEngine::rgw_s3select_api m_rgw_api;
 #endif
-  //a request for range may statisfy by several calls to send_response_date;
+  //a request for range may satisfy by several calls to send_response_date;
   size_t m_request_range;
   std::string requested_buffer;
   std::string range_req_str;
diff --git a/src/rgw/rgw_sal.h b/src/rgw/rgw_sal.h
index f0314517bebda..cc45d5f3a2ceb 100644
--- a/src/rgw/rgw_sal.h
+++ b/src/rgw/rgw_sal.h
@@ -699,7 +699,7 @@ class Bucket {
     virtual const rgw_user& get_owner() const = 0;
     /** Check in the backing store if this bucket is empty */
     virtual int check_empty(const DoutPrefixProvider* dpp, optional_yield y) = 0;
-    /** Chec k if the given size fits within the quota */
+    /** Check if the given size fits within the quota */
     virtual int check_quota(const DoutPrefixProvider *dpp, RGWQuota& quota, uint64_t obj_size, optional_yield y, bool check_size_only = false) = 0;
     /** Set the attributes in attrs, leaving any other existing attrs set, and
      * write them to the backing store; a merge operation */
diff --git a/src/rgw/rgw_sal_dbstore.cc b/src/rgw/rgw_sal_dbstore.cc
index 6c8778b872383..1a28a7724f4c4 100644
--- a/src/rgw/rgw_sal_dbstore.cc
+++ b/src/rgw/rgw_sal_dbstore.cc
@@ -310,7 +310,7 @@ namespace rgw::sal {
   int DBBucket::remove_objs_from_index(const DoutPrefixProvider *dpp, std::list& objs_to_unlink)
   {
-    /* XXX: CHECK: Unlike RadosStore, there is no seperate bucket index table.
+    /* XXX: CHECK: Unlike RadosStore, there is no separate bucket index table.
     * Delete all the object in the list from the object table of this
     * bucket
     */
@@ -1206,7 +1206,7 @@ namespace rgw::sal {
       return 0; /* nothing more to write */
     }
-    /* flush watever tail data is present */
+    /* flush whatever tail data is present */
     int ret = parent_op.write_data(dpp, tail_part_data, tail_part_offset);
     if (ret < 0) {
       return ret;
     }
@@ -1360,7 +1360,7 @@ namespace rgw::sal {
       return 0; /* nothing more to write */
     }
-    /* flush watever tail data is present */
+    /* flush whatever tail data is present */
     int ret = parent_op.write_data(dpp, tail_part_data, tail_part_offset);
     if (ret < 0) {
       return ret;
diff --git a/src/rgw/rgw_sts.cc b/src/rgw/rgw_sts.cc
index 2b31d5c5a2972..557bcf24f2b31 100644
--- a/src/rgw/rgw_sts.cc
+++ b/src/rgw/rgw_sts.cc
@@ -72,7 +72,7 @@ int Credentials::generateCredentials(const DoutPrefixProvider *dpp,
   //Session Token - Encrypt using AES
   auto* cryptohandler = cct->get_crypto_handler(CEPH_CRYPTO_AES);
   if (! cryptohandler) {
-    ldpp_dout(dpp, 0) << "ERROR: No AES cryto handler found !" << dendl;
+    ldpp_dout(dpp, 0) << "ERROR: No AES crypto handler found !" << dendl;
     return -EINVAL;
   }
   string secret_s = cct->_conf->rgw_sts_key;
diff --git a/src/rgw/rgw_swift_auth.cc b/src/rgw/rgw_swift_auth.cc
index b58f0b45d8e7b..765721df190d3 100644
--- a/src/rgw/rgw_swift_auth.cc
+++ b/src/rgw/rgw_swift_auth.cc
@@ -88,7 +88,7 @@ void TempURLEngine::get_owner_info(const DoutPrefixProvider* dpp, const req_stat
   /* TempURL case is completely different than the Keystone auth - you may
    * get account name only through extraction from URL. In turn, knowledge
-   * about account is neccessary to obtain its bucket tenant. Without that,
+   * about account is necessary to obtain its bucket tenant. Without that,
    * the access would be limited to accounts with empty tenant. */
   string bucket_tenant;
   if (!s->account_name.empty()) {
diff --git a/src/rgw/rgw_sync_policy.h b/src/rgw/rgw_sync_policy.h
index 98d0a4798a5d9..ec9d1f2c62341 100644
--- a/src/rgw/rgw_sync_policy.h
+++ b/src/rgw/rgw_sync_policy.h
@@ -591,7 +591,7 @@ WRITE_CLASS_ENCODER(rgw_sync_data_flow_group)
 struct rgw_sync_policy_group {
   std::string id;
-  rgw_sync_data_flow_group data_flow; /* override data flow, howver, will not be able to
+  rgw_sync_data_flow_group data_flow; /* override data flow, however, will not be able to
                                          add new flows that don't exist at higher level */
   std::vector pipes; /* if not defined then applies to all buckets (DR sync) */
diff --git a/src/rgw/rgw_zone_types.h b/src/rgw/rgw_zone_types.h
index c8711a96d6d54..13fce000c4124 100644
--- a/src/rgw/rgw_zone_types.h
+++ b/src/rgw/rgw_zone_types.h
@@ -332,7 +332,7 @@ struct RGWZone {
    */
   uint32_t bucket_index_max_shards;
-  // pre-shard buckets on creation to enable some write-parallism by default,
+  // pre-shard buckets on creation to enable some write-parallelism by default,
   // delay the need to reshard as the bucket grows, and (in multisite) get some
   // bucket index sharding where dynamic resharding is not supported
   static constexpr uint32_t default_bucket_index_max_shards = 11;
diff --git a/src/rgw/services/svc_rados.cc b/src/rgw/services/svc_rados.cc
index 99f400f42b021..d682b87eb2c27 100644
--- a/src/rgw/services/svc_rados.cc
+++ b/src/rgw/services/svc_rados.cc
@@ -424,7 +424,7 @@ bool RGWSI_RADOS::check_secure_mon_conn(const DoutPrefixProvider *dpp) const
   std::vector modes;
   reg.get_supported_methods(CEPH_ENTITY_TYPE_MON, &methods, &modes);
-  ldpp_dout(dpp, 20) << __func__ << "(): auth registy supported: methods=" << methods << " modes=" << modes << dendl;
+  ldpp_dout(dpp, 20) << __func__ << "(): auth registry supported: methods=" << methods << " modes=" << modes << dendl;
   for (auto method : methods) {
     if (!reg.is_secure_method(method)) {
diff --git a/src/test/cls_2pc_queue/test_cls_2pc_queue.cc b/src/test/cls_2pc_queue/test_cls_2pc_queue.cc
index 8f392721f5b74..8384979249093 100644
--- a/src/test/cls_2pc_queue/test_cls_2pc_queue.cc
+++ b/src/test/cls_2pc_queue/test_cls_2pc_queue.cc
@@ -409,7 +409,7 @@ TEST_F(TestCls2PCQueue, CommitError)
   }
   cls_2pc_reservations reservations;
   ASSERT_EQ(0, cls_2pc_queue_list_reservations(ioctx, queue_name, reservations));
-  // 2 reservations were not comitted
+  // 2 reservations were not committed
   ASSERT_EQ(reservations.size(), 2);
 }
@@ -675,7 +675,7 @@ TEST_F(TestCls2PCQueue, ManualCleanup)
   cls_2pc_queue_init(op, queue_name, max_size);
   ASSERT_EQ(0, ioctx.operate(queue_name, &op));
-  // anything older than 100ms is cosidered stale
+  // anything older than 100ms is considered stale
   ceph::coarse_real_time stale_time = ceph::coarse_real_clock::now() + std::chrono::milliseconds(100);
   std::vector reservers(max_workers);
@@ -749,7 +749,7 @@ TEST_F(TestCls2PCQueue, Cleanup)
   cls_2pc_queue_init(op, queue_name, max_size);
   ASSERT_EQ(0, ioctx.operate(queue_name, &op));
-  // anything older than 100ms is cosidered stale
+  // anything older than 100ms is considered stale
   ceph::coarse_real_time stale_time = ceph::coarse_real_clock::now() + std::chrono::milliseconds(100);
   std::vector reservers(max_workers);
@@ -989,7 +989,7 @@ TEST_F(TestCls2PCQueue, MultiProducerConsumer)
   std::for_each(producers.begin(), producers.end(), [](auto& p) { p.join(); });
   std::for_each(consumers.begin(), consumers.end(), [](auto& c) { c.join(); });
   if (!retry_happened) {
-    std::cerr << "Queue was never full - all reservations were sucessfull." <<
+    std::cerr << "Queue was never full - all reservations were successful." <<
       "Please decrease the amount of consumer threads" << std::endl;
   }
   // make sure that queue is empty and no reservations remain
diff --git a/src/test/cls_queue/test_cls_queue.cc b/src/test/cls_queue/test_cls_queue.cc
index a1f68638c1f41..8b2f9dc1041f2 100644
--- a/src/test/cls_queue/test_cls_queue.cc
+++ b/src/test/cls_queue/test_cls_queue.cc
@@ -85,7 +85,7 @@ TEST_F(TestClsQueue, Enqueue)
   ASSERT_EQ(0, ioctx.operate(queue_name, &op));
   // test multiple enqueues
-  // 10 iterations, 100 elelemts each
+  // 10 iterations, 100 elements each
   // expect 0 (OK)
   test_enqueue(queue_name, 10, 100, 0);
 }
@@ -99,10 +99,10 @@ TEST_F(TestClsQueue, QueueFull)
   cls_queue_init(op, queue_name, queue_size);
   ASSERT_EQ(0, ioctx.operate(queue_name, &op));
-  // 8 iterations, 5 elelemts each
+  // 8 iterations, 5 elements each
   // expect 0 (OK)
   test_enqueue(queue_name, 8, 5, 0);
-  // 2 iterations, 5 elelemts each
+  // 2 iterations, 5 elements each
   // expect -28 (Q FULL)
   test_enqueue(queue_name, 2, 5, -28);
 }
diff --git a/src/test/cls_rgw/test_cls_rgw.cc b/src/test/cls_rgw/test_cls_rgw.cc
index bf60dfdd04248..a2f2fa66a7672 100644
--- a/src/test/cls_rgw/test_cls_rgw.cc
+++ b/src/test/cls_rgw/test_cls_rgw.cc
@@ -456,7 +456,7 @@ TEST_F(cls_rgw, index_list)
     { static_cast(0xCF), static_cast(0x8F) },
     /* treble byte utf8 character */
     { static_cast(0xDF), static_cast(0x8F), static_cast(0x8F) },
-    /* quadruble byte utf8 character */
+    /* quadruple byte utf8 character */
     { static_cast(0xF7), static_cast(0x8F), static_cast(0x8F), static_cast(0x8F) },
   };
@@ -738,7 +738,7 @@ TEST_F(cls_rgw, bi_list)
       "bi list test with filters should return correct truncation indicator";
   }
-  // test whether combined segment count is correcgt
+  // test whether combined segment count is correct
   is_truncated = false;
   entries.clear();
   marker.clear();
diff --git a/src/test/cls_rgw/test_cls_rgw_stats.cc b/src/test/cls_rgw/test_cls_rgw_stats.cc
index 004ccc6d1ce07..80fa88fabf531 100644
--- a/src/test/cls_rgw/test_cls_rgw_stats.cc
+++ b/src/test/cls_rgw/test_cls_rgw_stats.cc
@@ -259,7 +259,7 @@ object_map::iterator simulator::find_or_create(const cls_rgw_obj_key& key)
 int simulator::try_start(const cls_rgw_obj_key& key, const std::string& tag)
 {
-  // choose randomly betwen create and delete
+  // choose randomly between create and delete
   const auto type = static_cast(
       ceph::util::generate_random_number(CLS_RGW_OP_ADD, CLS_RGW_OP_DEL));
diff --git a/src/test/rgw/CMakeLists.txt b/src/test/rgw/CMakeLists.txt
index d6bb3db1c6903..dec7ea1c149c4 100644
--- a/src/test/rgw/CMakeLists.txt
+++ b/src/test/rgw/CMakeLists.txt
@@ -68,24 +68,24 @@ add_executable(unittest_rgw_bucket_sync_cache
   test_rgw_bucket_sync_cache.cc)
 add_ceph_unittest(unittest_rgw_bucket_sync_cache)
 target_link_libraries(unittest_rgw_bucket_sync_cache ${rgw_libs})
-#unitttest_rgw_period_history
+#unittest_rgw_period_history
 add_executable(unittest_rgw_period_history test_rgw_period_history.cc)
 add_ceph_unittest(unittest_rgw_period_history)
 target_link_libraries(unittest_rgw_period_history ${rgw_libs})
-# unitttest_rgw_compression
+# unittest_rgw_compression
 add_executable(unittest_rgw_compression
   test_rgw_compression.cc
   $)
 add_ceph_unittest(unittest_rgw_compression)
 target_link_libraries(unittest_rgw_compression ${rgw_libs})
-# unitttest_http_manager
+# unittest_http_manager
 add_executable(unittest_http_manager test_http_manager.cc)
 add_ceph_unittest(unittest_http_manager)
 target_link_libraries(unittest_http_manager ${rgw_libs})
-# unitttest_rgw_reshard_wait
+# unittest_rgw_reshard_wait
 add_executable(unittest_rgw_reshard_wait test_rgw_reshard_wait.cc)
 add_ceph_unittest(unittest_rgw_reshard_wait)
 target_link_libraries(unittest_rgw_reshard_wait ${rgw_libs})
@@ -213,7 +213,7 @@ add_ceph_unittest(unittest_rgw_string)
 target_include_directories(unittest_rgw_string SYSTEM PRIVATE
   "${CMAKE_SOURCE_DIR}/src/rgw")
-# unitttest_rgw_dmclock_queue
+# unittest_rgw_dmclock_queue
 add_executable(unittest_rgw_dmclock_scheduler test_rgw_dmclock_scheduler.cc $)
 add_ceph_unittest(unittest_rgw_dmclock_scheduler)
 target_include_directories(unittest_rgw_dmclock_scheduler
diff --git a/src/test/rgw/bucket_notification/api.py b/src/test/rgw/bucket_notification/api.py
index 30fce41a1bb01..0b53d32c7365c 100644
--- a/src/test/rgw/bucket_notification/api.py
+++ b/src/test/rgw/bucket_notification/api.py
@@ -26,7 +26,7 @@ def put_object_tagging(conn, bucket_name, key, tags):
 def make_request(conn, method, resource, parameters=None, sign_parameters=False, extra_parameters=None):
     """generic request sending to pubsub radogw
-    should cover: topics, notificatios and subscriptions
+    should cover: topics, notifications and subscriptions
     """
     url_params = ''
     if parameters is not None:
diff --git a/src/test/rgw/bucket_notification/test_bn.py b/src/test/rgw/bucket_notification/test_bn.py
index 8860df1555ef9..075d47466a236 100644
--- a/src/test/rgw/bucket_notification/test_bn.py
+++ b/src/test/rgw/bucket_notification/test_bn.py
@@ -277,7 +277,7 @@ def stop_amqp_receiver(receiver, task):
         receiver.channel.stop_consuming()
         log.info('stopping AMQP receiver')
     except Exception as error:
-        log.info('failed to gracefuly stop AMQP receiver: %s', str(error))
+        log.info('failed to gracefully stop AMQP receiver: %s', str(error))
     task.join(5)
@@ -487,7 +487,7 @@ def stop_kafka_receiver(receiver, task):
         receiver.consumer.unsubscribe()
         receiver.consumer.close()
     except Exception as error:
-        log.info('failed to gracefuly stop Kafka receiver: %s', str(error))
+        log.info('failed to gracefully stop Kafka receiver: %s', str(error))
 def get_ip():
diff --git a/src/test/rgw/rgw_multi/tests_az.py b/src/test/rgw/rgw_multi/tests_az.py
index 13ec832a2041c..7ad708350dc51 100644
--- a/src/test/rgw/rgw_multi/tests_az.py
+++ b/src/test/rgw/rgw_multi/tests_az.py
@@ -192,7 +192,7 @@ def test_az_create_empty_bucket():
 def test_az_check_empty_bucket_versioning():
-    """ test bucket vesioning with empty bucket """
+    """ test bucket versioning with empty bucket """
     zones, az_zones = init_env()
     bucket_name = gen_bucket_name()
     # create bucket on the non archive zone
diff --git a/src/test/rgw/test-ceph-diff-sorted.sh b/src/test/rgw/test-ceph-diff-sorted.sh
index dddf4ae1b18f2..c164e5547f4b4 100755
--- a/src/test/rgw/test-ceph-diff-sorted.sh
+++ b/src/test/rgw/test-ceph-diff-sorted.sh
@@ -37,7 +37,7 @@ cuttlefish
 fox
 llama
 octopus
-penguine
+penguin
 seal
 squid
 whale
diff --git a/src/test/rgw/test_multi.md b/src/test/rgw/test_multi.md
index f2c1285300ec1..46bf8e0bfd7c9 100644
--- a/src/test/rgw/test_multi.md
+++ b/src/test/rgw/test_multi.md
@@ -14,7 +14,7 @@ Since we use the same entry point file for all tests, running specific tests is
 ```
 $ nosetests test_multi.py:
 ```
-To run miltiple tests based on wildcard string, use the following format:
+To run multiple tests based on wildcard string, use the following format:
 ```
 $ nosetests test_multi.py -m ""
 ```
diff --git a/src/test/rgw/test_rgw_amqp.cc b/src/test/rgw/test_rgw_amqp.cc
index c2297dc2235ef..a6c76dd2a2cd5 100644
--- a/src/test/rgw/test_rgw_amqp.cc
+++ b/src/test/rgw/test_rgw_amqp.cc
@@ -46,7 +46,7 @@ protected:
     amqp::shutdown();
   }
-  // wait for at least one new (since last drain) message to be dequeueud
+  // wait for at least one new (since last drain) message to be dequeued
   // and then wait for all pending answers to be received
   void wait_until_drained() {
     while (amqp::get_dequeued() == current_dequeued) {
diff --git a/src/test/rgw/test_rgw_kms.cc b/src/test/rgw/test_rgw_kms.cc
index aaa4e4174f785..9fc6145d2fe77 100644
--- a/src/test/rgw/test_rgw_kms.cc
+++ b/src/test/rgw/test_rgw_kms.cc
@@ -242,7 +242,7 @@ TEST_F(TestSSEKMS, concat_url)
   // Each test has 3 strings:
   // * the base URL
   // * the path we want to concatenate
-  // * the exepected final URL
+  // * the expected final URL
   std::string tests[9][3] ={
     {"", "", ""},
     {"", "bar", "/bar"},
diff --git a/src/test/rgw/test_rgw_lua.cc b/src/test/rgw/test_rgw_lua.cc
index be9fa6aed1622..07f8521c2313e 100644
--- a/src/test/rgw/test_rgw_lua.cc
+++ b/src/test/rgw/test_rgw_lua.cc
@@ -706,7 +706,7 @@ TEST(TestRGWLua, UseFunction)
   const std::string script = R"(
     function print_owner(owner)
-      print("Owner Dispaly Name: " .. owner.DisplayName)
+      print("Owner Display Name: " .. owner.DisplayName)
       print("Owner Id: " .. owner.User.Id)
      print("Owner Tenanet: " .. owner.User.Tenant)
     end
@@ -921,7 +921,7 @@ TEST(TestRGWLuaBackground, RequestScript)
   pe.lua.background = &lua_background;
-  // to make sure test is consistent we have to puase the background
+  // to make sure test is consistent we have to pause the background
   lua_background.pause();
   const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script);
   ASSERT_EQ(rc, 0);
diff --git a/src/test/rgw/test_rgw_xml.cc b/src/test/rgw/test_rgw_xml.cc
index fa9f211577875..a31be59a3c885 100644
--- a/src/test/rgw/test_rgw_xml.cc
+++ b/src/test/rgw/test_rgw_xml.cc
@@ -376,7 +376,7 @@ TEST(TestDecoder, BasicParsing)
   ASSERT_STREQ(to_string(result).c_str(), expected_output);
 }
-TEST(TestDecoder, MalfomedInput)
+TEST(TestDecoder, MalformedInput)
 {
   RGWXMLDecoder::XMLParser parser;
   ASSERT_TRUE(parser.init());
diff --git a/src/test/test_rgw_admin_log.cc b/src/test/test_rgw_admin_log.cc
index fe072ead18a1b..7dd7604db1a1f 100644
--- a/src/test/test_rgw_admin_log.cc
+++ b/src/test/test_rgw_admin_log.cc
@@ -489,7 +489,7 @@ static int put_bucket_obj(const char *obj_name, char *data, unsigned len) {
   g_test->send_request(string("PUT"), req, read_bucket_object, (void *)data, (size_t)len);
   if (g_test->get_resp_code() != 200U) {
-    cout << "Errror sending object to the bucket, http_code " << g_test->get_resp_code();
+    cout << "Error sending object to the bucket, http_code " << g_test->get_resp_code();
     return -1;
   }
   return 0;
@@ -500,7 +500,7 @@ static int read_bucket_obj(const char *obj_name) {
   req.append(obj_name);
   g_test->send_request(string("GET"), req);
   if (g_test->get_resp_code() != 200U) {
-    cout << "Errror sending object to the bucket, http_code " << g_test->get_resp_code();
+    cout << "Error sending object to the bucket, http_code " << g_test->get_resp_code();
     return -1;
   }
   return 0;
@@ -511,7 +511,7 @@ static int delete_obj(const char *obj_name) {
   req.append(obj_name);
   g_test->send_request(string("DELETE"), req);
   if (g_test->get_resp_code() != 204U) {
-    cout << "Errror deleting object from bucket, http_code " << g_test->get_resp_code();
+    cout << "Error deleting object from bucket, http_code " << g_test->get_resp_code();
     return -1;
   }
   return 0;
diff --git a/src/test/test_rgw_admin_meta.cc b/src/test/test_rgw_admin_meta.cc
index 4699a876dfebd..b1d5fad06009d 100644
--- a/src/test/test_rgw_admin_meta.cc
+++ b/src/test/test_rgw_admin_meta.cc
@@ -697,7 +697,7 @@ TEST(TestRGWAdmin, meta_get){
   p2 = RGW_CAP_WRITE;
   EXPECT_TRUE (obt_info.caps.check_cap(meta_caps, p2) != 0);
-  /*Version and tag infromation*/
+  /*Version and tag information*/
   EXPECT_TRUE(objv1->ver > objv->ver);
   EXPECT_EQ(objv1->tag, objv->tag);