This directory contains Golang code examples on how to upload an object to an S3 bucket running on a Ceph RGW cluster.
# Prerequisites
-Linux machine running an RGW Ceph cluster. Preferrably started with the ``OSD=1 MON=1 MDS=0 MGR=0 RGW=1 ../src/vstart.sh --debug --new `` command.
+Linux machine running an RGW Ceph cluster. Preferably started with the ``OSD=1 MON=1 MDS=0 MGR=0 RGW=1 ../src/vstart.sh --debug --new `` command.
Go installed on the Linux machine.
## Workflow Procedure
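The workflow steps themselves live in the Go sources in this directory; as a rough illustration of what they boil down to (create a bucket, then issue a PutObject against the local RGW endpoint), here is a minimal sketch using the AWS SDK for Go. The endpoint (vstart's usual default of http://127.0.0.1:8000), the access/secret keys, and the bucket and object names are placeholders to replace with the values from your local cluster.

```go
package main

import (
	"bytes"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Placeholder endpoint and credentials: use the values reported by your
	// vstart.sh / radosgw-admin setup.
	sess := session.Must(session.NewSession(&aws.Config{
		Region:           aws.String("default"),
		Endpoint:         aws.String("http://127.0.0.1:8000"),
		S3ForcePathStyle: aws.Bool(true),
		Credentials:      credentials.NewStaticCredentials("ACCESS_KEY", "SECRET_KEY", ""),
	}))
	svc := s3.New(sess)

	bucket := aws.String("example-bucket")
	if _, err := svc.CreateBucket(&s3.CreateBucketInput{Bucket: bucket}); err != nil {
		log.Printf("create bucket: %v (it may already exist)", err)
	}

	// Upload a small object from memory.
	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket: bucket,
		Key:    aws.String("hello.txt"),
		Body:   bytes.NewReader([]byte("hello rgw")),
	})
	if err != nil {
		log.Fatalf("put object: %v", err)
	}
	log.Println("object uploaded")
}
```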
This directory contains Java code examples on how to upload an object to an S3 bucket running on a Ceph RGW cluster.
# Prerequisites
-Linux machine running an RGW Ceph cluster. Preferrably started with the ``OSD=1 MON=1 MDS=0 MGR=0 RGW=1 ../src/vstart.sh --debug --new `` command.
+Linux machine running an RGW Ceph cluster. Preferably started with the ``OSD=1 MON=1 MDS=0 MGR=0 RGW=1 ../src/vstart.sh --debug --new `` command.
Java and Maven installed on the Linux machine.
## Workflow Procedure
passed in to the main task.
"""
assert isinstance(config, dict)
- log.info('Installing system dependenies for PyKMIP...')
+ log.info('Installing system dependencies for PyKMIP...')
packages = {}
for (client, _) in config.items():
Configuration is read from a skeleton config file
s3tests.teuth.config.yaml in the java-s3tests repository
and missing information is added from the task.
- Existing values are NOT overriden unless they are empty!
+ Existing values are NOT overridden unless they are empty!
"""
log.info("S3 Tests Java: Creating S3 users...")
testdir = teuthology.get_testdir(self.ctx)
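The merge rule in the docstring above (take the skeleton config as-is and only fill in values that are missing or empty from the task configuration) is simple enough to show in a few lines. A language-agnostic sketch in Go, with hypothetical names (fillMissing, skeleton, task) standing in for the actual Python task code:

```go
package main

import "fmt"

// fillMissing copies a value from src into dst only when dst has no entry
// for that key or the existing value is empty, i.e. existing values are
// NOT overridden unless they are empty.
func fillMissing(dst, src map[string]string) {
	for k, v := range src {
		if cur, ok := dst[k]; !ok || cur == "" {
			dst[k] = v
		}
	}
}

func main() {
	skeleton := map[string]string{"host": "", "port": "7280"}      // from the skeleton config file
	task := map[string]string{"host": "localhost", "port": "8000"} // supplied by the task

	fillMissing(skeleton, task)
	fmt.Println(skeleton) // map[host:localhost port:7280] -- host was filled in, port was kept
}
```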
@contextlib.contextmanager
def download(ctx, config):
"""
- Download Vault Release from Hashicopr website.
+ Download Vault Release from Hashicorp website.
Remove downloaded file upon exit.
"""
assert isinstance(config, dict)
=head1 DESCRIPTION
-This script intends to test the rgw bucket quota funcionality using s3 interface
+This script intends to test the rgw bucket quota functionality using s3 interface
and reports the test results
=head1 ARGUMENTS
=head1 DESCRIPTION
-This script intends to test the rgw user quota funcionality using s3 interface
+This script intends to test the rgw user quota functionality using s3 interface
and reports the test results
=head1 ARGUMENTS
# nfsns is the main suite
-# create herarchy, and then list it
+# create hierarchy, and then list it
echo "phase 1.1"
ceph_test_librgw_file_nfsns ${K} --hier1 --dirs1 --create --rename --verbose
# remove datacache dir
#cmd = exec_cmd('rm -rf %s' % (cache_dir))
#log.debug("RGW Datacache dir deleted")
- #^ commenting for future refrence - the work unit will continue running tests and if the cache_dir is removed
+ #^ commenting for future reference - the work unit will continue running tests and if the cache_dir is removed
# all the writes to cache will fail with errno 2 ENOENT No such file or directory.
main()
}
urgent_data.reserved_size += res_op.size + overhead;
- // note that last id is incremented regadless of failures
+ // note that last id is incremented regardless of failures
// to avoid "old reservation" issues below
++urgent_data.last_id;
bool result;
* So, the regular usage would be to create an object, to increase the refcount. Then, when
* wanting to have another reference to it, increase the refcount using a different tag. When
* removing a reference it is required to drop the refcount (using the same tag that was used
- * for that reference). When the refcount drops to zero, the object is removed automaticfally.
+ * for that reference). When the refcount drops to zero, the object is removed automatically.
*
* In order to maintain backwards compatibility with objects that were created without having
* their refcount increased, the implicit_ref was added. Any object that was created without
std::string start_after_omap_key;
encode_list_index_key(hctx, op.start_obj, &start_after_omap_key);
- // this is set whenenver start_after_omap_key is set to keep them in
+ // this is set whenever start_after_omap_key is set to keep them in
// sync since this will be the returned marker when a marker is
// returned
cls_rgw_obj_key start_after_entry_key;
* Additionally, each of the three segment functions, if successful,
* is expected to return the number of entries added to the output
* list as a non-negative value. As per usual, negative return values
- * indicate error condtions.
+ * indicate error conditions.
*/
static int rgw_bi_list_op(cls_method_context_t hctx,
bufferlist *in,
try {
decode(op, in_iter);
} catch (ceph::buffer::error& err) {
- CLS_LOG(1, "ERROR: rgw_cls_rehard_list(): failed to decode entry\n");
+ CLS_LOG(1, "ERROR: rgw_cls_reshard_list(): failed to decode entry\n");
return -EINVAL;
}
cls_rgw_reshard_list_ret op_ret;
try {
decode(entry, iter);
} catch (ceph::buffer::error& err) {
- CLS_LOG(1, "ERROR: rgw_cls_rehard_list(): failed to decode entry\n");
+ CLS_LOG(1, "ERROR: rgw_cls_reshard_list(): failed to decode entry\n");
return -EIO;
}
op_ret.entries.push_back(entry);
try {
decode(op, in_iter);
} catch (ceph::buffer::error& err) {
- CLS_LOG(1, "ERROR: rgw_cls_rehard_remove: failed to decode entry\n");
+ CLS_LOG(1, "ERROR: rgw_cls_reshard_remove: failed to decode entry\n");
return -EINVAL;
}
cleanup();
}
return ret;
-} // CLSRGWConcurrintIO::operator()()
+} // CLSRGWConcurrentIO::operator()()
/**
return true;
}
-// note: currently only called by tesing code
+// note: currently only called by testing code
void cls_rgw_bucket_init_index(ObjectWriteOperation& o)
{
bufferlist in;
/**
* Std::list the bucket with the starting object and filter prefix.
* NOTE: this method do listing requests for each bucket index shards identified by
- * the keys of the *list_results* std::map, which means the std::map should be popludated
+ * the keys of the *list_results* std::map, which means the std::map should be populated
* by the caller to fill with each bucket index object id.
*
* io_ctx - IO context for rados.
#define RGW_CLASS "rgw"
/* Special error code returned by cls bucket list operation if it was
- * unable to skip past enough not visibile entries to return any
+ * unable to skip past enough not visible entries to return any
* entries in the call. */
constexpr int RGWBIAdvanceAndRetryError = -EFBIG;
// if is_truncated is true, starting marker for next iteration; this
// is necessary as it's possible after maximum number of tries we
// still might have zero entries to return, in which case we have to
- // at least move the ball foward
+ // at least move the ball forward
cls_rgw_obj_key marker;
// cls_filtered is not transmitted; it is assumed true for versions
struct rgw_cls_bi_list_op {
uint32_t max;
- std::string name_filter; // limit resultto one object and its instances
+ std::string name_filter; // limit result to one object and its instances
std::string marker;
rgw_cls_bi_list_op() : max(0) {}
* path that ends with a delimiter and appends a new character to the
* end such that when a we request bucket-index entries *after* this,
* we'll get the next object after the "subdirectory". This works
- * because we append a '\xFF' charater, and no valid UTF-8 character
+ * because we append a '\xFF' character, and no valid UTF-8 character
* can contain that byte, so no valid entries can be skipped.
*/
inline std::string cls_rgw_after_delim(const std::string& path) {
Main = 1, // b-i entries for standard objs
- Shadow = 2, // presumfably intended for multipart shadow
+ Shadow = 2, // presumably intended for multipart shadow
// uploads; not currently used in the codebase
MultiMeta = 3, // b-i entries for multipart upload metadata objs
* Difference with above structure is that all
* the fields are strings here to accommodate any
* style identifiers used by backend db. By default
- * initialized with sqlitedb style, can be overriden
+ * initialized with sqlitedb style, can be overridden
* using InitPrepareParams()
*
* These identifiers are used in prepare and bind statements
REFERENCES '{}' (BucketName) ON DELETE CASCADE ON UPDATE CASCADE \n);";
static constexpr std::string_view CreateObjectViewQ =
- /* This query creats temporary view with entries from ObjectData table which have
+ /* This query creates temporary view with entries from ObjectData table which have
* corresponding head object (i.e, with same ObjName, ObjInstance, ObjNS, ObjID)
* in the Object table.
*
private:
/* For existing entries, -
* (1) INSERT or REPLACE - it will delete previous entry and then
- * inserts new one. Since it deletes previos enties, it will
- * trigger all foriegn key cascade deletes or other triggers.
+ * inserts new one. Since it deletes previous entries, it will
+ * trigger all foreign key cascade deletes or other triggers.
* (2) INSERT or UPDATE - this will set NULL values to unassigned
* fields.
* more info: https://code-examples.net/en/q/377728
if (ret)
ldpp_dout(dpp, 0)<<"CreateUserTable failed" << dendl;
- ldpp_dout(dpp, 20)<<"CreateUserTable suceeded" << dendl;
+ ldpp_dout(dpp, 20)<<"CreateUserTable succeeded" << dendl;
return ret;
}
if (ret)
ldpp_dout(dpp, 0)<<"CreateBucketTable failed " << dendl;
- ldpp_dout(dpp, 20)<<"CreateBucketTable suceeded " << dendl;
+ ldpp_dout(dpp, 20)<<"CreateBucketTable succeeded " << dendl;
return ret;
}
if (ret)
ldpp_dout(dpp, 0)<<"CreateObjectTable failed " << dendl;
- ldpp_dout(dpp, 20)<<"CreateObjectTable suceeded " << dendl;
+ ldpp_dout(dpp, 20)<<"CreateObjectTable succeeded " << dendl;
return ret;
}
if (ret)
ldpp_dout(dpp, 0)<<"CreateObjectTableTrigger failed " << dendl;
- ldpp_dout(dpp, 20)<<"CreateObjectTableTrigger suceeded " << dendl;
+ ldpp_dout(dpp, 20)<<"CreateObjectTableTrigger succeeded " << dendl;
return ret;
}
if (ret)
ldpp_dout(dpp, 0)<<"CreateObjectView failed " << dendl;
- ldpp_dout(dpp, 20)<<"CreateObjectView suceeded " << dendl;
+ ldpp_dout(dpp, 20)<<"CreateObjectView succeeded " << dendl;
return ret;
}
if (ret)
ldpp_dout(dpp, 0)<<"CreateQuotaTable failed " << dendl;
- ldpp_dout(dpp, 20)<<"CreateQuotaTable suceeded " << dendl;
+ ldpp_dout(dpp, 20)<<"CreateQuotaTable succeeded " << dendl;
return ret;
}
if (ret)
ldpp_dout(dpp, 0)<<"CreateObjectDataTable failed " << dendl;
- ldpp_dout(dpp, 20)<<"CreateObjectDataTable suceeded " << dendl;
+ ldpp_dout(dpp, 20)<<"CreateObjectDataTable succeeded " << dendl;
return ret;
}
ldpp_dout(dpp, 0)<<"CreateLCEntryTable failed" << dendl;
return ret;
}
- ldpp_dout(dpp, 20)<<"CreateLCEntryTable suceeded" << dendl;
+ ldpp_dout(dpp, 20)<<"CreateLCEntryTable succeeded" << dendl;
schema = CreateTableSchema("LCHead", &params);
ret = exec(dpp, schema.c_str(), NULL);
ldpp_dout(dpp, 0)<<"CreateLCHeadTable failed" << dendl;
(void)DeleteLCEntryTable(dpp, &params);
}
- ldpp_dout(dpp, 20)<<"CreateLCHeadTable suceeded" << dendl;
+ ldpp_dout(dpp, 20)<<"CreateLCHeadTable succeeded" << dendl;
return ret;
}
if (ret)
ldpp_dout(dpp, 0)<<"DeleteUserTable failed " << dendl;
- ldpp_dout(dpp, 20)<<"DeleteUserTable suceeded " << dendl;
+ ldpp_dout(dpp, 20)<<"DeleteUserTable succeeded " << dendl;
return ret;
}
if (ret)
ldpp_dout(dpp, 0)<<"DeletebucketTable failed " << dendl;
- ldpp_dout(dpp, 20)<<"DeletebucketTable suceeded " << dendl;
+ ldpp_dout(dpp, 20)<<"DeletebucketTable succeeded " << dendl;
return ret;
}
if (ret)
ldpp_dout(dpp, 0)<<"DeleteObjectTable failed " << dendl;
- ldpp_dout(dpp, 20)<<"DeleteObjectTable suceeded " << dendl;
+ ldpp_dout(dpp, 20)<<"DeleteObjectTable succeeded " << dendl;
return ret;
}
if (ret)
ldpp_dout(dpp, 0)<<"DeleteObjectDataTable failed " << dendl;
- ldpp_dout(dpp, 20)<<"DeleteObjectDataTable suceeded " << dendl;
+ ldpp_dout(dpp, 20)<<"DeleteObjectDataTable succeeded " << dendl;
return ret;
}
if (ret)
ldpp_dout(dpp, 0)<<"DeleteQuotaTable failed " << dendl;
- ldpp_dout(dpp, 20)<<"DeleteQuotaTable suceeded " << dendl;
+ ldpp_dout(dpp, 20)<<"DeleteQuotaTable succeeded " << dendl;
return ret;
}
ret = exec(dpp, schema.c_str(), NULL);
if (ret)
ldpp_dout(dpp, 0)<<"DeleteLCEntryTable failed " << dendl;
- ldpp_dout(dpp, 20)<<"DeleteLCEntryTable suceeded " << dendl;
+ ldpp_dout(dpp, 20)<<"DeleteLCEntryTable succeeded " << dendl;
return ret;
}
ret = exec(dpp, schema.c_str(), NULL);
if (ret)
ldpp_dout(dpp, 0)<<"DeleteLCHeadTable failed " << dendl;
- ldpp_dout(dpp, 20)<<"DeleteLCHeadTable suceeded " << dendl;
+ ldpp_dout(dpp, 20)<<"DeleteLCHeadTable succeeded " << dendl;
return ret;
}
if (ret)
ldpp_dout(dpp, 0)<<"GetUsertable failed " << dendl;
- ldpp_dout(dpp, 20)<<"GetUserTable suceeded " << dendl;
+ ldpp_dout(dpp, 20)<<"GetUserTable succeeded " << dendl;
return ret;
}
if (ret)
ldpp_dout(dpp, 0)<<"Listbuckettable failed " << dendl;
- ldpp_dout(dpp, 20)<<"ListbucketTable suceeded " << dendl;
+ ldpp_dout(dpp, 20)<<"ListbucketTable succeeded " << dendl;
return ret;
}
if (ret)
ldpp_dout(dpp, 0)<<"ListObjecttable failed " << dendl;
- ldpp_dout(dpp, 20)<<"ListObjectTable suceeded " << dendl;
+ ldpp_dout(dpp, 20)<<"ListObjectTable succeeded " << dendl;
}
return ret;
orig_info.user_id = info.user_id;
// XXX: we open and close motr idx 2 times in this method:
// 1) on load_user_from_idx() here and 2) on do_idx_op_by_name(PUT) below.
- // Maybe this can be optimised later somewhow.
+ // Maybe this can be optimised later somehow.
int rc = load_user_from_idx(dpp, store, orig_info, nullptr, &objv_tr);
ldpp_dout(dpp, 10) << "Get user: rc = " << rc << dendl;
int MotrBucket::remove_objs_from_index(const DoutPrefixProvider *dpp, std::list<rgw_obj_index_key>& objs_to_unlink)
{
- /* XXX: CHECK: Unlike RadosStore, there is no seperate bucket index table.
+ /* XXX: CHECK: Unlike RadosStore, there is no separate bucket index table.
* Delete all the objects in the list from the object table of this
* bucket
*/
if (prefix.size() &&
(0 != ent.key.name.compare(0, prefix.size(), prefix))) {
ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ <<
- ": skippping \"" << ent.key <<
+ ": skipping \"" << ent.key <<
"\" because doesn't match prefix" << dendl;
continue;
}
bufferlist bl;
rgw_bucket_dir_entry ent;
- // Set rgw_bucet_dir_entry. Some of the member of this structure may not
+ // Set rgw_bucket_dir_entry. Some of the members of this structure may not
// apply to motr. For example the storage_class.
//
// Checkout AtomicObjectProcessor::complete() in rgw_putobj_processor.cc
// Update the dir entry and insert it to the bucket index so
// the object will be seen when listing the bucket.
bufferlist update_bl;
- target_obj->get_key().get_index_key(&ent.key); // Change to offical name :)
+ target_obj->get_key().get_index_key(&ent.key); // Change to official name :)
ent.meta.size = off;
ent.meta.accounted_size = accounted_size;
ldpp_dout(dpp, 20) << "MotrMultipartUpload::complete(): obj size=" << ent.meta.size
return 0;
}
-// The following marcos are from dix/fid_convert.h which are not exposed.
+// The following macros are from dix/fid_convert.h which are not exposed.
enum {
M0_DIX_FID_DEVICE_ID_OFFSET = 32,
M0_DIX_FID_DIX_CONTAINER_MASK = (1ULL << M0_DIX_FID_DEVICE_ID_OFFSET)
// of RGW instances under heavy use. If you would like to turn off cache expiry,
// set this value to zero.
//
- // Currently POC hasn't implemented the watch-notify menchanism yet. So the
+ // Currently POC hasn't implemented the watch-notify mechanism yet. So the
// current implementation is similar to cortx-s3server which is based on expiry
// time. TODO: see comments on distribute_cache).
//
- // Beaware: Motr object data is not cached in current POC as RGW!
+ // Beware: Motr object data is not cached in current POC as RGW!
// RGW caches the first chunk (4MB by default).
ObjectCache cache;
// object part index and for each part an entry is created in extended index.
// The entry for the object is created in bucket (object list) index. The part
// index is deleted and an entry removed from bucket_nnn_multipart_index. Like
-// bucket multipart index, bucket part extened metadata index is created during
+// bucket multipart index, bucket part extended metadata index is created during
// bucket creation.
//
// The extended metadata index is used mainly due to fault tolerant
};
/**
- * store a list of the user's buckets, with associated functinos.
+ * store a list of the user's buckets, with associated functions.
*/
class RGWUserBuckets {
std::map<std::string, RGWBucketEnt> buckets;
} else {
// r >= 0
if (bytes_transferred) {
- // send notification that object was succesfully synced
+ // send notification that object was successfully synced
std::string user_id = "rgw sync";
std::string req_id = "0";
// Check whether fclose returned an error
if (r != 0) {
- ldout(cct, 0) << "ERROR: D3nDataCache::fclsoe file has return error, errno=" << errno << dendl;
+ ldout(cct, 0) << "ERROR: D3nDataCache::fclose file has return error, errno=" << errno << dendl;
return -errno;
}
- { // update cahce_map entries for new chunk in cache
+ { // update cache_map entries for new chunk in cache
const std::lock_guard l(d3n_cache_lock);
chunk_info = new D3nChunkDataInfo;
chunk_info->oid = oid;
struct D3nChunkDataInfo* chdo = iter->second;
struct stat st;
int r = stat(location.c_str(), &st);
- if ( r != -1 && st.st_size == len) { // file exists and containes required data range length
+ if ( r != -1 && st.st_size == len) { // file exists and contains required data range length
exist = true;
/*LRU*/
/*get D3nChunkDataInfo*/
class CheckAllBucketShardStatusIsIncremental : public RGWShardCollectCR {
// start with 1 shard, and only spawn more if we detect an existing shard.
- // this makes the backward compatilibility check far less expensive in the
+ // this makes the backward compatibility check far less expensive in the
// general case where no shards exist
static constexpr int initial_concurrent_shards = 1;
static constexpr int max_concurrent_shards = 16;
* create index from key -> <op, marker>, and from marker -> key
* this is useful so that we can ensure that we only have one
* entry for any key that is used. This is needed when doing
- * incremenatl sync of data, and we don't want to run multiple
+ * incremental sync of data, and we don't want to run multiple
* concurrent sync operations for the same bucket shard
* Also, we should make sure that we don't run concurrent operations on the same key with
* different ops.
return set_cr_error(retcode);
}
if (bucket_status.state != BucketSyncState::Stopped) {
- // make sure that state is changed to stopped localy
+ // make sure that state is changed to stopped locally
bucket_status.state = BucketSyncState::Stopped;
yield call(new WriteCR(dpp, env->driver, status_obj, bucket_status,
&objv, false));
auto status = _get_change(bs, gen);
l.unlock();
- ldout(cct, 20) << "RGWDataChangesLog::update_renewd() bucket_name="
+ ldout(cct, 20) << "RGWDataChangesLog::update_renewed() bucket_name="
<< bs.bucket.name << " shard_id=" << bs.shard_id
<< " expiration=" << expiration << dendl;
rest_obj.content_len = m_part_size;
}
- /* For mulitpart attrs are sent as part of InitMultipartCR itself */
+ /* For multipart attrs are sent as part of InitMultipartCR itself */
if (multipart) {
return 0;
}
std::vector<std::string> entryProcessingResultString = {"Failure", "Successful", "Sleeping", "Expired", "Migrating"};
// processing of a specific entry
- // return whether processing was successfull (true) or not (false)
+ // return whether processing was successful (true) or not (false)
EntryProcessingResult process_entry(const ConfigProxy& conf, persistency_tracker& entry_persistency_tracker,
const cls_queue_entry& entry, yield_context yield) {
event_entry_t event_entry;
for (const auto& queue_name : queues) {
// try to lock the queue to check if it is owned by this rgw
- // or if ownershif needs to be taken
+ // or if ownership needs to be taken
librados::ObjectWriteOperation op;
op.assert_exists();
rados::cls::lock::lock(&op, queue_name+"_lock",
// start processing this queue
spawn::spawn(io_context, [this, &queue_gc, &queue_gc_lock, queue_name](yield_context yield) {
process_queue(queue_name, yield);
- // if queue processing ended, it measn that the queue was removed or not owned anymore
+ // if queue processing ended, it means that the queue was removed or not owned anymore
// mark it for deletion
std::lock_guard lock_guard(queue_gc_lock);
queue_gc.push_back(queue_name);
return remove_persistent_topic(s_manager, s_manager->rados_store.getRados()->get_notif_pool_ctx(), topic_name, y);
}
-rgw::sal::Object* get_object_with_atttributes(
+rgw::sal::Object* get_object_with_attributes(
const reservation_t& res, rgw::sal::Object* obj) {
// in case of copy obj, the tags and metadata are taken from source
const auto src_obj = res.src_object ? res.src_object : obj;
static inline void metadata_from_attributes(
reservation_t& res, rgw::sal::Object* obj) {
auto& metadata = res.x_meta_map;
- const auto src_obj = get_object_with_atttributes(res, obj);
+ const auto src_obj = get_object_with_attributes(res, obj);
if (!src_obj) {
return;
}
static inline void tags_from_attributes(
const reservation_t& res, rgw::sal::Object* obj, KeyMultiValueMap& tags) {
- const auto src_obj = get_object_with_atttributes(res, obj);
+ const auto src_obj = get_object_with_attributes(res, obj);
if (!src_obj) {
return;
}
namespace rgw::notify {
// initialize the notification manager
-// notification manager is dequeing the 2-phase-commit queues
+// notification manager is dequeuing the 2-phase-commit queues
// and send the notifications to the endpoints
bool init(CephContext* cct, rgw::sal::RadosStore* store, const DoutPrefixProvider *dpp);
* Such race condition is caused by the fact that the multipart object is the gatekeeper of a multipart
* upload, when it is deleted, a second upload would start with the same suffix("2/"), therefore, objects
* written by the second upload may be deleted by the first upload.
- * details is describled on #11749
+ * details are described in #11749
*
* The above comment still stands, but instead of searching for a specific object in the multipart
* namespace, we just make sure that we remove the object that is marked as the head object after
if (cur_end_marker_valid && cur_end_marker <= index_key) {
truncated = false;
ldpp_dout(dpp, 10) << __func__ <<
- ": finished due to gitting end marker of \"" << cur_end_marker <<
+ ": finished due to getting end marker of \"" << cur_end_marker <<
"\" with \"" << entry.key << "\"" << dendl;
goto done;
}
if (!params.list_versions && !entry.is_visible()) {
ldpp_dout(dpp, 20) << __func__ <<
- ": skippping \"" << index_key <<
- "\" because not listing versions and entry not visibile" << dendl;
+ ": skipping \"" << index_key <<
+ "\" because not listing versions and entry not visible" << dendl;
continue;
}
if (params.enforce_ns && obj.ns != params.ns) {
ldpp_dout(dpp, 20) << __func__ <<
- ": skippping \"" << index_key <<
+ ": skipping \"" << index_key <<
"\" because namespace does not match" << dendl;
continue;
}
// we're not guaranteed items will come in order, so we have
// to loop through all
ldpp_dout(dpp, 20) << __func__ <<
- ": skippping \"" << index_key <<
+ ": skipping \"" << index_key <<
"\" because after end_marker" << dendl;
continue;
}
if (params.access_list_filter &&
!params.access_list_filter->filter(obj.name, index_key.name)) {
ldpp_dout(dpp, 20) << __func__ <<
- ": skippping \"" << index_key <<
+ ": skipping \"" << index_key <<
"\" because doesn't match filter" << dendl;
continue;
}
if (params.prefix.size() &&
(0 != obj.name.compare(0, params.prefix.size(), params.prefix))) {
ldpp_dout(dpp, 20) << __func__ <<
- ": skippping \"" << index_key <<
+ ": skipping \"" << index_key <<
"\" because doesn't match prefix" << dendl;
continue;
}
/* we lost in a race. There are a few options:
* - existing object was rewritten (ECANCELED)
- * - non existing object was created (EEXIST)
+ * - nonexistent object was created (EEXIST)
* - object was removed (ENOENT)
* should treat it as a success
*/
if (src_attrs.count(RGW_ATTR_CRYPT_MODE)) {
// Current implementation does not follow S3 spec and even
// may result in data corruption silently when copying
- // multipart objects acorss pools. So reject COPY operations
+ // multipart objects across pools. So reject COPY operations
//on encrypted objects before it is fully functional.
ldpp_dout(dpp, 0) << "ERROR: copy op for encrypted object " << src_obj
<< " has not been implemented." << dendl;
for (auto& r : shard_list_results) {
results_trackers.emplace_back(r.first, r.second, shard_oids[r.first]);
- // if any *one* shard's result is trucated, the entire result is
+ // if any *one* shard's result is truncated, the entire result is
// truncated
*is_truncated = *is_truncated || r.second.is_truncated;
RGWHandler_REST* get_handler(rgw::sal::Driver* driver,
req_state* const,
const rgw::auth::StrategyRegistry& auth_registry,
- const std::string& frontend_prefixs) override {
+ const std::string& frontend_prefixes) override {
return new RGWHandler_Log(auth_registry);
}
};
RGWObjVersionTracker ot;
// if we deleted children above we will force delete, as any that
- // remain is detrius from a prior bug
+ // remain is detritus from a prior bug
ret = store->getRados()->delete_bucket(info, ot, y, dpp, !delete_children);
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: could not remove bucket " <<
librados::ObjectWriteOperation op;
size_t pos = package_name.find(" ");
if (pos != package_name.npos) {
- // remove specfic version of the the package
+ // remove specific version of the package
op.omap_rm_keys(std::set<std::string>({package_name}));
auto ret = rgw_rados_operate(dpp, ioctx,
PACKAGE_LIST_OBJECT_NAME, &op, y);
sync_marker, tn));
/*
- * mdlog_marker: the remote sync marker positiion
+ * mdlog_marker: the remote sync marker position
* sync_marker: the local sync marker position
* max_marker: the max mdlog position that we fetched
* marker: the current position we try to sync
# anything below here is for non trivial configuration
- # can be used in conjuction with the above
+ # can be used in conjunction with the above
"default": {
"connection": {
target path optional variables:
(evaluated at init)
-sid: sync instance id, randomly generated by sync process on first sync initalization
+sid: sync instance id, randomly generated by sync process on first sync initialization
zonegroup: zonegroup name
zonegroup_id: zonegroup name
zone: zone name
f->open_array_section("custom-date");
for (auto i : custom_date) {
/*
- * try to exlicitly parse date field, otherwise elasticsearch could reject the whole doc,
+ * try to explicitly parse date field, otherwise elasticsearch could reject the whole doc,
* which will end up with failed sync
*/
real_time t;
// TODO: Maybe convert hostnames to a map<std::string,std::list<std::string>> for
// endpoint_type->hostnames
/*
-20:05 < _robbat21irssi> maybe I do someting like: if (hostname_map.empty()) { populate all map keys from hostnames; };
+20:05 < _robbat21irssi> maybe I do something like: if (hostname_map.empty()) { populate all map keys from hostnames; };
20:05 < _robbat21irssi> but that's a later compatability migration planning bit
20:06 < yehudasa> more like if (!hostnames.empty()) {
20:06 < yehudasa> for (std::list<std::string>::iterator iter = hostnames.begin(); iter != hostnames.end(); ++iter) {
namespace helper {
inline
std::string extract_pubkey_from_cert(const std::string& certstr, const std::string& pw = "") {
- // TODO: Cannot find the exact version this change happended
+ // TODO: Cannot find the exact version this change happened
#if OPENSSL_VERSION_NUMBER <= 0x1000114fL
std::unique_ptr<BIO, decltype(&BIO_free_all)> certbio(BIO_new_mem_buf(const_cast<char*>(certstr.data()), certstr.size()), BIO_free_all);
#else
return alg_name;
}
private:
- /// HMAC secrect
+ /// HMAC secret
const std::string secret;
/// HMAC hash generator
const EVP_MD*(*md)();
/**
* Get type of contained object
* \return Type
- * \throws std::logic_error An internal error occured
+ * \throws std::logic_error An internal error occurred
*/
type get_type() const {
if (val.is<picojson::null>()) return type::null;
std::unordered_map<std::string, claim> header_claims;
public:
/**
- * Check if algortihm is present ("alg")
+ * Check if algorithm is present ("alg")
* \return true if present, false otherwise
*/
bool has_algorithm() const noexcept { return has_header_claim("alg"); }
*/
class decoded_jwt : public header, public payload {
protected:
- /// Unmodifed token, as passed to constructor
+ /// Unmodified token, as passed to constructor
const std::string token;
/// Header part decoded from base64
std::string header;
# Last revision 2023-01-13
-# NOTE: This script based based on rgw-orphan-list but doing the
+# NOTE: This script is based on rgw-orphan-list but doing the
# reverse calculation.
# NOTE: The awk included in this script replaces the 'ceph-diff-sorted'
fi
done
-# Create an awk script in a file for parsing the two command outoputs.
+# Create an awk script in a file for parsing the two command outputs.
log "Creating awk script for comparing outputs: ${incremental_grep_awk}"
cat <<"EOF" >$incremental_grep_awk
# indicates a possible deleted tail object and the accompanying
# bucket / user object name is output, assuming it had not been
# previously identified.
-# - A map of outputed bucket / user object is maintained in memory
+# - A map of outputted bucket / user object is maintained in memory
# * If a value appears in $rados_out, but not in $rgwadmin_out, the
# $rados_out file is iterated until the $rados_out line is equal
# or > (alphabetically) the value from the $rgwadmin_out file.
namespace_found=1
fi
- # check for locators (w/o namespace); we idenitfy them by skipping
+ # check for locators (w/o namespace); we identify them by skipping
# past the empty namespace (i.e., one TAB), skipping past the oid,
# then looking for a TAB; note we use egrep to get the '+' character
# and the $ in front of the ' allows the \t to be interpreted as a TAB
std::vector<std::string> readwrite;
std::vector<std::string> readonly;
- /* Parition the grant map into three not-overlapping groups. */
+ /* Partition the grant map into three not-overlapping groups. */
for (const auto& item : get_acl().get_grant_map()) {
const ACLGrant& grant = item.second;
const uint32_t perm = grant.get_permission().get_permissions();
for (auto& zone_id : zone_ids) {
auto z = static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->get_zonegroup().zones.find(zone_id.id);
- if (z == static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->get_zonegroup().zones.end()) { /* should't happen */
+ if (z == static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->get_zonegroup().zones.end()) { /* shouldn't happen */
continue;
}
auto c = zone_conn_map.find(zone_id.id);
- if (c == zone_conn_map.end()) { /* should't happen */
+ if (c == zone_conn_map.end()) { /* shouldn't happen */
continue;
}
do {
entries.clear();
- // if object is specified, we use that as a filter to only retrieve some some entries
+ // if object is specified, we use that as a filter to only retrieve some entries
ret = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->bi_list(bs, object, marker, max_entries, &entries, &is_truncated, null_yield);
if (ret < 0) {
ldpp_dout(dpp(), 0) << "ERROR: bi_list(): " << cpp_strerror(-ret) << dendl;
}
}
-// TODO: add status_to_string on the connection object to prinf full status
+// TODO: add status_to_string on the connection object to print full status
// convert int status to string - including RGW specific values
std::string status_to_string(int s) {
// TODO: add exponential backoff for retries
conn->next_reconnect = now + reconnect_time;
} else {
- ldout(cct, 10) << "AMQP run: connection '" << to_string(conn_id) << "' retry successfull" << dendl;
+ ldout(cct, 10) << "AMQP run: connection '" << to_string(conn_id) << "' retry successful" << dendl;
}
}
INCREMENT_AND_CONTINUE(conn_it);
size_t ClientIO::send_100_continue()
{
- const char HTTTP_100_CONTINUE[] = "HTTP/1.1 100 CONTINUE\r\n\r\n";
- const size_t sent = txbuf.sputn(HTTTP_100_CONTINUE,
- sizeof(HTTTP_100_CONTINUE) - 1);
+ const char HTTP_100_CONTINUE[] = "HTTP/1.1 100 CONTINUE\r\n\r\n";
+ const size_t sent = txbuf.sputn(HTTP_100_CONTINUE,
+ sizeof(HTTP_100_CONTINUE) - 1);
flush();
sent100continue = true;
return sent;
return endpoint;
}
if (addr_end + 1 < input.size()) {
- // :port must must follow [ipv6]
+ // :port must follow [ipv6]
if (input[addr_end + 1] != ':') {
ec = boost::asio::error::invalid_argument;
return endpoint;
reason(reason) {
}
- /* Allow only the reasonable combintations - returning just Completer
+ /* Allow only the reasonable combinations - returning just Completer
* without accompanying IdentityApplier is strictly prohibited! */
explicit AuthResult(IdentityApplier::aplptr_t&& applier)
: result_pair(std::move(applier), nullptr) {
/* Engine doesn't grant the access but also doesn't reject it. */
DENIED,
- /* Engine successfully authenicated requester. */
+ /* Engine successfully authenticated requester. */
GRANTED,
/* Engine strictly indicates that a request should be rejected
/* Abstract class for stacking sub-engines to expose them as a single
* Engine. It is responsible for ordering its sub-engines and managing
- * fall-backs between them. Derivatee is supposed to encapsulate engine
+ * fall-backs between them. Derivative is supposed to encapsulate engine
* instances and add them using the add_engine() method in the order it
* wants to be tried during the call to authenticate().
*
const string_to_sign_t& string_to_sign,
const signature_factory_t& signature_factory,
const completer_factory_t& completer_factory,
- /* Passthorugh only! */
+ /* Passthrough only! */
const req_state* s,
optional_yield y) const
{
rgw_zone_id(std::string&& _id) : id(std::move(_id)) {}
void encode(ceph::buffer::list& bl) const {
- /* backward compatiblity, not using ENCODE_{START,END} macros */
+ /* backward compatibility, not using ENCODE_{START,END} macros */
ceph::encode(id, bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
- /* backward compatiblity, not using DECODE_{START,END} macros */
+ /* backward compatibility, not using DECODE_{START,END} macros */
ceph::decode(id, bl);
}
// S3 authorization and some other processes depending on the requestURI
// The absoluteURI can start with "http://", "https://", "ws://" or "wss://"
static string get_abs_path(const string& request_uri) {
- const static string ABS_PREFIXS[] = {"http://", "https://", "ws://", "wss://"};
+ const static string ABS_PREFIXES[] = {"http://", "https://", "ws://", "wss://"};
bool isAbs = false;
for (int i = 0; i < 4; ++i) {
- if (boost::algorithm::starts_with(request_uri, ABS_PREFIXS[i])) {
+ if (boost::algorithm::starts_with(request_uri, ABS_PREFIXES[i])) {
isAbs = true;
break;
}
/* TODO(rzarzynski): there never ever should be a check like this one.
* It's here only for the sake of the patch's backportability. Further
* commits will move the logic to a per-RGWHandler replacement of
- * the end_header() function. Alternativaly, we might consider making
+ * the end_header() function. Alternatively, we might consider making
* that just for the dump(). Please take a look on @cbodley's comments
* in PR #10690 (https://github.com/ceph/ceph/pull/10690). */
s->err.err_code = err_msg;
encode_json("size", size, f);
encode_json("size_rounded", size_rounded, f);
utime_t ut(creation_time);
- encode_json("mtime", ut, f); /* mtime / creation time discrepency needed for backward compatibility */
+ encode_json("mtime", ut, f); /* mtime / creation time discrepancy needed for backward compatibility */
encode_json("count", count, f);
encode_json("placement_rule", placement_rule.to_str(), f);
}
const RGWBucketInfo& _bucket_info,
int _perm_mask,
bool _defer_to_bucket_acls,
- boost::optional<PublicAccessBlockConfiguration> _bucket_acess_conf = boost::none) :
+ boost::optional<PublicAccessBlockConfiguration> _bucket_access_conf = boost::none) :
cct(_cct),
env(_env),
identity(_identity),
bucket_info(_bucket_info),
perm_mask(_perm_mask),
defer_to_bucket_acls(_defer_to_bucket_acls),
- bucket_access_conf(_bucket_acess_conf)
+ bucket_access_conf(_bucket_access_conf)
{}
virtual ~perm_state_base() {}
extern void url_encode(const std::string& src, std::string& dst,
bool encode_slash = true);
extern std::string url_encode(const std::string& src, bool encode_slash = true);
-extern std::string url_remove_prefix(const std::string& url); // Removes hhtp, https and www from url
+extern std::string url_remove_prefix(const std::string& url); // Removes http, https and www from url
/* destination should be CEPH_CRYPTO_HMACSHA1_DIGESTSIZE bytes long */
extern void calc_hmac_sha1(const char *key, int key_len,
const char *msg, int msg_len, char *dest);
* I've left some commented out lines above. They are there for
* a reason, which I will explain. The "canonical" json constructed
* by the code above as a crypto context must take a json object and
-* turn it into a unique determinstic fixed form. For most json
+* turn it into a unique deterministic fixed form. For most json
* types this is easy. The hardest problem that is handled above is
* dealing with unicode strings; they must be turned into
* NFC form and sorted in a fixed order. Numbers, however,
sz = stoull(iter->second);
} catch(...){
/* it is very unlikely that we'll ever encounter out_of_range, but let's
- return the default eitherway */
+ return the default either way */
sz = def_val;
}
LookupFHResult fhr{nullptr, 0};
/* XXX the need for two round-trip operations to identify file or
- * directory leaf objects is unecessary--the current proposed
+ * directory leaf objects is unnecessary--the current proposed
* mechanism to avoid this is to store leaf object names with an
* object locator w/o trailing slash */
* leaf object
*/
if (! rgw_fh) {
- /* XXX for now, peform a hard lookup to deduce the type of
+ /* XXX for now, perform a hard lookup to deduce the type of
* object to be deleted ("foo" vs. "foo/")--also, ensures
* atomicity at this endpoint */
struct rgw_file_handle *fh;
if (factory == nullptr) {
return false;
}
- /* make sure the reclaiming object is the same partiton with newobject factory,
+ /* make sure the reclaiming object is the same partition with newobject factory,
* then we can recycle the object, and replace with newobject */
if (!fs->fh_cache.is_same_partition(factory->fhk.fh_hk.object, fh.fh_hk.object)) {
return false;
auto k = pp->tokens.lookup(s, l);
Policy& p = pp->policy;
bool is_action = false;
- bool is_validaction = false;
+ bool is_valid_action = false;
Statement* t = p.statements.empty() ? nullptr : &(p.statements.back());
// Top level!
(w->id == TokenID::NotAction)) {
is_action = true;
if (*s == '*') {
- is_validaction = true;
+ is_valid_action = true;
(w->id == TokenID::Action ?
t->action = allValue : t->notaction = allValue);
} else {
for (auto& p : actpairs) {
if (match_policy({s, l}, p.name, MATCH_POLICY_ACTION)) {
- is_validaction = true;
+ is_valid_action = true;
(w->id == TokenID::Action ? t->action[p.bit] = 1 : t->notaction[p.bit] = 1);
}
if ((t->action & s3AllValue) == s3AllValue) {
pp->s.pop_back();
}
- if (is_action && !is_validaction) {
+ if (is_action && !is_valid_action) {
annotate(fmt::format("`{}` is not a valid action.",
std::string_view{s, l}));
return false;
// (2) check performance of emptying queue to local list, and go over the list and publish
// (3) use std::shared_mutex (c++17) or equivalent for the connections lock
-// cmparisson operator between topic pointer and name
+// comparison operator between topic pointer and name
bool operator==(const rd_kafka_topic_t* rkt, const std::string& name) {
return name == std::string_view(rd_kafka_topic_name(rkt));
}
CallbackList callbacks;
const std::string broker;
const bool use_ssl;
- const bool verify_ssl; // TODO currently iognored, not supported in librdkafka v0.11.6
+ const bool verify_ssl; // TODO currently ignored, not supported in librdkafka v0.11.6
const boost::optional<std::string> ca_location;
const std::string user;
const std::string password;
return false;
}
- // get list of brokers based on the bootsrap broker
+ // get list of brokers based on the bootstrap broker
if (rd_kafka_conf_set(conn->temp_conf, "bootstrap.servers", conn->broker.c_str(), errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) goto conf_error;
if (conn->use_ssl) {
auto& conn = conn_it->second;
- // Checking the connection idlesness
+ // Checking the connection idleness
if(conn->timestamp.sec() + max_idle_time < ceph_clock_now()) {
ldout(conn->cct, 20) << "kafka run: deleting a connection due to idle behaviour: " << ceph_clock_now() << dendl;
std::lock_guard lock(connections_lock);
// TODO: add error counter for failed retries
// TODO: add exponential backoff for retries
} else {
- ldout(conn->cct, 10) << "Kafka run: connection (" << broker << ") retry successfull" << dendl;
+ ldout(conn->cct, 10) << "Kafka run: connection (" << broker << ") retry successful" << dendl;
}
++conn_it;
continue;
std::lock_guard lock(connections_lock);
const auto it = connections.find(broker);
- // note that ssl vs. non-ssl connection to the same host are two separate conenctions
+ // note that ssl vs. non-ssl connection to the same host are two separate connections
if (it != connections.end()) {
// connection found - return even if non-ok
ldout(cct, 20) << "Kafka connect: connection found" << dendl;
optional_yield y,
std::string& token)
{
- /* Let's check whether someone uses the deprecated "admin token" feauture
+ /* Let's check whether someone uses the deprecated "admin token" feature
* based on a shared secret from keystone.conf file. */
const auto& admin_token = config.get_admin_token();
if (! admin_token.empty()) {
* speaks in v2 disregarding the promise to go with v3. */
decode_v3(*token_iter);
- /* Identity v3 conveys the token inforamtion not as a part of JSON but
+ /* Identity v3 conveys the token information not as a part of JSON but
* in the X-Subject-Token HTTP header we're getting from caller. */
token.id = token_str;
} else {
} else if (version == rgw::keystone::ApiVersion::VER_3) {
if (! token_iter.end()) {
decode_v3(*token_iter);
- /* v3 suceeded. We have to fill token.id from external input as it
+ /* v3 succeeded. We have to fill token.id from external input as it
* isn't a part of the JSON response anymore. It has been moved
* to X-Subject-Token HTTP header instead. */
token.id = token_str;
KeyBlock *kp = static_cast<SymmetricKey *>(pld->object)->key_block;
ByteString *bp;
if (kp->key_format_type != KMIP_KEYFORMAT_RAW) {
- lderr(cct) << "get: expected raw key fromat got " << kp->key_format_type << dendl;
+ lderr(cct) << "get: expected raw key format got " << kp->key_format_type << dendl;
element.ret = -EINVAL;
goto Done;
}
SimpleBackoff shard_lock(5 /* max retries */, 50ms);
if (! shard_lock.wait_backoff(lock_lambda)) {
- ldpp_dout(this, 0) << "RGWLC::process(): failed to aquire lock on "
+ ldpp_dout(this, 0) << "RGWLC::process(): failed to acquire lock on "
<< lc_shard << " after " << shard_lock.get_retries()
<< dendl;
return 0;
/* postamble */
//bucket_lc_post(index, max_lock_secs, entry, ret, worker);
if (! shard_lock.wait_backoff(lock_lambda)) {
- ldpp_dout(this, 0) << "RGWLC::process(): failed to aquire lock on "
+ ldpp_dout(this, 0) << "RGWLC::process(): failed to acquire lock on "
<< lc_shard << " after " << shard_lock.get_retries()
<< dendl;
return 0;
void RGWLibProcess::handle_request(const DoutPrefixProvider *dpp, RGWRequest* r)
{
/*
- * invariant: valid requests are derived from RGWLibRequst
+ * invariant: valid requests are derived from RGWLibRequest
*/
RGWLibRequest* req = static_cast<RGWLibRequest*>(r);
public:
CephContext* cct;
- /* unambiguiously return req_state */
+ /* unambiguously return req_state */
inline req_state* get_state() { return this->RGWRequest::s; }
RGWLibRequest(CephContext* _cct, std::unique_ptr<rgw::sal::User> _user)
void resume(rgw::sal::Driver* _driver) override;
};
-} //namepsace rgw::lua
+} //namespace rgw::lua
namespace rgw::lua {
-// TODO - add the folowing generic functions
+// TODO - add the following generic functions
// lua_push(lua_State* L, const std::string& str)
// template<typename T> lua_push(lua_State* L, const std::optional<T>& val)
// lua_push(lua_State* L, const ceph::real_time& tp)
// following struct may be used as a base class for other MetaTable classes
// note, however, this is not mandatory to use it as a base
struct EmptyMetaTable {
- // by default everythinmg is "readonly"
+ // by default everything is "readonly"
// to change, overload this function in the derived
static int NewIndexClosure(lua_State* L) {
return luaL_error(L, "trying to write to readonly field");
}
}
- /* Swift requries checking on raw usage instead of the 4 KiB rounded one. */
+ /* Swift requires checking on raw usage instead of the 4 KiB rounded one. */
quota.check_on_raw = true;
quota.enabled = quota.max_size > 0 || quota.max_objects > 0;
const size_t start_pos = path.find_first_not_of('/');
if (std::string_view::npos != start_pos) {
- /* Seperator is the first slash after the leading ones. */
+ /* Separator is the first slash after the leading ones. */
const size_t sep_pos = path.substr(start_pos).find('/');
if (std::string_view::npos != sep_pos) {
RGWBulkUploadOp::AlignedStreamGetter::~AlignedStreamGetter()
{
- const size_t aligned_legnth = length + (-length % alignment);
+ const size_t aligned_length = length + (-length % alignment);
ceph::bufferlist junk;
- DecoratedStreamGetter::get_exactly(aligned_legnth - position, junk);
+ DecoratedStreamGetter::get_exactly(aligned_length - position, junk);
}
ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_at_most(const size_t want,
return -ENAMETOOLONG;
}
- /* Similar remarks apply to the check for value size. We're veryfing
+ /* Similar remarks apply to the check for value size. We're verifying
* it early at the RGW's side as it's being claimed in /info. */
const auto max_attr_size = cct->_conf->rgw_max_attr_size;
if (max_attr_size && xattr.length() > max_attr_size) {
pcb->add_u64(l_rgw_pubsub_push_pending, "pubsub_push_pending", "Pubsub events pending reply from endpoint");
pcb->add_u64_counter(l_rgw_pubsub_missing_conf, "pubsub_missing_conf", "Pubsub events could not be handled because of missing configuration");
- pcb->add_u64_counter(l_rgw_lua_script_ok, "lua_script_ok", "Successfull executions of Lua scripts");
+ pcb->add_u64_counter(l_rgw_lua_script_ok, "lua_script_ok", "Successful executions of Lua scripts");
pcb->add_u64_counter(l_rgw_lua_script_fail, "lua_script_fail", "Failed executions of Lua scripts");
pcb->add_u64(l_rgw_lua_current_vms, "lua_current_vms", "Number of Lua VMs currently being executed");
}
/// and return an iterator to the merged history
Set::iterator merge(Set::iterator dst, Set::iterator src);
- /// construct a Cursor object using Cursor's private constuctor
+ /// construct a Cursor object using Cursor's private constructor
Cursor make_cursor(Set::const_iterator history, epoch_t epoch);
CephContext *const cct;
return ret ;
}
- // remove all auto-genrated topics
+ // remove all auto-generated topics
for (const auto& topic : bucket_topics.topics) {
const auto& topic_name = topic.first;
ret = ps.remove_topic(dpp, topic_name, y);
#define dout_prefix (*_dout << "rgw realm reloader: ")
-// safe callbacks from SafeTimer are unneccessary. reload() can take a long
+// safe callbacks from SafeTimer are unnecessary. reload() can take a long
// time, so we don't want to hold the mutex and block handle_notify() for the
// duration
static constexpr bool USE_SAFE_TIMER_CALLBACKS = false;
hostnames_set.erase(""); // filter out empty hostnames
ldout(cct, 20) << "RGW hostnames: " << hostnames_set << dendl;
/* TODO: We should have a sanity check that no hostname matches the end of
- * any other hostname, otherwise we will get ambigious results from
+ * any other hostname, otherwise we will get ambiguous results from
* rgw_find_host_in_domains.
* Eg:
* Hostnames: [A, B.A]
* Inputs: [Z.A, X.B.A]
* Z.A clearly splits to subdomain=Z, domain=Z
- * X.B.A ambigously splits to both {X, B.A} and {X.B, A}
+ * X.B.A ambiguously splits to both {X, B.A} and {X.B, A}
*/
zone_group.get_s3website_hostnames(names);
dest_uri = redirect_base;
/*
- * reqest_uri is always start with slash, so we need to remove
+ * request_uri always starts with a slash, so we need to remove
* the unnecessary slash at the end of dest_uri.
*/
if (dest_uri[dest_uri.size() - 1] == '/') {
const char* const buf,
const size_t len)
{
- bool healthchk = false;
+ bool healthcheck = false;
// we dont want to limit health checks
if(s->op_type == RGW_OP_GET_HEALTH_CHECK)
- healthchk = true;
- if(len > 0 && !healthchk) {
+ healthcheck = true;
+ if(len > 0 && !healthcheck) {
const char *method = s->info.method;
s->ratelimit_data->decrease_bytes(method, s->ratelimit_user_name, len, &s->user_ratelimit);
if(!rgw::sal::Bucket::empty(s->bucket.get()))
} catch (rgw::io::Exception& e) {
return -e.code().value();
}
- bool healthchk = false;
+ bool healthcheck = false;
// we dont want to limit health checks
if(s->op_type == RGW_OP_GET_HEALTH_CHECK)
- healthchk = true;
- if(len > 0 && !healthchk) {
+ healthcheck = true;
+ if(len > 0 && !healthcheck) {
const char *method = s->info.method;
s->ratelimit_data->decrease_bytes(method, s->ratelimit_user_name, len, &s->user_ratelimit);
if(!rgw::sal::Bucket::empty(s->bucket.get()))
// create unique topic name. this has 2 reasons:
// (1) topics cannot be shared between different S3 notifications because they hold the filter information
- // (2) make topic clneaup easier, when notification is removed
+ // (2) make topic cleanup easier, when notification is removed
const auto unique_topic_name = topic_to_unique(topic_name, notif_name);
// generate the internal topic. destination is stored here for the "push-only" case
// when no subscription exists
sval = s->info.args.get("global", &exists);
if (exists) {
if (!boost::iequals(sval,"true") && !boost::iequals(sval,"false")) {
- ldpp_dout(this, 20) << "global is not equal to true or faslse" << dendl;
+ ldpp_dout(this, 20) << "global is not equal to true or false" << dendl;
op_ret = -EINVAL;
return;
}
}
kms_attr_seen = true;
} else if (!rest_only && kms_master_key_id != "") {
-ldpp_dout(s, 20) << "get_encryption_defaults: no kms_attr, but kms_master_key_id = " << kms_master_key_id << ", settig kms_attr_seen" << dendl;
+ldpp_dout(s, 20) << "get_encryption_defaults: no kms_attr, but kms_master_key_id = " << kms_master_key_id << ", setting kms_attr_seen" << dendl;
kms_attr_seen = true;
rgw_set_amz_meta_header(s->info.crypt_attribute_map, kms_attr, kms_master_key_id, OVERWRITE);
}
if (! op_ret) {
if (! lo_etag.empty()) {
- /* Handle etag of Swift API's large objects (DLO/SLO). It's entirerly
+ /* Handle etag of Swift API's large objects (DLO/SLO). It's entirely
* legit to perform GET on them through S3 API. In such situation,
* a client should receive the composited content with corresponding
* etag value. */
* What we really would like is to qualify the bucket name, so
* that the client could simply copy it and paste into next request.
* Unfortunately, in S3 we cannot know if the client will decide
- * to come through DNS, with "bucket.tenant" sytanx, or through
+ * to come through DNS, with "bucket.tenant" syntax, or through
* URL with "tenant\bucket" syntax. Therefore, we provide the
* tenant separately.
*/
s,
rgw::ARN(partition, service, "", s->user->get_tenant(), ""),
rgw::IAM::stsGetSessionToken)) {
- ldpp_dout(this, 0) << "User does not have permssion to perform GetSessionToken" << dendl;
+ ldpp_dout(this, 0) << "User does not have permission to perform GetSessionToken" << dendl;
return -EACCES;
}
const size_t start_pos = path_str.find_first_not_of('/');
if (string::npos != start_pos) {
- /* Seperator is the first slash after the leading ones. */
+ /* Separator is the first slash after the leading ones. */
const size_t sep_pos = path_str.find('/', start_pos);
if (string::npos != sep_pos) {
uint32_t length_before_processing, length_post_processing;
csv_object::csv_defintions csv;
const char* s3select_syntax_error = "s3select-Syntax-Error";
- const char* s3select_resource_id = "resourcse-id";
+ const char* s3select_resource_id = "resource-id";
const char* s3select_processTime_error = "s3select-ProcessingTime-Error";
s3select_syntax.parse_query(query);
#ifdef _ARROW_EXIST
s3selectEngine::rgw_s3select_api m_rgw_api;
#endif
- //a request for range may statisfy by several calls to send_response_date;
+ //a request for a range may be satisfied by several calls to send_response_data;
size_t m_request_range;
std::string requested_buffer;
std::string range_req_str;
virtual const rgw_user& get_owner() const = 0;
/** Check in the backing store if this bucket is empty */
virtual int check_empty(const DoutPrefixProvider* dpp, optional_yield y) = 0;
- /** Chec k if the given size fits within the quota */
+ /** Check if the given size fits within the quota */
virtual int check_quota(const DoutPrefixProvider *dpp, RGWQuota& quota, uint64_t obj_size, optional_yield y, bool check_size_only = false) = 0;
/** Set the attributes in attrs, leaving any other existing attrs set, and
* write them to the backing store; a merge operation */
int DBBucket::remove_objs_from_index(const DoutPrefixProvider *dpp, std::list<rgw_obj_index_key>& objs_to_unlink)
{
- /* XXX: CHECK: Unlike RadosStore, there is no seperate bucket index table.
+ /* XXX: CHECK: Unlike RadosStore, there is no separate bucket index table.
* Delete all the objects in the list from the object table of this
* bucket
*/
return 0; /* nothing more to write */
}
- /* flush watever tail data is present */
+ /* flush whatever tail data is present */
int ret = parent_op.write_data(dpp, tail_part_data, tail_part_offset);
if (ret < 0) {
return ret;
return 0; /* nothing more to write */
}
- /* flush watever tail data is present */
+ /* flush whatever tail data is present */
int ret = parent_op.write_data(dpp, tail_part_data, tail_part_offset);
if (ret < 0) {
return ret;
//Session Token - Encrypt using AES
auto* cryptohandler = cct->get_crypto_handler(CEPH_CRYPTO_AES);
if (! cryptohandler) {
- ldpp_dout(dpp, 0) << "ERROR: No AES cryto handler found !" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: No AES crypto handler found !" << dendl;
return -EINVAL;
}
string secret_s = cct->_conf->rgw_sts_key;
/* TempURL case is completely different than the Keystone auth - you may
* get account name only through extraction from URL. In turn, knowledge
- * about account is neccessary to obtain its bucket tenant. Without that,
+ * about account is necessary to obtain its bucket tenant. Without that,
* the access would be limited to accounts with empty tenant. */
string bucket_tenant;
if (!s->account_name.empty()) {
struct rgw_sync_policy_group {
std::string id;
- rgw_sync_data_flow_group data_flow; /* override data flow, howver, will not be able to
+ rgw_sync_data_flow_group data_flow; /* override data flow, however, will not be able to
add new flows that don't exist at higher level */
std::vector<rgw_sync_bucket_pipes> pipes; /* if not defined then applies to all
buckets (DR sync) */
*/
uint32_t bucket_index_max_shards;
- // pre-shard buckets on creation to enable some write-parallism by default,
+ // pre-shard buckets on creation to enable some write-parallelism by default,
// delay the need to reshard as the bucket grows, and (in multisite) get some
// bucket index sharding where dynamic resharding is not supported
static constexpr uint32_t default_bucket_index_max_shards = 11;
std::vector<uint32_t> modes;
reg.get_supported_methods(CEPH_ENTITY_TYPE_MON, &methods, &modes);
- ldpp_dout(dpp, 20) << __func__ << "(): auth registy supported: methods=" << methods << " modes=" << modes << dendl;
+ ldpp_dout(dpp, 20) << __func__ << "(): auth registry supported: methods=" << methods << " modes=" << modes << dendl;
for (auto method : methods) {
if (!reg.is_secure_method(method)) {
}
cls_2pc_reservations reservations;
ASSERT_EQ(0, cls_2pc_queue_list_reservations(ioctx, queue_name, reservations));
- // 2 reservations were not comitted
+ // 2 reservations were not committed
ASSERT_EQ(reservations.size(), 2);
}
cls_2pc_queue_init(op, queue_name, max_size);
ASSERT_EQ(0, ioctx.operate(queue_name, &op));
- // anything older than 100ms is cosidered stale
+ // anything older than 100ms is considered stale
ceph::coarse_real_time stale_time = ceph::coarse_real_clock::now() + std::chrono::milliseconds(100);
std::vector<std::thread> reservers(max_workers);
cls_2pc_queue_init(op, queue_name, max_size);
ASSERT_EQ(0, ioctx.operate(queue_name, &op));
- // anything older than 100ms is cosidered stale
+ // anything older than 100ms is considered stale
ceph::coarse_real_time stale_time = ceph::coarse_real_clock::now() + std::chrono::milliseconds(100);
std::vector<std::thread> reservers(max_workers);
std::for_each(producers.begin(), producers.end(), [](auto& p) { p.join(); });
std::for_each(consumers.begin(), consumers.end(), [](auto& c) { c.join(); });
if (!retry_happened) {
- std::cerr << "Queue was never full - all reservations were sucessfull." <<
+ std::cerr << "Queue was never full - all reservations were successful." <<
"Please decrease the amount of consumer threads" << std::endl;
}
// make sure that queue is empty and no reservations remain
ASSERT_EQ(0, ioctx.operate(queue_name, &op));
// test multiple enqueues
- // 10 iterations, 100 elelemts each
+ // 10 iterations, 100 elements each
// expect 0 (OK)
test_enqueue(queue_name, 10, 100, 0);
}
cls_queue_init(op, queue_name, queue_size);
ASSERT_EQ(0, ioctx.operate(queue_name, &op));
- // 8 iterations, 5 elelemts each
+ // 8 iterations, 5 elements each
// expect 0 (OK)
test_enqueue(queue_name, 8, 5, 0);
- // 2 iterations, 5 elelemts each
+ // 2 iterations, 5 elements each
// expect -28 (Q FULL)
test_enqueue(queue_name, 2, 5, -28);
}
{ static_cast<char>(0xCF), static_cast<char>(0x8F) },
/* treble byte utf8 character */
{ static_cast<char>(0xDF), static_cast<char>(0x8F), static_cast<char>(0x8F) },
- /* quadruble byte utf8 character */
+ /* quadruple byte utf8 character */
{ static_cast<char>(0xF7), static_cast<char>(0x8F), static_cast<char>(0x8F), static_cast<char>(0x8F) },
};
"bi list test with filters should return correct truncation indicator";
}
- // test whether combined segment count is correcgt
+ // test whether combined segment count is correct
is_truncated = false;
entries.clear();
marker.clear();
int simulator::try_start(const cls_rgw_obj_key& key, const std::string& tag)
{
- // choose randomly betwen create and delete
+ // choose randomly between create and delete
const auto type = static_cast<RGWModifyOp>(
ceph::util::generate_random_number<size_t, size_t>(CLS_RGW_OP_ADD,
CLS_RGW_OP_DEL));
add_ceph_unittest(unittest_rgw_bucket_sync_cache)
target_link_libraries(unittest_rgw_bucket_sync_cache ${rgw_libs})
-#unitttest_rgw_period_history
+#unittest_rgw_period_history
add_executable(unittest_rgw_period_history test_rgw_period_history.cc)
add_ceph_unittest(unittest_rgw_period_history)
target_link_libraries(unittest_rgw_period_history ${rgw_libs})
-# unitttest_rgw_compression
+# unittest_rgw_compression
add_executable(unittest_rgw_compression
test_rgw_compression.cc
$<TARGET_OBJECTS:unit-main>)
add_ceph_unittest(unittest_rgw_compression)
target_link_libraries(unittest_rgw_compression ${rgw_libs})
-# unitttest_http_manager
+# unittest_http_manager
add_executable(unittest_http_manager test_http_manager.cc)
add_ceph_unittest(unittest_http_manager)
target_link_libraries(unittest_http_manager ${rgw_libs})
-# unitttest_rgw_reshard_wait
+# unittest_rgw_reshard_wait
add_executable(unittest_rgw_reshard_wait test_rgw_reshard_wait.cc)
add_ceph_unittest(unittest_rgw_reshard_wait)
target_link_libraries(unittest_rgw_reshard_wait ${rgw_libs})
target_include_directories(unittest_rgw_string
SYSTEM PRIVATE "${CMAKE_SOURCE_DIR}/src/rgw")
-# unitttest_rgw_dmclock_queue
+# unittest_rgw_dmclock_queue
add_executable(unittest_rgw_dmclock_scheduler test_rgw_dmclock_scheduler.cc $<TARGET_OBJECTS:unit-main>)
add_ceph_unittest(unittest_rgw_dmclock_scheduler)
target_include_directories(unittest_rgw_dmclock_scheduler
def make_request(conn, method, resource, parameters=None, sign_parameters=False, extra_parameters=None):
"""generic request sending to pubsub radogw
- should cover: topics, notificatios and subscriptions
+ should cover: topics, notifications and subscriptions
"""
url_params = ''
if parameters is not None:
receiver.channel.stop_consuming()
log.info('stopping AMQP receiver')
except Exception as error:
- log.info('failed to gracefuly stop AMQP receiver: %s', str(error))
+ log.info('failed to gracefully stop AMQP receiver: %s', str(error))
task.join(5)
receiver.consumer.unsubscribe()
receiver.consumer.close()
except Exception as error:
- log.info('failed to gracefuly stop Kafka receiver: %s', str(error))
+ log.info('failed to gracefully stop Kafka receiver: %s', str(error))
def get_ip():
def test_az_check_empty_bucket_versioning():
- """ test bucket vesioning with empty bucket """
+ """ test bucket versioning with empty bucket """
zones, az_zones = init_env()
bucket_name = gen_bucket_name()
# create bucket on the non archive zone
fox
llama
octopus
-penguine
+penguin
seal
squid
whale
```
$ nosetests test_multi.py:<specific_test_name>
```
-To run miltiple tests based on wildcard string, use the following format:
+To run multiple tests based on wildcard string, use the following format:
```
$ nosetests test_multi.py -m "<wildcard string>"
```
amqp::shutdown();
}
- // wait for at least one new (since last drain) message to be dequeueud
+ // wait for at least one new (since last drain) message to be dequeued
// and then wait for all pending answers to be received
void wait_until_drained() {
while (amqp::get_dequeued() == current_dequeued) {
// Each test has 3 strings:
// * the base URL
// * the path we want to concatenate
- // * the exepected final URL
+ // * the expected final URL
std::string tests[9][3] ={
{"", "", ""},
{"", "bar", "/bar"},
{
const std::string script = R"(
function print_owner(owner)
- print("Owner Dispaly Name: " .. owner.DisplayName)
+ print("Owner Display Name: " .. owner.DisplayName)
print("Owner Id: " .. owner.User.Id)
print("Owner Tenanet: " .. owner.User.Tenant)
end
pe.lua.background = &lua_background;
- // to make sure test is consistent we have to puase the background
+ // to make sure test is consistent we have to pause the background
lua_background.pause();
const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script);
ASSERT_EQ(rc, 0);
ASSERT_STREQ(to_string(result).c_str(), expected_output);
}
-TEST(TestDecoder, MalfomedInput)
+TEST(TestDecoder, MalformedInput)
{
RGWXMLDecoder::XMLParser parser;
ASSERT_TRUE(parser.init());
g_test->send_request(string("PUT"), req,
read_bucket_object, (void *)data, (size_t)len);
if (g_test->get_resp_code() != 200U) {
- cout << "Errror sending object to the bucket, http_code " << g_test->get_resp_code();
+ cout << "Error sending object to the bucket, http_code " << g_test->get_resp_code();
return -1;
}
return 0;
req.append(obj_name);
g_test->send_request(string("GET"), req);
if (g_test->get_resp_code() != 200U) {
- cout << "Errror sending object to the bucket, http_code " << g_test->get_resp_code();
+ cout << "Error sending object to the bucket, http_code " << g_test->get_resp_code();
return -1;
}
return 0;
req.append(obj_name);
g_test->send_request(string("DELETE"), req);
if (g_test->get_resp_code() != 204U) {
- cout << "Errror deleting object from bucket, http_code " << g_test->get_resp_code();
+ cout << "Error deleting object from bucket, http_code " << g_test->get_resp_code();
return -1;
}
return 0;
p2 = RGW_CAP_WRITE;
EXPECT_TRUE (obt_info.caps.check_cap(meta_caps, p2) != 0);
- /*Version and tag infromation*/
+ /*Version and tag information*/
EXPECT_TRUE(objv1->ver > objv->ver);
EXPECT_EQ(objv1->tag, objv->tag);