The main device will have a lot of metadata, including information
that used to be stored in small files in the OSD data directory. The
-auxilliary devices (db and wal) will only have the minimum required
+auxiliary devices (db and wal) will only have the minimum required
fields (OSD UUID, size, device type, birth time).
OSD directory priming
def valgrind_post(ctx, config):
"""
After the tests run, look throught all the valgrind logs. Exceptions are raised
- if textual errors occured in the logs, or if valgrind exceptions were detected in
+ if textual errors occurred in the logs, or if valgrind exceptions were detected in
the logs.
:param ctx: Context
Mkfs mon nodes.
On exit:
- If errors occured, extract a failure message and store in ctx.summary.
+ If errors occurred, extract a failure message and store in ctx.summary.
Unmount all test files and temporary journaling files.
Save the monitor information and archive all ceph logs.
Cleanup the keyring setup, and remove all monitor map and data files left over.
self.do_thrash()
except Exception as e:
# Log exceptions here so we get the full backtrace (gevent loses them).
- # Also allow succesful completion as gevent exception handling is a broken mess:
+ # Also allow successful completion as gevent exception handling is a broken mess:
#
# 2017-02-03T14:34:01.259 CRITICAL:root: File "gevent.libev.corecext.pyx", line 367, in gevent.libev.corecext.loop.handle_error (src/gevent/libev/gevent.corecext.c:5051)
# File "/home/teuthworker/src/git.ceph.com_git_teuthology_master/virtualenv/local/lib/python2.7/site-packages/gevent/hub.py", line 558, in handle_error
bool more;
int ret = list_plain_entries(hctx, op.name, op.marker, max, &op_ret.entries, &more);
if (ret < 0) {
- CLS_LOG(0, "ERROR: %s(): list_plain_entries retured ret=%d", __func__, ret);
+ CLS_LOG(0, "ERROR: %s(): list_plain_entries returned ret=%d", __func__, ret);
return ret;
}
int count = ret;
if (!more) {
ret = list_instance_entries(hctx, op.name, op.marker, max - count, &op_ret.entries, &more);
if (ret < 0) {
- CLS_LOG(0, "ERROR: %s(): list_instance_entries retured ret=%d", __func__, ret);
+ CLS_LOG(0, "ERROR: %s(): list_instance_entries returned ret=%d", __func__, ret);
return ret;
}
if (!more) {
ret = list_olh_entries(hctx, op.name, op.marker, max - count, &op_ret.entries, &more);
if (ret < 0) {
- CLS_LOG(0, "ERROR: %s(): list_olh_entries retured ret=%d", __func__, ret);
+ CLS_LOG(0, "ERROR: %s(): list_olh_entries returned ret=%d", __func__, ret);
return ret;
}
/* The following are tunables for torrent data */
OPTION(rgw_torrent_flag, OPT_BOOL) // produce torrent function flag
-OPTION(rgw_torrent_tracker, OPT_STR) // torrent field annouce and annouce list
+OPTION(rgw_torrent_tracker, OPT_STR) // torrent field announce and announce list
OPTION(rgw_torrent_createby, OPT_STR) // torrent field created by
OPTION(rgw_torrent_comment, OPT_STR) // torrent field comment
OPTION(rgw_torrent_encoding, OPT_STR) // torrent field encoding
.set_default(0)
.set_description("HTTP return code override for object creation")
.set_long_description(
- "If not zero, this is the HTTP return code that will be returned on a succesful S3 "
+ "If not zero, this is the HTTP return code that will be returned on a successful S3 "
"object creation."),
Option("rgw_resolve_cname", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
.set_description("Length of time to aggregate metadata changes")
.set_long_description(
"Length of time (in milliseconds) in which the master zone aggregates all the "
- "metadata changes that occured, before sending notifications to all the other "
+ "metadata changes that occurred, before sending notifications to all the other "
"zones."),
Option("rgw_run_sync_thread", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
Option("rgw_torrent_tracker", Option::TYPE_STR, Option::LEVEL_ADVANCED)
.set_default("")
- .set_description("Torrent field annouce and annouce list"),
+ .set_description("Torrent field announce and announce list"),
Option("rgw_torrent_createby", Option::TYPE_STR, Option::LEVEL_ADVANCED)
.set_default("")
/*
* Cast a jlong to ceph_mount_info. Each JNI function is expected to pass in
* the class instance variable instance_ptr. Passing a parameter is faster
- * than reaching back into Java via an upcall to retreive this pointer.
+ * than reaching back into Java via an upcall to retrieve this pointer.
*/
static inline struct ceph_mount_info *get_ceph_mount(jlong j_mntp)
{
// avoid watch delay when pruning stale tags from journal objects
if (pruned) {
- ldout(m_cct, 15) << __func__ << ": reseting refetch state to immediate"
+ ldout(m_cct, 15) << __func__ << ": resetting refetch state to immediate"
<< dendl;
for (auto &player_pair : m_object_players) {
ObjectPlayerPtr object_player(player_pair.second);
*result = cls_client::dir_get_name_finish(&it, &m_image_ctx->name);
}
if (*result < 0 && *result != -ENOENT) {
- lderr(cct) << "failed to retreive name: "
+ lderr(cct) << "failed to retrieve name: "
<< cpp_strerror(*result) << dendl;
send_close_image(*result);
} else if (*result == -ENOENT) {
ldout(cct, 5) << "failed to retrieve name for image id "
<< m_image_ctx->id << dendl;
} else {
- lderr(cct) << "failed to retreive name from trash: "
+ lderr(cct) << "failed to retrieve name from trash: "
<< cpp_strerror(*result) << dendl;
}
send_close_image(*result);
&it, &m_image_ctx->object_prefix, &m_image_ctx->order, &m_image_ctx->features);
}
if (*result < 0) {
- lderr(cct) << "failed to retreive initial metadata: "
+ lderr(cct) << "failed to retrieve initial metadata: "
<< cpp_strerror(*result) << dendl;
send_close_image(*result);
return nullptr;
ldout(cct, 5) << this << " " << __func__
<< ": mirroring is not supported by OSD" << dendl;
} else {
- lderr(cct) << "failed to retreive mirror image: " << cpp_strerror(*result)
+ lderr(cct) << "failed to retrieve mirror image: " << cpp_strerror(*result)
<< dendl;
}
return m_on_finish;
}
if (*result != -ENOENT) {
- lderr(m_cct) << "failed to retreive mirror image: " << cpp_strerror(*result)
+ lderr(m_cct) << "failed to retrieve mirror image: " << cpp_strerror(*result)
<< dendl;
return m_on_finish;
}
dn->link_remote(dn->get_linkage(), in);
dout(12) << "_fetched got remote link " << ino << " which we have " << *in << dendl;
} else {
- dout(12) << "_fetched got remote link " << ino << " (dont' have it)" << dendl;
+ dout(12) << "_fetched got remote link " << ino << " (don't have it)" << dendl;
}
}
}
}
// return the sent length
- // < 0 means error occured
+ // < 0 means error occurred
static ssize_t do_sendmsg(int fd, struct msghdr &msg, unsigned len, bool more)
{
size_t sent = 0;
if (comp_mode == Compressor::COMP_NONE) {
dout(10) << __func__ << " compression mode set to 'none', "
- << "ignore other compression setttings" << dendl;
+ << "ignore other compression settings" << dendl;
return;
}
// only support one device per osd now!
assert(shared_driver_datas.empty());
- // index 0 is occured by master thread
+ // index 0 is occupied by master thread
shared_driver_datas.push_back(new SharedDriverData(shared_driver_datas.size()+1, sn_tag, c, ns));
*driver = shared_driver_datas.back();
}
return fs;
}
-void infering_bluefs_devices(vector<string>& devs, std::string& path)
+void inferring_bluefs_devices(vector<string>& devs, std::string& path)
{
- cout << "infering bluefs devices from bluestore path" << std::endl;
+ cout << "inferring bluefs devices from bluestore path" << std::endl;
for (auto fn : {"block", "block.wal", "block.db"}) {
string p = path + "/" + fn;
struct stat st;
exit(EXIT_FAILURE);
}
if (devs.empty())
- infering_bluefs_devices(devs, path);
+ inferring_bluefs_devices(devs, path);
}
if (action == "bluefs-export" || action == "bluefs-log-dump") {
if (path.empty()) {
cerr << "must specify out-dir to export bluefs" << std::endl;
exit(EXIT_FAILURE);
}
- infering_bluefs_devices(devs, path);
+ inferring_bluefs_devices(devs, path);
}
if (action == "bluefs-bdev-sizes" || action == "bluefs-bdev-expand") {
if (path.empty()) {
cerr << "must specify bluestore path" << std::endl;
exit(EXIT_FAILURE);
}
- infering_bluefs_devices(devs, path);
+ inferring_bluefs_devices(devs, path);
}
vector<const char*> args;
@requires(('write_op', WriteOp), ('oid', str_type), ('mtime', opt(int)), ('flags', opt(int)))
def operate_write_op(self, write_op, oid, mtime=0, flags=LIBRADOS_OPERATION_NOFLAG):
"""
- excute the real write operation
+ execute the real write operation
:param write_op: write operation object
:type write_op: WriteOp
:param oid: object name
@requires(('write_op', WriteOp), ('oid', str_type), ('oncomplete', opt(Callable)), ('onsafe', opt(Callable)), ('mtime', opt(int)), ('flags', opt(int)))
def operate_aio_write_op(self, write_op, oid, oncomplete=None, onsafe=None, mtime=0, flags=LIBRADOS_OPERATION_NOFLAG):
"""
- excute the real write operation asynchronously
+ execute the real write operation asynchronously
:param write_op: write operation object
:type write_op: WriteOp
:param oid: object name
@requires(('read_op', ReadOp), ('oid', str_type), ('flag', opt(int)))
def operate_read_op(self, read_op, oid, flag=LIBRADOS_OPERATION_NOFLAG):
"""
- excute the real read operation
+ execute the real read operation
:param read_op: read operation object
:type read_op: ReadOp
:param oid: object name
@requires(('read_op', ReadOp), ('oid', str_type), ('oncomplete', opt(Callable)), ('onsafe', opt(Callable)), ('flag', opt(int)))
def operate_aio_read_op(self, read_op, oid, oncomplete=None, onsafe=None, flag=LIBRADOS_OPERATION_NOFLAG):
"""
- excute the real read operation
+ execute the real read operation
:param read_op: read operation object
:type read_op: ReadOp
:param oid: object name
def aio_flush(self, oncomplete):
"""
- Asyncronously wait until all writes are fully flushed if caching is
+ Asynchronously wait until all writes are fully flushed if caching is
enabled.
"""
t.write(cid, hoid, 0, 0x30000, bl);
t.zero(cid, hoid, 0, 0x10000);
t.zero(cid, hoid, 0x20000, 0x10000);
- cerr << "Rewrite an object and create two holes at the begining and the end" << std::endl;
+ cerr << "Rewrite an object and create two holes at the beginning and the end" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
("version,v", po::value<unsigned>(&v),
"map version to obtain")
("readable,r", po::value<bool>(&readable)->default_value(false),
- "print the map infomation in human readable format")
+ "print the map information in human readable format")
;
// this is going to be a positional argument; we don't want to show
// it as an option during --help, but we do want to have it captured
target.unset_feature(f.feature);
}
} else {
- cerr << "unknow feature operation type '" << f.op << "'" << std::endl;
+ cerr << "unknown feature operation type '" << f.op << "'" << std::endl;
}
}
return modified;
return false;
}
if (r != entry_size) {
- std::cerr << "rbd: error reading from stdin: trucated"
+ std::cerr << "rbd: error reading from stdin: truncated"
<< std::endl;
r = -EINVAL;
return false;
r = krbd_is_mapped(krbd, poolname, imgname, snapname, &devnode);
if (r < 0) {
- std::cerr << "rbd: warning: can't get image map infomation: "
+ std::cerr << "rbd: warning: can't get image map information: "
<< cpp_strerror(r) << std::endl;
} else if (r > 0) {
std::cerr << "rbd: warning: image already mapped as " << devnode
os << " TRIM ";
break;
default:
- os << " UNKNOW(" << ctx.command << ") ";
+ os << " UNKNOWN(" << ctx.command << ") ";
break;
}