public:
/// Initialize new histogram object
PerfHistogram(std::initializer_list<axis_config_d> axes_config) {
- assert(axes_config.size() == DIM &&
+ ceph_assert(axes_config.size() == DIM &&
"Invalid number of axis configuration objects");
int i = 0;
for (const auto &ac : axes_config) {
- assert(ac.m_buckets > 0 && "Must have at least one bucket on axis");
- assert(ac.m_quant_size > 0 &&
+ ceph_assert(ac.m_buckets > 0 && "Must have at least one bucket on axis");
+ ceph_assert(ac.m_quant_size > 0 &&
"Quantization unit must be non-zero positive integer value");
m_axes_config[i++] = ac;
static_assert(sizeof...(T) == DIM, "Incorrect number of arguments");
return get_raw_index_internal<0>(
[](int64_t bucket, const axis_config_d &ac) {
- assert(bucket >= 0 && "Bucket index can not be negative");
- assert(bucket < ac.m_buckets && "Bucket index too large");
+ ceph_assert(bucket >= 0 && "Bucket index can not be negative");
+ ceph_assert(bucket < ac.m_buckets && "Bucket index too large");
return bucket;
},
0, buckets...);
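// Why the swap matters here, as a minimal sketch (consider a release build
// compiled with -DNDEBUG):
//
//   assert(ac.m_buckets > 0);       // libc assert: compiled out, check disappears
//   ceph_assert(ac.m_buckets > 0);  // from "include/assert.h": always evaluated,
//                                   // and on failure goes through Ceph's assert
//                                   // handler, which logs a backtrace and aborts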
int RadosDump::get_footer(footer *f)
{
- assert(f != NULL);
+ ceph_assert(f != NULL);
bufferlist ebl;
auto ebliter = ebl.cbegin();
superbl.clear();
sh.encode(superbl);
- assert(super_header::FIXED_LENGTH == superbl.length());
+ ceph_assert(super_header::FIXED_LENGTH == superbl.length());
superbl.write_fd(file_fd);
}
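// The length ceph_assert above pins the export file format: super_header is
// encoded into a fixed-size blob so a reader can consume exactly FIXED_LENGTH
// bytes before decoding, and the check fires if the struct grows without the
// constant being bumped.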
} else {
if (!inode->dentries.empty()) {
Dentry *dn = *(inode->dentries.begin());
- assert(dn->dir);
- assert(dn->dir->parent_inode);
+ ceph_assert(dn->dir);
+ ceph_assert(dn->dir->parent_inode);
r = lookup_trace(client, dn->dir->parent_inode->ino);
if (r) {
return r;
}
} else {
// We reached the root of the tree
- assert(inode->ino == CEPH_INO_ROOT);
+ ceph_assert(inode->ino == CEPH_INO_ROOT);
}
}
}
bool exists(const string &prefix) {
- assert(!prefix.empty());
+ ceph_assert(!prefix.empty());
KeyValueDB::WholeSpaceIterator iter = db->get_wholespace_iterator();
iter->seek_to_first(prefix);
return (iter->valid() && (iter->raw_key().first == prefix));
}
bool exists(const string &prefix, const string &key) {
- assert(!prefix.empty());
+ ceph_assert(!prefix.empty());
if (key.empty()) {
return exists(prefix);
}
bufferlist get(const string &prefix, const string &key, bool &exists) {
- assert(!prefix.empty() && !key.empty());
+ ceph_assert(!prefix.empty() && !key.empty());
map<string,bufferlist> result;
std::set<std::string> keys;
}
bool set(const string &prefix, const string &key, bufferlist &val) {
- assert(!prefix.empty());
- assert(!key.empty());
- assert(val.length() > 0);
+ ceph_assert(!prefix.empty());
+ ceph_assert(!key.empty());
+ ceph_assert(val.length() > 0);
KeyValueDB::Transaction tx = db->get_transaction();
tx->set(prefix, key, val);
}
bool rm(const string& prefix, const string& key) {
- assert(!prefix.empty());
- assert(!key.empty());
+ ceph_assert(!prefix.empty());
+ ceph_assert(!key.empty());
KeyValueDB::Transaction tx = db->get_transaction();
tx->rmkey(prefix, key);
}
bool rm_prefix(const string& prefix) {
- assert(!prefix.empty());
+ ceph_assert(!prefix.empty());
KeyValueDB::Transaction tx = db->get_transaction();
tx->rmkeys_by_prefix(prefix);
return fd != -1;
}
MonitorDBStore::TransactionRef cur() {
- assert(valid());
+ ceph_assert(valid());
return t;
}
unsigned num() { return idx; }
// and that's what 'desc_all' is all about.
//
- assert(desc != NULL);
+ ceph_assert(desc != NULL);
po::options_description desc_all;
desc_all.add(*desc);
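// Sketch of the usual boost::program_options arrangement being assembled here
// (the 'hidden'/'p'/'vm' names below are illustrative, not from this code):
//
//   po::options_description desc_all;
//   desc_all.add(*desc);     // user-visible options, printed by --help
//   desc_all.add(hidden);    // e.g. positional arguments kept out of --help
//   po::store(po::command_line_parser(argc, argv)
//                 .options(desc_all).positional(p).run(), vm);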
fullmap.encode(inc.fullmap);
}
}
- assert(osdmap.have_crc());
+ ceph_assert(osdmap.have_crc());
inc.full_crc = osdmap.get_crc();
bl.clear();
// be consistent with OSDMonitor::update_from_paxos()
// (good_version, last_committed]
// with the good crush map.
// XXX: may need to break this into several paxos versions?
- assert(good_version < last_committed);
+ ceph_assert(good_version < last_committed);
for (version_t v = good_version + 1; v <= last_committed; v++) {
cout << "rewriting epoch #" << v << "/" << last_committed << std::endl;
r = update_osdmap(store, v, false, crush, t);
total += bytes;
} while(true);
- assert(bl.length() == total);
+ ceph_assert(bl.length() == total);
return 0;
}
return -ENOENT;
}
ostringstream oss;
- assert(struct_ver > 0);
+ ceph_assert(struct_ver > 0);
PGLog::read_log_and_missing(
fs, ch,
pgid.make_pgmeta_oid(),
cerr << __func__ << " error on read_info " << cpp_strerror(r) << std::endl;
return r;
}
- assert(struct_v >= 8);
+ ceph_assert(struct_v >= 8);
// new omap key
cout << "setting '_remove' omap key" << std::endl;
map<string,bufferlist> values;
map<string,bufferlist> km;
if (!divergent.empty()) {
- assert(missing.get_items().empty());
+ ceph_assert(missing.get_items().empty());
PGLog::write_log_and_missing_wo_missing(
t, &km, log, coll, info.pgid.make_pgmeta_oid(), divergent, true);
} else {
struct stat st;
auto ch = store->open_collection(coll);
int r = store->stat(ch, oid, &st);
- assert(r == 0);
- assert(st.st_size == 0);
+ ceph_assert(r == 0);
+ ceph_assert(st.st_size == 0);
cerr << "Log bounds are: " << "(" << info.log_tail << ","
<< info.last_update << "]" << std::endl;
return 0;
}
- assert(info.last_update.version > max_entries);
+ ceph_assert(info.last_update.version > max_entries);
version_t trim_to = info.last_update.version - max_entries;
size_t trim_at_once = g_ceph_context->_conf->osd_pg_log_trim_max;
eversion_t new_tail;
for (vector<ghobject_t>::iterator i = objects.begin();
i != objects.end();
++i) {
- assert(!i->hobj.is_meta());
+ ceph_assert(!i->hobj.is_meta());
if (i->is_pgmeta() || i->hobj.is_temp() || !i->is_no_gen()) {
continue;
}
if (debug) {
Formatter *formatter = Formatter::create("json-pretty");
- assert(formatter);
+ ceph_assert(formatter);
dump_log(formatter, cerr, log, missing);
delete formatter;
}
cerr << "\tsetting " << clone.hobj << " snaps " << snaps
<< std::endl;
OSDriver::OSTransaction _t(driver.get_transaction(t));
- assert(!snaps.empty());
+ ceph_assert(!snaps.empty());
snap_mapper.add_oid(clone.hobj, snaps, &_t);
}
} else {
cerr << "ERROR: Export contains temporary object '" << ob.hoid << "'" << std::endl;
return -EFAULT;
}
- assert(g_ceph_context);
+ ceph_assert(g_ceph_context);
auto ch = store->open_collection(coll);
if (ob.hoid.hobj.nspace != g_ceph_context->_conf->osd_hit_set_namespace) {
bool ok = user_pgid.parse(pgidstr.c_str());
// This succeeded in main() already
- assert(ok);
+ ceph_assert(ok);
if (pgid != user_pgid) {
if (pgid.pool() != user_pgid.pool()) {
cerr << "Can't specify a different pgid pool, must be " << pgid.pool() << std::endl;
ms.missing.filter_objects([&](const hobject_t &obj) {
if (obj.nspace == g_ceph_context->_conf->osd_hit_set_namespace)
return false;
- assert(!obj.is_temp());
+ ceph_assert(!obj.is_temp());
object_t oid = obj.oid;
object_locator_t loc(obj);
pg_t raw_pgid = curmap.object_locator_to_pg(oid, loc);
return -ENOENT;
}
- assert(out.size() == 1);
+ ceph_assert(out.size() == 1);
bufferlist bl = out.begin()->second;
string value(bl.c_str(), bl.length());
throw std::runtime_error(ss.str());
}
vector<json_spirit::Value>::iterator i = array.begin();
- assert(i != array.end());
+ ceph_assert(i != array.end());
if (i->type() != json_spirit::str_type) {
ss << "Object '" << object
<< "' must be a JSON array with the first element a string";
if (op == "export-remove") {
ret = initiate_new_remove_pg(fs, pgid);
// Export succeeded, so pgid is there
- assert(ret == 0);
+ ceph_assert(ret == 0);
cerr << "Remove successful" << std::endl;
}
}
cout << "Finished trimming pg log" << std::endl;
goto out;
} else {
- assert(!"Should have already checked for valid --op");
+ ceph_assert(!"Should have already checked for valid --op");
}
} else {
cerr << "PG '" << pgid << "' not found" << std::endl;
}
}
auto fs = fsmap->get_filesystem(fscid);
- assert(fs != nullptr);
+ ceph_assert(fs != nullptr);
// Default to output to metadata pool
if (driver == NULL) {
}
} else if (untagged_only) {
- assert(obj_name_offset == 0);
+ ceph_assert(obj_name_offset == 0);
dout(20) << "OSD matched oid " << oid << dendl;
}
continue;
} else {
// parse_oid can only do 0 or -EINVAL
- assert(r == 0);
+ ceph_assert(r == 0);
}
if (!valid_ino(dir_ino)) {
inodeno_t ino, frag_t frag, fnode_t *fnode,
uint64_t *last_version)
{
- assert(fnode != NULL);
+ ceph_assert(fnode != NULL);
object_t frag_oid = InodeStore::get_object_name(ino, frag, "");
bufferlist fnode_bl;
int MetadataTool::read_dentry(inodeno_t parent_ino, frag_t frag,
const std::string &dname, InodeStore *inode)
{
- assert(inode != NULL);
+ ceph_assert(inode != NULL);
std::string key;
frag_t fragment,
bool *created)
{
- assert(created != NULL);
+ ceph_assert(created != NULL);
fnode_t existing_fnode;
*created = false;
librados::ObjectWriteOperation op;
if (read_version) {
- assert(r == -EINVAL);
+ ceph_assert(r == -EINVAL);
// Case A: We must assert that the version isn't changed since we saw the object
// was unreadable, to avoid the possibility of two data-scan processes
// both creating the frag.
op.assert_version(read_version);
} else {
- assert(r == -ENOENT);
+ ceph_assert(r == -ENOENT);
// Case B: The object didn't exist in read_fnode, so while creating it we must
// use an exclusive create to correctly populate *created with
// whether we created it ourselves or someone beat us to it.
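// Condensed, the two librados guards described in Case A/Case B look like
// this (sketch only; the rest of the op setup is elided in this hunk):
//
//   if (read_version) {
//     op.assert_version(read_version);  // fail if anyone rewrote the object
//   } else {
//     op.create(true /* exclusive */);  // fail with -EEXIST if it now exists
//   }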
{
if (metadata_pool_name.empty()) {
auto fs = fsmap->get_filesystem(fscid);
- assert(fs != nullptr);
+ ceph_assert(fs != nullptr);
int64_t const metadata_pool_id = fs->mds_map.get_metadata_pool();
dout(4) << "resolving metadata pool " << metadata_pool_id << dendl;
inodeno_t ino, uint64_t file_size, time_t file_mtime,
const file_layout_t &layout, InodeStore *out)
{
- assert(out != NULL);
+ ceph_assert(out != NULL);
out->inode.mode = 0500 | S_IFREG;
out->inode.size = file_size;
inodeno_t ino, const frag_info_t &fragstat,
const file_layout_t &layout, InodeStore *out)
{
- assert(out != NULL);
+ ceph_assert(out != NULL);
out->inode.mode = 0755 | S_IFDIR;
out->inode.dirstat = fragstat;
}
auto fs = fsmap->get_filesystem(role.fscid);
- assert(fs != nullptr);
+ ceph_assert(fs != nullptr);
if (type == "mdlog") {
JournalPointer jp(role.rank, fs->mds_map.get_metadata_pool());
int r = 0;
auto fs = fsmap->get_filesystem(role.fscid);
- assert(fs != nullptr);
+ ceph_assert(fs != nullptr);
Journaler journaler("dumper", ino, fs->mds_map.get_metadata_pool(),
CEPH_FS_ONDISK_MAGIC, objecter, 0, 0,
cout << "undump " << dump_file << std::endl;
auto fs = fsmap->get_filesystem(role.fscid);
- assert(fs != nullptr);
+ ceph_assert(fs != nullptr);
int r = 0;
// try get layout info from cluster
* prezeroing behaviour */
{
uint32_t const object_size = h.layout.object_size;
- assert(object_size > 0);
+ ceph_assert(object_size > 0);
uint64_t last_obj = h.write_pos / object_size;
uint64_t purge_count = 2;
/* When the length is zero, the last_obj should be zeroed
}
auto fs = fsmap->get_filesystem(role_selector.get_ns());
- assert(fs != nullptr);
+ ceph_assert(fs != nullptr);
int64_t const pool_id = fs->mds_map.get_metadata_pool();
dout(4) << "JournalTool: resolving pool " << pool_id << dendl;
std::string pool_name;
dout(4) << "JournalTool: creating IoCtx.." << dendl;
r = rados.ioctx_create(pool_name.c_str(), input);
- assert(r == 0);
+ ceph_assert(r == 0);
output.dup(input);
// Execution
derr << "Header could not be read!" << dendl;
return -ENOENT;
} else {
- assert(js.header != NULL);
+ ceph_assert(js.header != NULL);
}
if (argv.size() == 0) {
std::string const value_str = *arg;
arg = argv.erase(arg);
- assert(argv.empty());
+ ceph_assert(argv.empty());
std::string parse_err;
uint64_t new_val = strict_strtoll(value_str.c_str(), 0, &parse_err);
nullptr)) {
dout(1) << "Using alternate pool " << arg_str << dendl;
int r = rados.ioctx_create(arg_str.c_str(), output);
- assert(r == 0);
+ ceph_assert(r == 0);
other_pool = true;
} else {
cerr << "Unknown argument: '" << *arg << "'" << std::endl;
bool const dry_run,
std::set<inodeno_t> *consumed_inos)
{
- assert(consumed_inos != NULL);
+ ceph_assert(consumed_inos != NULL);
int r = 0;
stream.write(entry, &log_data, pos);
dout(4) << "erase_region data length " << log_data.length() << dendl;
- assert(log_data.length() == length);
+ ceph_assert(log_data.length() == length);
// Write log stream region to RADOS
// FIXME: get object size somewhere common to scan_events
const bool bare,
bufferlist *out_bl)
{
- assert(out_bl != NULL);
+ ceph_assert(out_bl != NULL);
// Compose InodeStore
InodeStore new_inode;
delete monc;
delete messenger;
delete fsmap;
- assert(waiting_for_mds_map == NULL);
+ ceph_assert(waiting_for_mds_map == NULL);
}
Mutex init_lock("MDSUtility:init");
Cond cond;
bool done = false;
- assert(!fsmap->get_epoch());
+ ceph_assert(!fsmap->get_epoch());
lock.Lock();
waiting_for_mds_map = new C_SafeCond(&init_lock, &cond, &done, NULL);
lock.Unlock();
void PgFiles::hit_file(std::string const &path, const struct ceph_statx &stx)
{
- assert(S_ISREG(stx.stx_mode));
+ ceph_assert(S_ISREG(stx.stx_mode));
dout(20) << "Hitting file '" << path << "'" << dendl;
}
auto fs = fsmap->get_filesystem(role.fscid);
- assert(nullptr != fs);
+ ceph_assert(nullptr != fs);
is_mdlog = false;
if (type == "mdlog") {
int r;
auto fs = fsmap->get_filesystem(role.fscid);
- assert(fs != nullptr);
+ ceph_assert(fs != nullptr);
Journaler journaler("resetter", ino,
fs->mds_map.get_metadata_pool(),
int Resetter::_write_reset_event(Journaler *journaler)
{
- assert(journaler != NULL);
+ ceph_assert(journaler != NULL);
LogEvent *le = new EResetJournal;
*/
int TableTool::apply_role_fn(std::function<int(mds_role_t, Formatter *)> fptr, Formatter *f)
{
- assert(f != NULL);
+ ceph_assert(f != NULL);
int r = 0;
int load_and_dump(librados::IoCtx *io, Formatter *f)
{
- assert(io != NULL);
- assert(f != NULL);
+ ceph_assert(io != NULL);
+ ceph_assert(f != NULL);
// Attempt read
bufferlist table_bl;
int load_and_dump(librados::IoCtx *io, Formatter *f)
{
- assert(io != NULL);
- assert(f != NULL);
+ ceph_assert(io != NULL);
+ ceph_assert(f != NULL);
// Read in the header
bufferlist header_bl;
}
auto fs = fsmap->get_filesystem(role_selector.get_ns());
- assert(fs != nullptr);
+ ceph_assert(fs != nullptr);
int64_t const pool_id = fs->mds_map.get_metadata_pool();
dout(4) << "resolving pool " << pool_id << dendl;
std::string pool_name;
total += bytes;
} while(true);
- assert(bl.length() == total);
+ ceph_assert(bl.length() == total);
return 0;
}
return 0;
}
int ruleno = crush.get_rule_id(rule_name);
- assert(ruleno >= 0);
+ ceph_assert(ruleno >= 0);
int r = crush.remove_rule(ruleno);
if (r < 0) {
cerr << "fail to remove rule " << rule_name << std::endl;
if (r > 0) {
print_inc_upmaps(pending_inc, upmap_fd);
r = osdmap.apply_incremental(pending_inc);
- assert(r == 0);
+ ceph_assert(r == 0);
}
}
if (upmap) {
print_inc_upmaps(pending_inc, upmap_fd);
if (upmap_save) {
int r = osdmap.apply_incremental(pending_inc);
- assert(r == 0);
+ ceph_assert(r == 0);
modified = true;
}
} else {
*/
int PoolDump::dump(IoCtx *io_ctx)
{
- assert(io_ctx != NULL);
+ ceph_assert(io_ctx != NULL);
int r = 0;
write_super();
omap_hdr_section oh;
omap_section os;
- assert(g_ceph_context);
+ ceph_assert(g_ceph_context);
if (ob.hoid.hobj.nspace == g_ceph_context->_conf->osd_hit_set_namespace) {
cout << "Skipping internal object " << ob.hoid << std::endl;
skip_object(bl);
<< std::endl;
return ret;
}
- assert(alignment != 0);
+ ceph_assert(alignment != 0);
}
}
cerr << "Discontiguous object data in export" << std::endl;
return -EFAULT;
}
- assert(ds.databl.length() == ds.len);
+ ceph_assert(ds.databl.length() == ds.len);
databl.claim_append(ds.databl);
in_offset += ds.len;
if (databl.length() >= alignment) {
out_offset += rndlen;
bufferlist n;
if (databl.length() > rndlen) {
- assert(databl.length() - rndlen < alignment);
+ ceph_assert(databl.length() - rndlen < alignment);
n.substr_of(databl, rndlen, databl.length() - rndlen);
}
databl = n;
case TYPE_OBJECT_END:
done = true;
if (need_align && databl.length() > 0) {
- assert(databl.length() < alignment);
+ ceph_assert(databl.length() < alignment);
dout(10) << "END write offset=" << out_offset << " len=" << databl.length() << dendl;
if (dry_run || skipping)
break;
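// Invariant behind the two alignment ceph_asserts above: each time databl
// accumulates at least one alignment unit, the rounded-down prefix (rndlen) is
// written out and only the sub-alignment tail is carried forward, so by
// TYPE_OBJECT_END the leftover is guaranteed to be shorter than `alignment`.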
|| shard.has_obj_size_info_mismatch()) &&
!shard.has_info_missing()) {
map<std::string, ceph::bufferlist>::iterator k = (const_cast<shard_info_t&>(shard)).attrs.find(OI_ATTR);
- assert(k != shard.attrs.end()); // Can't be missing
+ ceph_assert(k != shard.attrs.end()); // Can't be missing
if (!shard.has_info_corrupted()) {
object_info_t oi;
bufferlist bl;
|| inc.has_snapset_inconsistency()) &&
!shard.has_snapset_missing()) {
map<std::string, ceph::bufferlist>::iterator k = (const_cast<shard_info_t&>(shard)).attrs.find(SS_ATTR);
- assert(k != shard.attrs.end()); // Can't be missing
+ ceph_assert(k != shard.attrs.end()); // Can't be missing
if (!shard.has_snapset_corrupted()) {
SnapSet ss;
bufferlist bl;
|| inc.has_hinfo_inconsistency()) &&
!shard.has_hinfo_missing()) {
map<std::string, ceph::bufferlist>::iterator k = (const_cast<shard_info_t&>(shard)).attrs.find(ECUtil::get_hinfo_key());
- assert(k != shard.attrs.end()); // Can't be missing
+ ceph_assert(k != shard.attrs.end()); // Can't be missing
if (!shard.has_hinfo_corrupted()) {
ECUtil::HashInfo hi;
bufferlist bl;
object_info_t oi;
bufferlist bl;
auto k = shard.attrs.find(OI_ATTR);
- assert(k != shard.attrs.end()); // Can't be missing
+ ceph_assert(k != shard.attrs.end()); // Can't be missing
auto bliter = k->second.cbegin();
decode(oi, bliter); // Can't be corrupted
f.open_object_section("selected_object_info");
}
// It must be the same interval every time. EAGAIN would
// occur if interval changes.
- assert(start.name.empty() || first_interval == interval);
+ ceph_assert(start.name.empty() || first_interval == interval);
if (start.name.empty()) {
first_interval = interval;
formatter.open_object_section("info");
cout << "selected snap " << snapid << " '" << name << "'" << std::endl;
}
- assert(!nargs.empty());
+ ceph_assert(!nargs.empty());
// list pools?
if (strcmp(nargs[0], "lspools") == 0) {
if (format_specified) {
int r = g_conf().set_val("rbd_default_format", stringify(format));
- assert(r == 0);
+ ceph_assert(r == 0);
opts->set(RBD_IMAGE_OPTION_FORMAT, format);
}
}
opts->set(RBD_IMAGE_OPTION_JOURNAL_ORDER, order);
int r = g_conf().set_val("rbd_journal_order", stringify(order));
- assert(r == 0);
+ ceph_assert(r == 0);
}
if (vm.count(at::JOURNAL_SPLAY_WIDTH)) {
opts->set(RBD_IMAGE_OPTION_JOURNAL_SPLAY_WIDTH,
int r = g_conf().set_val("rbd_journal_splay_width",
stringify(
vm[at::JOURNAL_SPLAY_WIDTH].as<uint64_t>()));
- assert(r == 0);
+ ceph_assert(r == 0);
}
if (vm.count(at::JOURNAL_POOL)) {
opts->set(RBD_IMAGE_OPTION_JOURNAL_POOL,
int r = g_conf().set_val("rbd_journal_pool",
vm[at::JOURNAL_POOL].as<std::string>());
- assert(r == 0);
+ ceph_assert(r == 0);
}
return 0;
bool *zeroed) {
if (sparse_size == 0) {
// sparse writes are disabled -- write the full extent
- assert(buffer_offset == 0);
+ ceph_assert(buffer_offset == 0);
*write_length = buffer_length;
*zeroed = false;
return;
if (original_offset == buffer_offset) {
*zeroed = extent_is_zero;
} else if (*zeroed != extent_is_zero) {
- assert(*write_length > 0);
+ ceph_assert(*write_length > 0);
return;
}
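// Contract of calc_sparse_extent as relied on above (paraphrased from its
// callers in this patch): starting at buffer_offset it scans the buffer in
// sparse_size units, grows *write_length over consecutive units that share
// the same zero/non-zero state, and reports that state in *zeroed, so the
// caller either writes just that prefix or skips it as a hole.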
{
// If an error occurs here the process is aborted, because a
// constructor cannot return an error value.
- assert(g_ceph_context != nullptr);
+ ceph_assert(g_ceph_context != nullptr);
bl.append_zero(block_size);
librbd::image_info_t info;
int r = image->stat(info, sizeof(info));
- assert(r >= 0);
+ ceph_assert(r >= 0);
uint64_t order;
if (info.order == 0) {
order = g_conf().get_val<int64_t>("rbd_default_order");
// To prevent writesame from discarding data, thick_write sets
// the rbd_discard_on_zeroed_write_same option to false.
- assert(g_ceph_context != nullptr);
+ ceph_assert(g_ceph_context != nullptr);
r = g_conf().set_val("rbd_discard_on_zeroed_write_same", "false");
- assert(r == 0);
+ ceph_assert(r == 0);
r = utils::open_image(io_ctx, image_name, false, &image);
if (r < 0) {
return r;
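// Context for the toggle above: with rbd_discard_on_zeroed_write_same at its
// default (true), librbd converts a write-same of zeroes into a discard,
// which would leave the image thin again; thick provisioning therefore turns
// the option off for the duration of the write pass.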
#include <boost/program_options.hpp>
+#include "include/assert.h"
+
namespace rbd {
namespace action {
case DEVICE_TYPE_NBD:
return &nbd_operations;
default:
- assert(0);
+ ceph_assert(0);
return nullptr;
}
}
om->f->dump_string("exists", exists ? "true" : "false");
om->f->close_section();
} else {
- assert(om->t);
+ ceph_assert(om->t);
*(om->t) << ofs << len << (exists ? "data" : "zero") << TextTable::endrow;
}
return 0;
return;
}
- assert(m_bufferlist.length() == static_cast<size_t>(r));
+ ceph_assert(m_bufferlist.length() == static_cast<size_t>(r));
if (m_fd != STDOUT_FILENO) {
if (m_bufferlist.is_zero()) {
return;
bool zeroed = false;
utils::calc_sparse_extent(bp, sparse_size, buffer_offset, buffer_length,
&write_length, &zeroed);
- assert(write_length > 0);
+ ceph_assert(write_length > 0);
bufferlist write_bl;
if (!zeroed) {
bufferptr write_ptr(bp, buffer_offset, write_length);
write_bl.push_back(write_ptr);
- assert(write_bl.length() == write_length);
+ ceph_assert(write_bl.length() == write_length);
}
C_ImportDiff *ctx = new C_ImportDiff(idiffctx, write_bl,
bufferlist write_bl;
bufferptr write_ptr(blkptr, buffer_offset, write_length);
write_bl.push_back(write_ptr);
- assert(write_bl.length() == write_length);
+ ceph_assert(write_bl.length() == write_length);
C_Import *ctx = new C_Import(*throttle, image, write_bl,
image_pos + buffer_offset);
utils::ProgressContext pc("Importing image", no_progress);
std::map<std::string, std::string> imagemetas;
- assert(imgname);
+ ceph_assert(imgname);
uint64_t order;
if (opts.get(RBD_IMAGE_OPTION_ORDER, &order) != 0) {
<< std::endl;
goto done;
}
- assert(bdev_size >= 0);
+ ceph_assert(bdev_size >= 0);
size = (uint64_t) bdev_size;
}
#ifdef HAVE_POSIX_FADVISE
<< std::endl;
return false;
}
- assert(entry_size > 0);
+ ceph_assert(entry_size > 0);
// Read entry.
r = bl.read_fd(m_fd, entry_size);
if (r < 0) {
continue;
}
}
- assert(f_off >= s_off);
+ ceph_assert(f_off >= s_off);
if (f_off < s_off + s_len && f_len) {
uint64_t delta = s_off + s_len - f_off;
continue;
}
}
- assert(f_off >= s_off + s_len);
+ ceph_assert(f_off >= s_off + s_len);
if (s_len) {
r = accept_diff_body(sd, pd, s_tag, s_off, s_len);
if (r < 0) {
s_len = 0;
s_tag = 0;
} else {
- assert(f_end && s_end);
+ ceph_assert(f_end && s_end);
}
continue;
}
if (ggate_drv_req_cmd(req->req) == GGATE_DRV_CMD_READ &&
ggate_drv_req_error(req->req) == 0) {
- assert(req->bl.length() == ggate_drv_req_length(req->req));
+ ceph_assert(req->bl.length() == ggate_drv_req_length(req->req));
// TODO: avoid copying?
req->bl.copy(0, ggate_drv_req_length(req->req),
static_cast<char *>(ggate_drv_req_buf(req->req)));
dout(10) << dendl;
int r = start();
- assert(r == 0);
+ ceph_assert(r == 0);
dout(20) << "entering run loop" << dendl;
{
Mutex::Locker locker(m_lock);
- assert(m_stopping);
+ ceph_assert(m_stopping);
}
m_reader_thread.join();
dout(20) << ctx << dendl;
Mutex::Locker locker(m_lock);
- assert(ctx->item.is_on_list());
+ ceph_assert(ctx->item.is_on_list());
ctx->item.remove_myself();
m_io_finished.push_back(&ctx->item);
void Server::wait_clean() {
dout(20) << dendl;
- assert(!m_reader_thread.is_started());
+ ceph_assert(!m_reader_thread.is_started());
Mutex::Locker locker(m_lock);
{
derr << "*** Got signal " << sig_str(signum) << " ***" << dendl;
- assert(signum == SIGINT || signum == SIGTERM);
- assert(drv);
+ ceph_assert(signum == SIGINT || signum == SIGTERM);
+ ceph_assert(drv);
drv->shut_down();
}
shutdown_async_signal_handler();
r = image.update_unwatch(handle);
- assert(r == 0);
+ ceph_assert(r == 0);
done:
image.close();
const ClusterWatcher::PoolPeers& ClusterWatcher::get_pool_peers() const
{
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
return m_pool_peers;
}
bool call(std::string_view command, const cmdmap_t& cmdmap,
std::string_view format, bufferlist& out) override {
Commands::const_iterator i = commands.find(command);
- assert(i != commands.end());
+ ceph_assert(i != commands.end());
Formatter *f = Formatter::create(format);
stringstream ss;
bool r = i->second->call(f, &ss);
template <typename I>
void ImageDeleter<I>::shut_down_trash_watcher(Context* on_finish) {
dout(10) << dendl;
- assert(m_trash_watcher);
+ ceph_assert(m_trash_watcher);
auto ctx = new FunctionContext([this, on_finish](int r) {
delete m_trash_watcher;
m_trash_watcher = nullptr;
{
Mutex::Locker locker(m_lock);
// wake up any external state machines waiting on deletions
- assert(m_in_flight_delete_queue.empty());
+ ceph_assert(m_in_flight_delete_queue.empty());
for (auto& queue : {&m_delete_queue, &m_retry_delete_queue}) {
for (auto& info : *queue) {
notify_on_delete(info->image_id, -ECANCELED);
template <typename I>
typename ImageDeleter<I>::DeleteInfoRef
ImageDeleter<I>::find_delete_info(const std::string &image_id) {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
DeleteQueue delete_queues[] = {m_in_flight_delete_queue,
m_retry_delete_queue,
m_delete_queue};
DeleteInfoRef delete_info = m_delete_queue.front();
m_delete_queue.pop_front();
- assert(delete_info);
+ ceph_assert(delete_info);
remove_image(delete_info);
}
}
template <typename I>
void ImageDeleter<I>::remove_image(DeleteInfoRef delete_info) {
dout(10) << "info=" << *delete_info << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
m_in_flight_delete_queue.push_back(delete_info);
m_async_op_tracker.start_op();
{
Mutex::Locker locker(m_lock);
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
auto it = std::find(m_in_flight_delete_queue.begin(),
m_in_flight_delete_queue.end(), delete_info);
- assert(it != m_in_flight_delete_queue.end());
+ ceph_assert(it != m_in_flight_delete_queue.end());
m_in_flight_delete_queue.erase(it);
}
template <typename I>
void ImageDeleter<I>::schedule_retry_timer() {
- assert(m_threads->timer_lock.is_locked());
- assert(m_lock.is_locked());
+ ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
if (!m_running || m_timer_ctx != nullptr || m_retry_delete_queue.empty()) {
return;
}
template <typename I>
void ImageDeleter<I>::cancel_retry_timer() {
dout(10) << dendl;
- assert(m_threads->timer_lock.is_locked());
+ ceph_assert(m_threads->timer_lock.is_locked());
if (m_timer_ctx != nullptr) {
bool canceled = m_threads->timer->cancel_event(m_timer_ctx);
m_timer_ctx = nullptr;
- assert(canceled);
+ ceph_assert(canceled);
}
}
template <typename I>
void ImageDeleter<I>::handle_retry_timer() {
dout(10) << dendl;
- assert(m_threads->timer_lock.is_locked());
+ ceph_assert(m_threads->timer_lock.is_locked());
Mutex::Locker locker(m_lock);
- assert(m_timer_ctx != nullptr);
+ ceph_assert(m_timer_ctx != nullptr);
m_timer_ctx = nullptr;
- assert(m_running);
- assert(!m_retry_delete_queue.empty());
+ ceph_assert(m_running);
+ ceph_assert(!m_retry_delete_queue.empty());
// move all ready-to-retry items back to main queue
utime_t now = ceph_clock_now();
template <typename I>
ImageMap<I>::~ImageMap() {
- assert(m_async_op_tracker.empty());
- assert(m_timer_task == nullptr);
- assert(m_rebalance_task == nullptr);
+ ceph_assert(m_async_op_tracker.empty());
+ ceph_assert(m_timer_task == nullptr);
+ ceph_assert(m_rebalance_task == nullptr);
}
template <typename I>
void ImageMap<I>::process_updates() {
dout(20) << dendl;
- assert(m_threads->timer_lock.is_locked());
- assert(m_timer_task == nullptr);
+ ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(m_timer_task == nullptr);
Updates map_updates;
std::set<std::string> map_removals;
case image_map::ACTION_TYPE_NONE:
continue;
case image_map::ACTION_TYPE_MAP_UPDATE:
- assert(info.instance_id != image_map::UNMAPPED_INSTANCE_ID);
+ ceph_assert(info.instance_id != image_map::UNMAPPED_INSTANCE_ID);
map_updates.emplace_back(global_image_id, info.instance_id,
info.mapped_time);
break;
map_removals.emplace(global_image_id);
break;
case image_map::ACTION_TYPE_ACQUIRE:
- assert(info.instance_id != image_map::UNMAPPED_INSTANCE_ID);
+ ceph_assert(info.instance_id != image_map::UNMAPPED_INSTANCE_ID);
acquire_updates.emplace_back(global_image_id, info.instance_id);
break;
case image_map::ACTION_TYPE_RELEASE:
- assert(info.instance_id != image_map::UNMAPPED_INSTANCE_ID);
+ ceph_assert(info.instance_id != image_map::UNMAPPED_INSTANCE_ID);
release_updates.emplace_back(global_image_id, info.instance_id);
break;
}
template <typename I>
void ImageMap<I>::schedule_update_task(const Mutex &timer_lock) {
- assert(m_threads->timer_lock.is_locked());
+ ceph_assert(m_threads->timer_lock.is_locked());
schedule_rebalance_task();
}
m_timer_task = new FunctionContext([this](int r) {
- assert(m_threads->timer_lock.is_locked());
+ ceph_assert(m_threads->timer_lock.is_locked());
m_timer_task = nullptr;
process_updates();
template <typename I>
void ImageMap<I>::rebalance() {
- assert(m_rebalance_task == nullptr);
+ ceph_assert(m_rebalance_task == nullptr);
{
Mutex::Locker locker(m_lock);
template <typename I>
void ImageMap<I>::schedule_rebalance_task() {
- assert(m_threads->timer_lock.is_locked());
+ ceph_assert(m_threads->timer_lock.is_locked());
CephContext *cct = reinterpret_cast<CephContext *>(m_ioctx.cct());
}
m_rebalance_task = new FunctionContext([this](int _) {
- assert(m_threads->timer_lock.is_locked());
+ ceph_assert(m_threads->timer_lock.is_locked());
m_rebalance_task = nullptr;
rebalance();
template <typename I>
void ImageMap<I>::schedule_action(const std::string &global_image_id) {
dout(20) << "global_image_id=" << global_image_id << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
m_global_image_ids.emplace(global_image_id);
}
const std::set<std::string> &global_image_ids) {
dout(5) << "peer_uuid=" << peer_uuid << ", "
<< "global_image_ids=[" << global_image_ids << "]" << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
for (auto const &global_image_id : global_image_ids) {
auto result = m_peer_map[global_image_id].insert(peer_uuid);
const std::set<std::string> &global_image_ids) {
dout(5) << "peer_uuid=" << peer_uuid << ", "
<< "global_image_ids=[" << global_image_ids << "]" << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
Updates to_remove;
for (auto const &global_image_id : global_image_ids) {
if (policy_type == "none" || policy_type == "simple") {
m_policy.reset(image_map::SimplePolicy::create(m_ioctx));
} else {
- assert(false); // not really needed as such, but catch it.
+ ceph_assert(false); // not really needed as such, but catch it.
}
dout(20) << "mapping policy=" << policy_type << dendl;
{
Mutex::Locker locker(m_lock);
- assert(!m_shutting_down);
+ ceph_assert(!m_shutting_down);
m_shutting_down = true;
m_policy.reset();
bool call(std::string_view command, const cmdmap_t& cmdmap,
std::string_view format, bufferlist& out) override {
auto i = commands.find(command);
- assert(i != commands.end());
+ ceph_assert(i != commands.end());
Formatter *f = Formatter::create(format);
stringstream ss;
bool r = i->second->call(f, &ss);
ImageReplayer<I>::~ImageReplayer()
{
unregister_admin_socket_hook();
- assert(m_event_preprocessor == nullptr);
- assert(m_replay_status_formatter == nullptr);
- assert(m_local_image_ctx == nullptr);
- assert(m_local_replay == nullptr);
- assert(m_remote_journaler == nullptr);
- assert(m_replay_handler == nullptr);
- assert(m_on_start_finish == nullptr);
- assert(m_on_stop_finish == nullptr);
- assert(m_bootstrap_request == nullptr);
- assert(m_in_flight_status_updates == 0);
+ ceph_assert(m_event_preprocessor == nullptr);
+ ceph_assert(m_replay_status_formatter == nullptr);
+ ceph_assert(m_local_image_ctx == nullptr);
+ ceph_assert(m_local_replay == nullptr);
+ ceph_assert(m_remote_journaler == nullptr);
+ ceph_assert(m_replay_handler == nullptr);
+ ceph_assert(m_on_start_finish == nullptr);
+ ceph_assert(m_on_stop_finish == nullptr);
+ ceph_assert(m_bootstrap_request == nullptr);
+ ceph_assert(m_in_flight_status_updates == 0);
delete m_journal_listener;
}
m_delete_requested = false;
if (on_finish != nullptr) {
- assert(m_on_start_finish == nullptr);
+ ceph_assert(m_on_start_finish == nullptr);
m_on_start_finish = on_finish;
}
- assert(m_on_stop_finish == nullptr);
+ ceph_assert(m_on_stop_finish == nullptr);
}
}
}
// TODO need to support multiple remote images
- assert(!m_peers.empty());
+ ceph_assert(!m_peers.empty());
m_remote_image = {*m_peers.begin()};
Context *ctx = create_context_callback<
void ImageReplayer<I>::handle_prepare_remote_image(int r) {
dout(10) << "r=" << r << dendl;
- assert(r < 0 ? m_remote_journaler == nullptr : m_remote_journaler != nullptr);
+ ceph_assert(r < 0 ? m_remote_journaler == nullptr : m_remote_journaler != nullptr);
if (r < 0 && !m_local_image_id.empty() &&
m_local_image_tag_owner == librbd::Journal<>::LOCAL_MIRROR_UUID) {
// local image is primary -- fall-through
return;
}
- assert(m_local_journal == nullptr);
+ ceph_assert(m_local_journal == nullptr);
{
RWLock::RLocker snap_locker(m_local_image_ctx->snap_lock);
if (m_local_image_ctx->journal != nullptr) {
dout(10) << "r=" << r << dendl;
if (r < 0) {
- assert(m_local_replay == nullptr);
+ ceph_assert(m_local_replay == nullptr);
derr << "error starting external replay on local image "
<< m_local_image_id << ": " << cpp_strerror(r) << dendl;
on_start_fail(r, "error starting replay on local image");
Context *on_finish(nullptr);
{
Mutex::Locker locker(m_lock);
- assert(m_state == STATE_STARTING);
+ ceph_assert(m_state == STATE_STARTING);
m_state = STATE_REPLAYING;
std::swap(m_on_start_finish, on_finish);
}
Context *ctx = new FunctionContext([this, r, desc](int _r) {
{
Mutex::Locker locker(m_lock);
- assert(m_state == STATE_STARTING);
+ ceph_assert(m_state == STATE_STARTING);
m_state = STATE_STOPPING;
if (r < 0 && r != -ECANCELED && r != -EREMOTEIO && r != -ENOENT) {
derr << "start failed: " << cpp_strerror(r) << dendl;
bool ImageReplayer<I>::on_start_interrupted()
{
Mutex::Locker locker(m_lock);
- assert(m_state == STATE_STARTING);
+ ceph_assert(m_state == STATE_STARTING);
if (m_on_stop_finish == nullptr) {
return false;
}
shut_down_replay = true;
}
- assert(m_on_stop_finish == nullptr);
+ ceph_assert(m_on_stop_finish == nullptr);
std::swap(m_on_stop_finish, on_finish);
m_stop_requested = true;
m_manual_stop = manual;
on_flush_local_replay_flush_finish(on_flush, r);
});
- assert(m_lock.is_locked());
- assert(m_state == STATE_REPLAYING);
+ ceph_assert(m_lock.is_locked());
+ ceph_assert(m_state == STATE_REPLAYING);
m_local_replay->flush(ctx);
}
{
Mutex::Locker locker(m_lock);
- assert(m_state == STATE_REPLAY_FLUSHING);
+ ceph_assert(m_state == STATE_REPLAY_FLUSHING);
m_state = STATE_REPLAYING;
}
dout(20) << "delaying replay by " << delay << " sec" << dendl;
Mutex::Locker timer_locker(m_threads->timer_lock);
- assert(m_delayed_preprocess_task == nullptr);
+ ceph_assert(m_delayed_preprocess_task == nullptr);
m_delayed_preprocess_task = new FunctionContext(
[this](int r) {
- assert(m_threads->timer_lock.is_locked());
+ ceph_assert(m_threads->timer_lock.is_locked());
m_delayed_preprocess_task = nullptr;
m_threads->work_queue->queue(
create_context_callback<ImageReplayer,
template <typename I>
void ImageReplayer<I>::handle_preprocess_entry_ready(int r) {
dout(20) << "r=" << r << dendl;
- assert(r == 0);
+ ceph_assert(r == 0);
if (!m_event_preprocessor->is_required(m_event_entry)) {
process_entry();
template <typename I>
void ImageReplayer<I>::handle_process_entry_ready(int r) {
dout(20) << dendl;
- assert(r == 0);
+ ceph_assert(r == 0);
bool update_status = false;
{
derr << "failed to commit journal event: " << cpp_strerror(r) << dendl;
handle_replay_complete(r, "failed to commit journal event");
} else {
- assert(m_remote_journaler != nullptr);
+ ceph_assert(m_remote_journaler != nullptr);
m_remote_journaler->committed(replay_entry);
}
m_event_replay_tracker.finish_op();
template <typename I>
bool ImageReplayer<I>::start_mirror_image_status_update(bool force,
bool restarting) {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
if (!force && !is_stopped_()) {
if (!is_running_()) {
Context *on_finish = nullptr;
{
Mutex::Locker locker(m_lock);
- assert(m_in_flight_status_updates > 0);
+ ceph_assert(m_in_flight_status_updates > 0);
if (--m_in_flight_status_updates > 0) {
dout(15) << "waiting on " << m_in_flight_status_updates << " in-flight "
<< "updates" << dendl;
});
std::string desc;
- assert(m_replay_status_formatter != nullptr);
+ ceph_assert(m_replay_status_formatter != nullptr);
if (!m_replay_status_formatter->get_or_send_update(&desc,
on_req_finish)) {
dout(20) << "waiting for replay status" << dendl;
}
break;
default:
- assert(!"invalid state");
+ ceph_assert(!"invalid state");
}
{
librados::ObjectWriteOperation op;
librbd::cls_client::mirror_image_status_set(&op, m_global_image_id, status);
- assert(m_local_ioctx);
+ ceph_assert(m_local_ioctx);
librados::AioCompletion *aio_comp = create_rados_callback<
ImageReplayer<I>, &ImageReplayer<I>::handle_mirror_status_update>(this);
int r = m_local_ioctx->aio_operate(RBD_MIRRORING, aio_comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
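// The ceph_assert(r == 0) after aio_operate() is a recurring pattern in this
// patch: IoCtx::aio_operate() only reports immediate submission errors, while
// the actual result of the operation arrives via the AioCompletion callback,
// so a non-zero return here would indicate a programming error rather than an
// I/O failure.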
start_mirror_image_status_update(false, restarting)) {
m_update_status_task = new FunctionContext(
[this](int r) {
- assert(m_threads->timer_lock.is_locked());
+ ceph_assert(m_threads->timer_lock.is_locked());
m_update_status_task = nullptr;
queue_mirror_image_status_update(boost::none);
if (m_delayed_preprocess_task != nullptr) {
canceled_delayed_preprocess_task = m_threads->timer->cancel_event(
m_delayed_preprocess_task);
- assert(canceled_delayed_preprocess_task);
+ ceph_assert(canceled_delayed_preprocess_task);
m_delayed_preprocess_task = nullptr;
}
}
{
Mutex::Locker locker(m_lock);
- assert(m_state == STATE_STOPPING);
+ ceph_assert(m_state == STATE_STOPPING);
// if status updates are in-flight, wait for them to complete
// before proceeding
}
if (m_delete_requested && !m_local_image_id.empty()) {
- assert(m_remote_image.image_id.empty());
+ ceph_assert(m_remote_image.image_id.empty());
dout(0) << "remote image no longer exists: scheduling deletion" << dendl;
unregister_asok_hook = true;
std::swap(delete_requested, m_delete_requested);
std::swap(on_start, m_on_start_finish);
std::swap(on_stop, m_on_stop_finish);
m_stop_requested = false;
- assert(m_delayed_preprocess_task == nullptr);
- assert(m_state == STATE_STOPPING);
+ ceph_assert(m_delayed_preprocess_task == nullptr);
+ ceph_assert(m_state == STATE_STOPPING);
m_state = STATE_STOPPED;
}
template <typename I>
ImageSync<I>::~ImageSync() {
- assert(m_image_copy_request == nullptr);
- assert(m_image_copy_prog_ctx == nullptr);
- assert(m_update_sync_ctx == nullptr);
+ ceph_assert(m_image_copy_request == nullptr);
+ ceph_assert(m_image_copy_prog_ctx == nullptr);
+ ceph_assert(m_update_sync_ctx == nullptr);
}
template <typename I>
int r = 0;
{
RWLock::RLocker snap_locker(m_remote_image_ctx->snap_lock);
- assert(!m_client_meta->sync_points.empty());
+ ceph_assert(!m_client_meta->sync_points.empty());
auto &sync_point = m_client_meta->sync_points.front();
snap_id_end = m_remote_image_ctx->get_snap_id(
cls::rbd::UserSnapshotNamespace(), sync_point.snap_name);
template <typename I>
void ImageSync<I>::send_update_sync_point() {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
m_update_sync_ctx = nullptr;
g_ceph_context->_conf.remove_observer(this);
Mutex::Locker locker(m_lock);
- assert(m_inflight_ops.empty());
- assert(m_queue.empty());
+ ceph_assert(m_inflight_ops.empty());
+ ceph_assert(m_queue.empty());
}
template <typename I>
dout(20) << "duplicate for already started op " << id << dendl;
} else if (m_max_concurrent_syncs == 0 ||
m_inflight_ops.size() < m_max_concurrent_syncs) {
- assert(m_queue.empty());
+ ceph_assert(m_queue.empty());
m_inflight_ops.insert(id);
dout(20) << "ready to start sync for " << id << " ["
<< m_inflight_ops.size() << "/" << m_max_concurrent_syncs << "]"
template <typename I>
InstanceReplayer<I>::~InstanceReplayer() {
- assert(m_image_state_check_task == nullptr);
- assert(m_async_op_tracker.empty());
- assert(m_image_replayers.empty());
+ ceph_assert(m_image_state_check_task == nullptr);
+ ceph_assert(m_async_op_tracker.empty());
+ ceph_assert(m_image_replayers.empty());
}
template <typename I>
C_SaferCond shut_down_ctx;
shut_down(&shut_down_ctx);
int r = shut_down_ctx.wait();
- assert(r == 0);
+ ceph_assert(r == 0);
}
template <typename I>
Mutex::Locker locker(m_lock);
- assert(m_on_shut_down == nullptr);
+ ceph_assert(m_on_shut_down == nullptr);
m_on_shut_down = on_finish;
Context *ctx = new FunctionContext(
Mutex::Locker locker(m_lock);
auto result = m_peers.insert(Peer(peer_uuid, io_ctx)).second;
- assert(result);
+ ceph_assert(result);
}
template <typename I>
Mutex::Locker locker(m_lock);
- assert(m_on_shut_down == nullptr);
+ ceph_assert(m_on_shut_down == nullptr);
auto it = m_image_replayers.find(global_image_id);
if (it == m_image_replayers.end()) {
image_replayer)).first;
// TODO only a single peer is currently supported
- assert(m_peers.size() == 1);
+ ceph_assert(m_peers.size() == 1);
auto peer = *m_peers.begin();
image_replayer->add_peer(peer.peer_uuid, peer.io_ctx);
start_image_replayer(image_replayer);
dout(10) << "global_image_id=" << global_image_id << dendl;
Mutex::Locker locker(m_lock);
- assert(m_on_shut_down == nullptr);
+ ceph_assert(m_on_shut_down == nullptr);
auto it = m_image_replayers.find(global_image_id);
if (it == m_image_replayers.end()) {
<< "peer_mirror_uuid=" << peer_mirror_uuid << dendl;
Mutex::Locker locker(m_lock);
- assert(m_on_shut_down == nullptr);
+ ceph_assert(m_on_shut_down == nullptr);
auto it = m_image_replayers.find(global_image_id);
if (it != m_image_replayers.end()) {
template <typename I>
void InstanceReplayer<I>::start_image_replayer(
ImageReplayer<I> *image_replayer) {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
std::string global_image_id = image_replayer->get_global_image_id();
if (!image_replayer->is_stopped()) {
void InstanceReplayer<I>::handle_wait_for_ops(int r) {
dout(10) << "r=" << r << dendl;
- assert(r == 0);
+ ceph_assert(r == 0);
Mutex::Locker locker(m_lock);
stop_image_replayers();
void InstanceReplayer<I>::stop_image_replayers() {
dout(10) << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
Context *ctx = create_async_context_callback(
m_threads->work_queue, create_context_callback<InstanceReplayer<I>,
void InstanceReplayer<I>::handle_stop_image_replayers(int r) {
dout(10) << "r=" << r << dendl;
- assert(r == 0);
+ ceph_assert(r == 0);
Context *on_finish = nullptr;
{
Mutex::Locker locker(m_lock);
for (auto &it : m_image_replayers) {
- assert(it.second->is_stopped());
+ ceph_assert(it.second->is_stopped());
it.second->destroy();
}
m_image_replayers.clear();
- assert(m_on_shut_down != nullptr);
+ ceph_assert(m_on_shut_down != nullptr);
std::swap(on_finish, m_on_shut_down);
}
on_finish->complete(r);
dout(10) << m_image_state_check_task << dendl;
bool canceled = m_threads->timer->cancel_event(m_image_state_check_task);
- assert(canceled);
+ ceph_assert(canceled);
m_image_state_check_task = nullptr;
}
template <typename I>
void InstanceReplayer<I>::schedule_image_state_check_task() {
- assert(m_threads->timer_lock.is_locked());
- assert(m_image_state_check_task == nullptr);
+ ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(m_image_state_check_task == nullptr);
m_image_state_check_task = new FunctionContext(
[this](int r) {
- assert(m_threads->timer_lock.is_locked());
+ ceph_assert(m_threads->timer_lock.is_locked());
m_image_state_check_task = nullptr;
schedule_image_state_check_task();
queue_start_image_replayers();
void finish(int r) override {
dout(10) << "C_RemoveInstanceRequest: " << this << " " << __func__ << ": r="
<< r << dendl;
- assert(r == 0);
+ ceph_assert(r == 0);
on_finish->complete(r);
}
<< ": instance_watcher=" << instance_watcher << ", instance_id="
<< instance_id << ", request_id=" << request_id << dendl;
- assert(instance_watcher->m_lock.is_locked());
+ ceph_assert(instance_watcher->m_lock.is_locked());
if (!send_to_leader) {
- assert((!instance_id.empty()));
+ ceph_assert((!instance_id.empty()));
notifier.reset(new librbd::watcher::Notifier(
instance_watcher->m_work_queue,
instance_watcher->m_ioctx,
instance_watcher->m_notify_op_tracker.start_op();
auto result = instance_watcher->m_notify_ops.insert(
std::make_pair(instance_id, this)).second;
- assert(result);
+ ceph_assert(result);
}
void send() {
dout(10) << "C_NotifyInstanceRequest: " << this << " " << __func__ << dendl;
- assert(instance_watcher->m_lock.is_locked());
+ ceph_assert(instance_watcher->m_lock.is_locked());
if (canceling) {
dout(10) << "C_NotifyInstanceRequest: " << this << " " << __func__
if (instance_watcher->m_leader_instance_id != instance_id) {
auto count = instance_watcher->m_notify_ops.erase(
std::make_pair(instance_id, this));
- assert(count > 0);
+ ceph_assert(count > 0);
instance_id = instance_watcher->m_leader_instance_id;
auto result = instance_watcher->m_notify_ops.insert(
std::make_pair(instance_id, this)).second;
- assert(result);
+ ceph_assert(result);
notifier.reset(new librbd::watcher::Notifier(
instance_watcher->m_work_queue,
void cancel() {
dout(10) << "C_NotifyInstanceRequest: " << this << " " << __func__ << dendl;
- assert(instance_watcher->m_lock.is_locked());
+ ceph_assert(instance_watcher->m_lock.is_locked());
canceling = true;
instance_watcher->unsuspend_notify_request(this);
Mutex::Locker locker(instance_watcher->m_lock);
auto result = instance_watcher->m_notify_ops.erase(
std::make_pair(instance_id, this));
- assert(result > 0);
+ ceph_assert(result > 0);
instance_watcher->m_notify_op_tracker.finish_op();
}
librados::AioCompletion *aio_comp = create_rados_callback(ctx);
int r = io_ctx.aio_operate(RBD_MIRROR_LEADER, aio_comp, &op, &ctx->out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
template <typename I>
InstanceWatcher<I>::~InstanceWatcher() {
- assert(m_notify_ops.empty());
- assert(m_notify_op_tracker.empty());
- assert(m_suspended_ops.empty());
- assert(m_inflight_sync_reqs.empty());
- assert(m_image_sync_throttler == nullptr);
+ ceph_assert(m_notify_ops.empty());
+ ceph_assert(m_notify_op_tracker.empty());
+ ceph_assert(m_suspended_ops.empty());
+ ceph_assert(m_inflight_sync_reqs.empty());
+ ceph_assert(m_image_sync_throttler == nullptr);
m_instance_lock->destroy();
}
Mutex::Locker locker(m_lock);
- assert(m_on_finish == nullptr);
+ ceph_assert(m_on_finish == nullptr);
m_on_finish = on_finish;
m_ret_val = 0;
C_SaferCond shut_down_ctx;
shut_down(&shut_down_ctx);
int r = shut_down_ctx.wait();
- assert(r == 0);
+ ceph_assert(r == 0);
}
template <typename I>
Mutex::Locker locker(m_lock);
- assert(m_on_finish == nullptr);
+ ceph_assert(m_on_finish == nullptr);
m_on_finish = on_finish;
m_ret_val = 0;
Mutex::Locker locker(m_lock);
- assert(m_on_finish == nullptr);
+ ceph_assert(m_on_finish == nullptr);
m_on_finish = on_finish;
m_ret_val = 0;
Mutex::Locker locker(m_lock);
- assert(m_on_finish == nullptr);
+ ceph_assert(m_on_finish == nullptr);
if (instance_id == m_instance_id) {
handle_image_acquire(global_image_id, on_notify_ack);
Mutex::Locker locker(m_lock);
- assert(m_on_finish == nullptr);
+ ceph_assert(m_on_finish == nullptr);
if (instance_id == m_instance_id) {
handle_image_release(global_image_id, on_notify_ack);
<< "peer_mirror_uuid=" << peer_mirror_uuid << dendl;
Mutex::Locker locker(m_lock);
- assert(m_on_finish == nullptr);
+ ceph_assert(m_on_finish == nullptr);
if (instance_id == m_instance_id) {
handle_peer_image_removed(global_image_id, peer_mirror_uuid, on_notify_ack);
Mutex::Locker locker(m_lock);
- assert(m_inflight_sync_reqs.count(sync_id) == 0);
+ ceph_assert(m_inflight_sync_reqs.count(sync_id) == 0);
uint64_t request_id = ++m_request_seq;
return false;
}
- assert(sync_ctx->req != nullptr);
+ ceph_assert(sync_ctx->req != nullptr);
sync_ctx->req->cancel();
return true;
}
void InstanceWatcher<I>::notify_sync_complete(const Mutex&,
const std::string &sync_id) {
dout(10) << "sync_id=" << sync_id << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
auto it = m_inflight_sync_reqs.find(sync_id);
- assert(it != m_inflight_sync_reqs.end());
+ ceph_assert(it != m_inflight_sync_reqs.end());
auto sync_ctx = it->second;
- assert(sync_ctx->req == nullptr);
+ ceph_assert(sync_ctx->req == nullptr);
m_inflight_sync_reqs.erase(it);
m_work_queue->queue(sync_ctx, 0);
Context *on_start = nullptr;
{
Mutex::Locker locker(m_lock);
- assert(sync_ctx->req != nullptr);
- assert(sync_ctx->on_start != nullptr);
+ ceph_assert(sync_ctx->req != nullptr);
+ ceph_assert(sync_ctx->on_start != nullptr);
if (sync_ctx->req->canceling) {
r = -ECANCELED;
Mutex::Locker locker(m_lock);
- assert(m_image_sync_throttler == nullptr);
+ ceph_assert(m_image_sync_throttler == nullptr);
m_image_sync_throttler = ImageSyncThrottler<I>::create();
m_leader_instance_id = m_instance_id;
Mutex::Locker locker(m_lock);
- assert(m_image_sync_throttler != nullptr);
+ ceph_assert(m_image_sync_throttler != nullptr);
m_leader_instance_id.clear();
template <typename I>
void InstanceWatcher<I>::register_instance() {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
dout(10) << dendl;
InstanceWatcher<I>, &InstanceWatcher<I>::handle_register_instance>(this);
int r = m_ioctx.aio_operate(RBD_MIRROR_LEADER, aio_comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
void InstanceWatcher<I>::create_instance_object() {
dout(10) << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
librados::ObjectWriteOperation op;
op.create(true);
InstanceWatcher<I>,
&InstanceWatcher<I>::handle_create_instance_object>(this);
int r = m_ioctx.aio_operate(m_oid, aio_comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
void InstanceWatcher<I>::register_watch() {
dout(10) << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
Context *ctx = create_async_context_callback(
m_work_queue, create_context_callback<
void InstanceWatcher<I>::acquire_lock() {
dout(10) << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
Context *ctx = create_async_context_callback(
m_work_queue, create_context_callback<
void InstanceWatcher<I>::release_lock() {
dout(10) << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
Context *ctx = create_async_context_callback(
m_work_queue, create_context_callback<
void InstanceWatcher<I>::unregister_watch() {
dout(10) << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
Context *ctx = create_async_context_callback(
m_work_queue, create_context_callback<
template <typename I>
void InstanceWatcher<I>::remove_instance_object() {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
dout(10) << dendl;
InstanceWatcher<I>,
&InstanceWatcher<I>::handle_remove_instance_object>(this);
int r = m_ioctx.aio_operate(m_oid, aio_comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
void InstanceWatcher<I>::unregister_instance() {
dout(10) << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
librados::ObjectWriteOperation op;
librbd::cls_client::mirror_instances_remove(&op, m_instance_id);
InstanceWatcher<I>, &InstanceWatcher<I>::handle_unregister_instance>(this);
int r = m_ioctx.aio_operate(RBD_MIRROR_LEADER, aio_comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
void InstanceWatcher<I>::wait_for_notify_ops() {
dout(10) << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
for (auto op : m_notify_ops) {
op.second->cancel();
void InstanceWatcher<I>::handle_wait_for_notify_ops(int r) {
dout(10) << "r=" << r << dendl;
- assert(r == 0);
+ ceph_assert(r == 0);
Context *on_finish = nullptr;
{
Mutex::Locker locker(m_lock);
- assert(m_notify_ops.empty());
+ ceph_assert(m_notify_ops.empty());
std::swap(on_finish, m_on_finish);
r = m_ret_val;
void InstanceWatcher<I>::get_instance_locker() {
dout(10) << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
Context *ctx = create_async_context_callback(
m_work_queue, create_context_callback<
void InstanceWatcher<I>::break_instance_lock() {
dout(10) << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
Context *ctx = create_async_context_callback(
m_work_queue, create_context_callback<
void InstanceWatcher<I>::suspend_notify_request(C_NotifyInstanceRequest *req) {
dout(10) << req << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
auto result = m_suspended_ops.insert(req).second;
- assert(result);
+ ceph_assert(result);
}
template <typename I>
C_NotifyInstanceRequest *req) {
dout(10) << req << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
auto result = m_suspended_ops.erase(req);
if (result == 0) {
void InstanceWatcher<I>::unsuspend_notify_requests() {
dout(10) << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
std::set<C_NotifyInstanceRequest *> suspended_ops;
std::swap(m_suspended_ops, suspended_ops);
Mutex::Locker locker(m_lock);
Request request(instance_id, request_id);
auto it = m_requests.find(request);
- assert(it != m_requests.end());
+ ceph_assert(it != m_requests.end());
on_notify_ack = it->on_notify_ack;
m_requests.erase(it);
}
dout(10) << dendl;
Mutex::Locker locker(m_lock);
- assert(m_on_finish == nullptr);
+ ceph_assert(m_on_finish == nullptr);
m_on_finish = on_finish;
get_instances();
}
dout(10) << dendl;
Mutex::Locker locker(m_lock);
- assert(m_on_finish == nullptr);
+ ceph_assert(m_on_finish == nullptr);
m_on_finish = on_finish;
Context *ctx = new FunctionContext(
dout(5) << dendl;
Mutex::Locker locker(m_lock);
- assert(m_listener_blocked);
+ ceph_assert(m_listener_blocked);
m_listener_blocked = false;
InstanceIds added_instance_ids;
void Instances<I>::get_instances() {
dout(10) << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
Context *ctx = create_context_callback<
Instances, &Instances<I>::handle_get_instances>(this);
void Instances<I>::wait_for_ops() {
dout(10) << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
Context *ctx = create_async_context_callback(
m_threads->work_queue, create_context_callback<
void Instances<I>::handle_wait_for_ops(int r) {
dout(10) << "r=" << r << dendl;
- assert(r == 0);
+ ceph_assert(r == 0);
Context *on_finish = nullptr;
{
template <typename I>
void Instances<I>::remove_instances(const utime_t& time) {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
InstanceIds instance_ids;
for (auto& instance_pair : m_instances) {
instance_ids.push_back(instance_pair.first);
}
}
- assert(!instance_ids.empty());
+ ceph_assert(!instance_ids.empty());
dout(10) << "instance_ids=" << instance_ids << dendl;
Context* ctx = new FunctionContext([this, instance_ids](int r) {
Mutex::Locker locker(m_lock);
dout(10) << "r=" << r << ", instance_ids=" << instance_ids << dendl;
- assert(r == 0);
+ ceph_assert(r == 0);
// fire removed notification now that instances have been blacklisted
m_threads->work_queue->queue(
template <typename I>
void Instances<I>::cancel_remove_task() {
- assert(m_threads->timer_lock.is_locked());
- assert(m_lock.is_locked());
+ ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
if (m_timer_task == nullptr) {
return;
dout(10) << dendl;
bool canceled = m_threads->timer->cancel_event(m_timer_task);
- assert(canceled);
+ ceph_assert(canceled);
m_timer_task = nullptr;
}
// schedule a time to fire when the oldest instance should be removed
m_timer_task = new FunctionContext(
[this, oldest_time](int r) {
- assert(m_threads->timer_lock.is_locked());
+ ceph_assert(m_threads->timer_lock.is_locked());
Mutex::Locker locker(m_lock);
m_timer_task = nullptr;
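// The ceph_asserts on m_threads->timer_lock inside these timer callbacks rely
// on SafeTimer's safe-callbacks mode: scheduled contexts are invoked with the
// timer lock already held, so the callback may assert that and then take
// m_lock without re-acquiring timer_lock itself.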
template <typename I>
LeaderWatcher<I>::~LeaderWatcher() {
- assert(m_status_watcher == nullptr);
- assert(m_instances == nullptr);
- assert(m_timer_task == nullptr);
+ ceph_assert(m_status_watcher == nullptr);
+ ceph_assert(m_instances == nullptr);
+ ceph_assert(m_timer_task == nullptr);
delete m_leader_lock;
}
Mutex::Locker locker(m_lock);
- assert(m_on_finish == nullptr);
+ ceph_assert(m_on_finish == nullptr);
m_on_finish = on_finish;
create_leader_object();
void LeaderWatcher<I>::create_leader_object() {
dout(10) << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
librados::ObjectWriteOperation op;
op.create(false);
librados::AioCompletion *aio_comp = create_rados_callback<
LeaderWatcher<I>, &LeaderWatcher<I>::handle_create_leader_object>(this);
int r = m_ioctx.aio_operate(m_oid, aio_comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
void LeaderWatcher<I>::register_watch() {
dout(10) << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
Context *ctx = create_async_context_callback(
m_work_queue, create_context_callback<
Mutex::Locker locker(m_lock);
derr << "error registering leader watcher for " << m_oid << " object: "
<< cpp_strerror(r) << dendl;
- assert(m_on_finish != nullptr);
+ ceph_assert(m_on_finish != nullptr);
std::swap(on_finish, m_on_finish);
} else {
Mutex::Locker locker(m_lock);
C_SaferCond shut_down_ctx;
shut_down(&shut_down_ctx);
int r = shut_down_ctx.wait();
- assert(r == 0);
+ ceph_assert(r == 0);
}
template <typename I>
Mutex::Locker timer_locker(m_threads->timer_lock);
Mutex::Locker locker(m_lock);
- assert(m_on_shut_down_finish == nullptr);
+ ceph_assert(m_on_shut_down_finish == nullptr);
m_on_shut_down_finish = on_finish;
cancel_timer_task();
shut_down_leader_lock();
void LeaderWatcher<I>::shut_down_leader_lock() {
dout(10) << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
Context *ctx = create_async_context_callback(
m_work_queue, create_context_callback<
void LeaderWatcher<I>::unregister_watch() {
dout(10) << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
Context *ctx = create_async_context_callback(
m_work_queue, create_context_callback<
void LeaderWatcher<I>::handle_wait_for_tasks() {
dout(10) << dendl;
- assert(m_threads->timer_lock.is_locked());
- assert(m_lock.is_locked());
- assert(m_on_shut_down_finish != nullptr);
+ ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
+ ceph_assert(m_on_shut_down_finish != nullptr);
- assert(!m_timer_op_tracker.empty());
+ ceph_assert(!m_timer_op_tracker.empty());
m_timer_op_tracker.finish_op();
auto ctx = new FunctionContext([this](int r) {
{
// ensure lock isn't held when completing shut down
Mutex::Locker locker(m_lock);
- assert(m_on_shut_down_finish != nullptr);
+ ceph_assert(m_on_shut_down_finish != nullptr);
on_finish = m_on_shut_down_finish;
}
on_finish->complete(0);
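// The block above is the recurring completion hand-off idiom in this file:
// take the stored Context out while holding m_lock, then complete it only
// after the lock has been dropped. A minimal sketch (member names as in the
// surrounding code):
//
//   Context *on_finish = nullptr;
//   {
//     Mutex::Locker locker(m_lock);               // hold the lock only for the swap
//     ceph_assert(m_on_shut_down_finish != nullptr);
//     std::swap(on_finish, m_on_shut_down_finish);
//   }
//   on_finish->complete(0);                       // never complete a Context under m_lock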
template <typename I>
bool LeaderWatcher<I>::is_leader(Mutex &lock) const {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
bool leader = m_leader_lock->is_leader();
dout(10) << leader << dendl;
template <typename I>
bool LeaderWatcher<I>::is_releasing_leader(Mutex &lock) const {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
bool releasing = m_leader_lock->is_releasing_leader();
dout(10) << releasing << dendl;
template <typename I>
void LeaderWatcher<I>::cancel_timer_task() {
- assert(m_threads->timer_lock.is_locked());
- assert(m_lock.is_locked());
+ ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
if (m_timer_task == nullptr) {
return;
dout(10) << m_timer_task << dendl;
bool canceled = m_threads->timer->cancel_event(m_timer_task);
- assert(canceled);
+ ceph_assert(canceled);
m_timer_task = nullptr;
}
int delay_factor, bool leader,
TimerCallback timer_callback,
bool shutting_down) {
- assert(m_threads->timer_lock.is_locked());
- assert(m_lock.is_locked());
+ ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
if (!shutting_down && m_on_shut_down_finish != nullptr) {
return;
m_timer_task = new FunctionContext(
[this, leader, timer_callback](int r) {
- assert(m_threads->timer_lock.is_locked());
+ ceph_assert(m_threads->timer_lock.is_locked());
m_timer_task = nullptr;
if (m_timer_op_tracker.empty()) {
TimerCallback timer_callback) {
dout(10) << dendl;
- assert(m_threads->timer_lock.is_locked());
- assert(m_lock.is_locked());
- assert(m_timer_op_tracker.empty());
+ ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
+ ceph_assert(m_timer_op_tracker.empty());
if (is_leader(m_lock) != leader) {
return;
}
Mutex::Locker locker(m_lock);
- assert(m_on_finish == nullptr);
+ ceph_assert(m_on_finish == nullptr);
m_on_finish = on_finish;
m_ret_val = 0;
dout(10) << dendl;
Mutex::Locker locker(m_lock);
- assert(m_on_finish == nullptr);
+ ceph_assert(m_on_finish == nullptr);
m_on_finish = on_finish;
m_ret_val = 0;
}
Mutex::Locker locker(m_lock);
- assert(m_on_finish == nullptr);
+ ceph_assert(m_on_finish == nullptr);
m_on_finish = on_finish;
notify_lock_released();
void LeaderWatcher<I>::break_leader_lock() {
dout(10) << dendl;
- assert(m_threads->timer_lock.is_locked());
- assert(m_lock.is_locked());
- assert(!m_timer_op_tracker.empty());
+ ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
+ ceph_assert(!m_timer_op_tracker.empty());
if (m_locker.cookie.empty()) {
get_locker();
Mutex::Locker timer_locker(m_threads->timer_lock);
Mutex::Locker locker(m_lock);
- assert(!m_timer_op_tracker.empty());
+ ceph_assert(!m_timer_op_tracker.empty());
if (m_leader_lock->is_shutdown()) {
dout(10) << "canceling due to shutdown" << dendl;
uint32_t delay_factor) {
dout(10) << dendl;
- assert(m_threads->timer_lock.is_locked());
- assert(m_lock.is_locked());
+ ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
if (reset_leader) {
m_locker = {};
void LeaderWatcher<I>::get_locker() {
dout(10) << dendl;
- assert(m_threads->timer_lock.is_locked());
- assert(m_lock.is_locked());
- assert(!m_timer_op_tracker.empty());
+ ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
+ ceph_assert(!m_timer_op_tracker.empty());
C_GetLocker *get_locker_ctx = new C_GetLocker(this);
Context *ctx = create_async_context_callback(m_work_queue, get_locker_ctx);
Mutex::Locker timer_locker(m_threads->timer_lock);
Mutex::Locker mutex_locker(m_lock);
- assert(!m_timer_op_tracker.empty());
+ ceph_assert(!m_timer_op_tracker.empty());
if (m_leader_lock->is_shutdown()) {
dout(10) << "canceling due to shutdown" << dendl;
void LeaderWatcher<I>::schedule_acquire_leader_lock(uint32_t delay_factor) {
dout(10) << dendl;
- assert(m_threads->timer_lock.is_locked());
- assert(m_lock.is_locked());
+ ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
schedule_timer_task("acquire leader lock",
delay_factor *
template <typename I>
void LeaderWatcher<I>::acquire_leader_lock() {
- assert(m_threads->timer_lock.is_locked());
- assert(m_lock.is_locked());
- assert(!m_timer_op_tracker.empty());
+ ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
+ ceph_assert(!m_timer_op_tracker.empty());
++m_acquire_attempts;
dout(10) << "acquire_attempts=" << m_acquire_attempts << dendl;
Mutex::Locker timer_locker(m_threads->timer_lock);
Mutex::Locker locker(m_lock);
- assert(!m_timer_op_tracker.empty());
+ ceph_assert(!m_timer_op_tracker.empty());
if (m_leader_lock->is_shutdown()) {
dout(10) << "canceling due to shutdown" << dendl;
void LeaderWatcher<I>::release_leader_lock() {
dout(10) << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
Context *ctx = create_async_context_callback(
m_work_queue, create_context_callback<
void LeaderWatcher<I>::init_status_watcher() {
dout(10) << dendl;
- assert(m_lock.is_locked());
- assert(m_status_watcher == nullptr);
+ ceph_assert(m_lock.is_locked());
+ ceph_assert(m_status_watcher == nullptr);
m_status_watcher = MirrorStatusWatcher<I>::create(m_ioctx, m_work_queue);
schedule_acquire_leader_lock(0);
}
- assert(m_on_finish != nullptr);
+ ceph_assert(m_on_finish != nullptr);
std::swap(on_finish, m_on_finish);
}
void LeaderWatcher<I>::shut_down_status_watcher() {
dout(10) << dendl;
- assert(m_lock.is_locked());
- assert(m_status_watcher != nullptr);
+ ceph_assert(m_lock.is_locked());
+ ceph_assert(m_status_watcher != nullptr);
Context *ctx = create_async_context_callback(
m_work_queue, create_context_callback<LeaderWatcher<I>,
void LeaderWatcher<I>::init_instances() {
dout(10) << dendl;
- assert(m_lock.is_locked());
- assert(m_instances == nullptr);
+ ceph_assert(m_lock.is_locked());
+ ceph_assert(m_instances == nullptr);
m_instances = Instances<I>::create(m_threads, m_ioctx, m_instance_id,
m_instances_listener);
m_instances->destroy();
m_instances = nullptr;
- assert(m_on_finish != nullptr);
+ ceph_assert(m_on_finish != nullptr);
std::swap(m_on_finish, on_finish);
} else {
Mutex::Locker locker(m_lock);
void LeaderWatcher<I>::shut_down_instances() {
dout(10) << dendl;
- assert(m_lock.is_locked());
- assert(m_instances != nullptr);
+ ceph_assert(m_lock.is_locked());
+ ceph_assert(m_instances != nullptr);
Context *ctx = create_async_context_callback(
m_work_queue, create_context_callback<LeaderWatcher<I>,
template <typename I>
void LeaderWatcher<I>::handle_shut_down_instances(int r) {
dout(10) << "r=" << r << dendl;
- assert(r == 0);
+ ceph_assert(r == 0);
Context *on_finish = nullptr;
{
m_instances->destroy();
m_instances = nullptr;
- assert(m_on_finish != nullptr);
+ ceph_assert(m_on_finish != nullptr);
std::swap(m_on_finish, on_finish);
}
on_finish->complete(r);
void LeaderWatcher<I>::notify_listener() {
dout(10) << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
Context *ctx = create_async_context_callback(
m_work_queue, create_context_callback<
void LeaderWatcher<I>::notify_lock_acquired() {
dout(10) << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
Context *ctx = create_context_callback<
LeaderWatcher<I>, &LeaderWatcher<I>::handle_notify_lock_acquired>(this);
m_ret_val = r;
}
- assert(m_on_finish != nullptr);
+ ceph_assert(m_on_finish != nullptr);
std::swap(m_on_finish, on_finish);
// listener should be ready for instance add/remove events now
void LeaderWatcher<I>::notify_lock_released() {
dout(10) << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
Context *ctx = create_context_callback<
LeaderWatcher<I>, &LeaderWatcher<I>::handle_notify_lock_released>(this);
<< dendl;
}
- assert(m_on_finish != nullptr);
+ ceph_assert(m_on_finish != nullptr);
std::swap(m_on_finish, on_finish);
}
on_finish->complete(r);
void LeaderWatcher<I>::notify_heartbeat() {
dout(10) << dendl;
- assert(m_threads->timer_lock.is_locked());
- assert(m_lock.is_locked());
- assert(!m_timer_op_tracker.empty());
+ ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
+ ceph_assert(!m_timer_op_tracker.empty());
if (!is_leader(m_lock)) {
dout(5) << "not leader, canceling" << dendl;
Mutex::Locker timer_locker(m_threads->timer_lock);
Mutex::Locker locker(m_lock);
- assert(!m_timer_op_tracker.empty());
+ ceph_assert(!m_timer_op_tracker.empty());
m_timer_op_tracker.finish_op();
if (m_leader_lock->is_shutdown()) {
bool call(std::string_view command, const cmdmap_t& cmdmap,
std::string_view format, bufferlist& out) override {
Commands::const_iterator i = commands.find(command);
- assert(i != commands.end());
+ ceph_assert(i != commands.end());
Formatter *f = Formatter::create(format);
stringstream ss;
bool r = i->second->call(f, &ss);
void Mirror::update_pool_replayers(const PoolPeers &pool_peers)
{
dout(20) << "enter" << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
// remove stale pool replayers before creating new pool replayers
for (auto it = m_pool_replayers.begin(); it != m_pool_replayers.end();) {
librados::AioCompletion *aio_comp = create_rados_callback(on_finish);
int r = m_ioctx.aio_operate(RBD_MIRRORING, aio_comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
bool call(std::string_view command, const cmdmap_t& cmdmap,
std::string_view format, bufferlist& out) override {
auto i = commands.find(command);
- assert(i != commands.end());
+ ceph_assert(i != commands.end());
Formatter *f = Formatter::create(format);
stringstream ss;
bool r = i->second->call(f, &ss);
template <typename I>
void PoolReplayer<I>::init()
{
- assert(!m_pool_replayer_thread.is_started());
+ ceph_assert(!m_pool_replayer_thread.is_started());
// reset state
m_stopping = false;
m_instance_watcher.reset();
m_instance_replayer.reset();
- assert(!m_image_map);
- assert(!m_image_deleter);
- assert(!m_local_pool_watcher);
- assert(!m_remote_pool_watcher);
+ ceph_assert(!m_image_map);
+ ceph_assert(!m_image_deleter);
+ ceph_assert(!m_local_pool_watcher);
+ ceph_assert(!m_remote_pool_watcher);
m_local_rados.reset();
m_remote_rados.reset();
}
cct->_conf.complain_about_parse_errors(cct);
r = (*rados_ref)->init_with_context(cct);
- assert(r == 0);
+ ceph_assert(r == 0);
cct->put();
r = (*rados_ref)->connect();
dout(5) << dendl;
Mutex::Locker locker(m_lock);
- assert(!m_image_map);
+ ceph_assert(!m_image_map);
m_image_map.reset(ImageMap<I>::create(m_local_io_ctx, m_threads,
m_instance_watcher->get_instance_id(),
m_image_map_listener));
dout(10) << dendl;
Mutex::Locker locker(m_lock);
- assert(!m_local_pool_watcher);
+ ceph_assert(!m_local_pool_watcher);
m_local_pool_watcher.reset(PoolWatcher<I>::create(
m_threads, m_local_io_ctx, m_local_pool_watcher_listener));
dout(10) << dendl;
Mutex::Locker locker(m_lock);
- assert(!m_remote_pool_watcher);
+ ceph_assert(!m_remote_pool_watcher);
m_remote_pool_watcher.reset(PoolWatcher<I>::create(
m_threads, m_remote_io_ctx, m_remote_pool_watcher_listener));
dout(10) << dendl;
Mutex::Locker locker(m_lock);
- assert(!m_image_deleter);
+ ceph_assert(!m_image_deleter);
on_finish = new FunctionContext([this, on_finish](int r) {
handle_init_image_deleter(r, on_finish);
{
Mutex::Locker locker(m_lock);
- assert(m_image_deleter);
+ ceph_assert(m_image_deleter);
m_image_deleter.reset();
}
{
Mutex::Locker locker(m_lock);
- assert(m_local_pool_watcher);
+ ceph_assert(m_local_pool_watcher);
m_local_pool_watcher.reset();
if (m_remote_pool_watcher) {
template <typename I>
void PoolReplayer<I>::handle_wait_for_update_ops(int r, Context *on_finish) {
dout(10) << "r=" << r << dendl;
- assert(r == 0);
+ ceph_assert(r == 0);
shut_down_image_map(on_finish);
}
}
Mutex::Locker locker(m_lock);
- assert(m_image_map);
+ ceph_assert(m_image_map);
m_image_map.reset();
m_instance_replayer->release_all(on_finish);
const std::string &global_image_id,
const std::string &instance_id,
Context* on_finish) {
- assert(!mirror_uuid.empty());
+ ceph_assert(!mirror_uuid.empty());
dout(5) << "mirror_uuid=" << mirror_uuid << ", "
<< "global_image_id=" << global_image_id << ", "
<< "instance_id=" << instance_id << dendl;
return;
}
- assert(m_image_map);
+ ceph_assert(m_image_map);
m_image_map->update_instances_added(instance_ids);
}
return;
}
- assert(m_image_map);
+ ceph_assert(m_image_map);
m_image_map->update_instances_removed(instance_ids);
}
Mutex::Locker locker(m_lock);
m_on_init_finish = on_finish;
- assert(!m_refresh_in_progress);
+ ceph_assert(!m_refresh_in_progress);
m_refresh_in_progress = true;
}
Mutex::Locker timer_locker(m_threads->timer_lock);
Mutex::Locker locker(m_lock);
- assert(!m_shutting_down);
+ ceph_assert(!m_shutting_down);
m_shutting_down = true;
if (m_timer_ctx != nullptr) {
m_threads->timer->cancel_event(m_timer_ctx);
void PoolWatcher<I>::register_watcher() {
{
Mutex::Locker locker(m_lock);
- assert(m_image_ids_invalid);
- assert(m_refresh_in_progress);
+ ceph_assert(m_image_ids_invalid);
+ ceph_assert(m_refresh_in_progress);
}
// if the watch registration is in-flight, let the watcher
{
Mutex::Locker locker(m_lock);
- assert(m_image_ids_invalid);
- assert(m_refresh_in_progress);
+ ceph_assert(m_image_ids_invalid);
+ ceph_assert(m_refresh_in_progress);
if (r < 0) {
m_refresh_in_progress = false;
}
{
Mutex::Locker locker(m_lock);
- assert(m_image_ids_invalid);
- assert(m_refresh_in_progress);
+ ceph_assert(m_image_ids_invalid);
+ ceph_assert(m_refresh_in_progress);
// clear all pending notification events since we need to perform
// a full image list refresh
Context *on_init_finish = nullptr;
{
Mutex::Locker locker(m_lock);
- assert(m_image_ids_invalid);
- assert(m_refresh_in_progress);
+ ceph_assert(m_image_ids_invalid);
+ ceph_assert(m_refresh_in_progress);
if (r >= 0) {
m_pending_image_ids = std::move(m_refresh_image_ids);
m_async_op_tracker.finish_op();
if (on_init_finish != nullptr) {
- assert(r == -EBLACKLISTED);
+ ceph_assert(r == -EBLACKLISTED);
on_init_finish->complete(r);
}
}
librados::AioCompletion *aio_comp = create_rados_callback<
PoolWatcher, &PoolWatcher<I>::handle_get_mirror_uuid>(this);
int r = m_remote_io_ctx.aio_operate(RBD_MIRRORING, aio_comp, &op, &m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
Context *on_init_finish = nullptr;
{
Mutex::Locker locker(m_lock);
- assert(m_image_ids_invalid);
- assert(m_refresh_in_progress);
+ ceph_assert(m_image_ids_invalid);
+ ceph_assert(m_refresh_in_progress);
m_refresh_in_progress = false;
m_pending_mirror_uuid = "";
template <typename I>
void PoolWatcher<I>::process_refresh_images() {
- assert(m_threads->timer_lock.is_locked());
- assert(m_timer_ctx != nullptr);
+ ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(m_timer_ctx != nullptr);
m_timer_ctx = nullptr;
{
Mutex::Locker locker(m_lock);
- assert(!m_refresh_in_progress);
+ ceph_assert(!m_refresh_in_progress);
m_refresh_in_progress = true;
m_deferred_refresh = false;
}
template <typename I>
void PoolWatcher<I>::schedule_listener() {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
m_pending_updates = true;
if (m_shutting_down || m_image_ids_invalid || m_notify_listener_in_progress) {
return;
ImageIds removed_image_ids;
{
Mutex::Locker locker(m_lock);
- assert(m_notify_listener_in_progress);
+ ceph_assert(m_notify_listener_in_progress);
// if the mirror uuid is updated, treat it as the removal of all
// images in the pool
{
Mutex::Locker locker(m_lock);
- assert(m_notify_listener_in_progress);
+ ceph_assert(m_notify_listener_in_progress);
// if the watch failed while we didn't own the lock, we are going
// to need to perform a full refresh
template <typename I>
void ServiceDaemon<I>::update_status() {
dout(20) << dendl;
- assert(m_threads->timer_lock.is_locked());
+ ceph_assert(m_threads->timer_lock.is_locked());
ceph::JSONFormatter f;
{
RemoveRequest<I>, &RemoveRequest<I>::handle_get_snap_context>(this);
m_out_bl.clear();
int r = m_io_ctx.aio_operate(header_oid, aio_comp, &op, &m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
&TrashMoveRequest<I>::handle_get_mirror_image_id>(this);
m_out_bl.clear();
int r = m_io_ctx.aio_operate(RBD_MIRRORING, aio_comp, &op, &m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
TrashMoveRequest<I>,
&TrashMoveRequest<I>::handle_disable_mirror_image>(this);
int r = m_io_ctx.aio_operate(RBD_MIRRORING, aio_comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
TrashMoveRequest<I>,
&TrashMoveRequest<I>::handle_remove_mirror_image>(this);
int r = m_io_ctx.aio_operate(RBD_MIRRORING, aio_comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
Mutex::Locker locker(m_lock);
m_on_init_finish = on_finish;
- assert(!m_trash_list_in_progress);
+ ceph_assert(!m_trash_list_in_progress);
m_trash_list_in_progress = true;
}
Mutex::Locker timer_locker(m_threads->timer_lock);
Mutex::Locker locker(m_lock);
- assert(!m_shutting_down);
+ ceph_assert(!m_shutting_down);
m_shutting_down = true;
if (m_timer_ctx != nullptr) {
m_threads->timer->cancel_event(m_timer_ctx);
dout(20) << dendl;
{
Mutex::Locker locker(m_lock);
- assert(m_trash_list_in_progress);
+ ceph_assert(m_trash_list_in_progress);
}
librados::ObjectWriteOperation op;
auto aio_comp = create_rados_callback<
TrashWatcher<I>, &TrashWatcher<I>::handle_create_trash>(this);
int r = m_io_ctx.aio_operate(RBD_TRASH, aio_comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
dout(20) << "r=" << r << dendl;
{
Mutex::Locker locker(m_lock);
- assert(m_trash_list_in_progress);
+ ceph_assert(m_trash_list_in_progress);
}
if (r < 0 && r != -EEXIST) {
void TrashWatcher<I>::register_watcher() {
{
Mutex::Locker locker(m_lock);
- assert(m_trash_list_in_progress);
+ ceph_assert(m_trash_list_in_progress);
}
// if the watch registration is in-flight, let the watcher
{
Mutex::Locker locker(m_lock);
- assert(m_trash_list_in_progress);
+ ceph_assert(m_trash_list_in_progress);
if (r < 0) {
m_trash_list_in_progress = false;
}
{
Mutex::Locker locker(m_lock);
- assert(m_trash_list_in_progress);
+ ceph_assert(m_trash_list_in_progress);
}
librados::ObjectReadOperation op;
TrashWatcher<I>, &TrashWatcher<I>::handle_trash_list>(this);
m_out_bl.clear();
int r = m_io_ctx.aio_operate(RBD_TRASH, aio_comp, &op, &m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
Context *on_init_finish = nullptr;
{
Mutex::Locker locker(m_lock);
- assert(m_trash_list_in_progress);
+ ceph_assert(m_trash_list_in_progress);
if (r >= 0) {
for (auto& image : images) {
add_image(image.first, image.second);
void TrashWatcher<I>::process_trash_list() {
dout(5) << dendl;
- assert(m_threads->timer_lock.is_locked());
- assert(m_timer_ctx != nullptr);
+ ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(m_timer_ctx != nullptr);
m_timer_ctx = nullptr;
{
Mutex::Locker locker(m_lock);
- assert(!m_trash_list_in_progress);
+ ceph_assert(!m_trash_list_in_progress);
m_trash_list_in_progress = true;
}
return;
}
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
auto& deferment_end_time = spec.deferment_end_time;
dout(10) << "image_id=" << image_id << ", "
<< "deferment_end_time=" << deferment_end_time << dendl;
m_out_bl.clear();
int r = m_ioctx.aio_operate(RBD_MIRROR_LEADER, aio_comp, &op, &m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
RWLock::WLocker map_lock(m_map_lock);
for (auto& it : image_mapping) {
- assert(!it.second.instance_id.empty());
+ ceph_assert(!it.second.instance_id.empty());
auto map_result = m_map[it.second.instance_id].emplace(it.first);
- assert(map_result.second);
+ ceph_assert(map_result.second);
auto image_state_result = m_image_states.emplace(
it.first, ImageState{it.second.instance_id, it.second.mapped_time});
- assert(image_state_result.second);
+ ceph_assert(image_state_result.second);
// ensure we (re)send image acquire actions to the instance
auto& image_state = image_state_result.first->second;
auto start_action = set_state(&image_state,
StateTransition::STATE_INITIALIZING, false);
- assert(start_action);
+ ceph_assert(start_action);
}
}
RWLock::WLocker map_lock(m_map_lock);
for (auto& instance : instance_ids) {
- assert(!instance.empty());
+ ceph_assert(!instance.empty());
m_map.emplace(instance, std::set<std::string>{});
}
<< "]" << dendl;
for (auto& global_image_id : shuffle_global_image_ids) {
auto it = m_image_states.find(global_image_id);
- assert(it != m_image_states.end());
+ ceph_assert(it != m_image_states.end());
auto& image_state = it->second;
if (set_state(&image_state, StateTransition::STATE_SHUFFLING, false)) {
void Policy::remove_instances(const RWLock& lock,
const InstanceIds &instance_ids,
GlobalImageIds* global_image_ids) {
- assert(m_map_lock.is_wlocked());
+ ceph_assert(m_map_lock.is_wlocked());
dout(5) << "instance_ids=" << instance_ids << dendl;
for (auto& instance_id : instance_ids) {
<< "global_image_ids=[" << instance_global_image_ids << "]"<< dendl;
for (auto& global_image_id : instance_global_image_ids) {
auto it = m_image_states.find(global_image_id);
- assert(it != m_image_states.end());
+ ceph_assert(it != m_image_states.end());
auto& image_state = it->second;
if (is_state_scheduled(image_state,
RWLock::WLocker map_lock(m_map_lock);
auto it = m_image_states.find(global_image_id);
- assert(it != m_image_states.end());
+ ceph_assert(it != m_image_states.end());
auto& image_state = it->second;
auto& transition = image_state.transition;
- assert(transition.action_type != ACTION_TYPE_NONE);
+ ceph_assert(transition.action_type != ACTION_TYPE_NONE);
dout(5) << "global_image_id=" << global_image_id << ", "
<< "state=" << image_state.state << ", "
RWLock::WLocker map_lock(m_map_lock);
auto it = m_image_states.find(global_image_id);
- assert(it != m_image_states.end());
+ ceph_assert(it != m_image_states.end());
auto& image_state = it->second;
auto& transition = image_state.transition;
StateTransition::transit(image_state.state, &image_state.transition);
if (transition.finish_state) {
// in-progress state machine complete
- assert(StateTransition::is_idle(*transition.finish_state));
+ ceph_assert(StateTransition::is_idle(*transition.finish_state));
image_state.state = *transition.finish_state;
image_state.transition = {};
}
if (StateTransition::is_idle(image_state.state) && image_state.next_state) {
// advance to pending state machine
bool start_action = set_state(&image_state, *image_state.next_state, false);
- assert(start_action);
+ ceph_assert(start_action);
}
// image state may get purged in execute_policy_action()
break;
case StateTransition::POLICY_ACTION_REMOVE:
if (image_state->state == StateTransition::STATE_UNASSOCIATED) {
- assert(image_state->instance_id == UNMAPPED_INSTANCE_ID);
- assert(!image_state->next_state);
+ ceph_assert(image_state->instance_id == UNMAPPED_INSTANCE_ID);
+ ceph_assert(!image_state->next_state);
m_image_states.erase(global_image_id);
}
break;
}
void Policy::map(const std::string& global_image_id, ImageState* image_state) {
- assert(m_map_lock.is_wlocked());
+ ceph_assert(m_map_lock.is_wlocked());
std::string instance_id = image_state->instance_id;
if (instance_id != UNMAPPED_INSTANCE_ID && !is_dead_instance(instance_id)) {
}
instance_id = do_map(m_map, global_image_id);
- assert(!instance_id.empty());
+ ceph_assert(!instance_id.empty());
dout(5) << "global_image_id=" << global_image_id << ", "
<< "instance_id=" << instance_id << dendl;
image_state->mapped_time = ceph_clock_now();
auto ins = m_map[instance_id].emplace(global_image_id);
- assert(ins.second);
+ ceph_assert(ins.second);
}
void Policy::unmap(const std::string &global_image_id,
ImageState* image_state) {
- assert(m_map_lock.is_wlocked());
+ ceph_assert(m_map_lock.is_wlocked());
std::string instance_id = image_state->instance_id;
if (instance_id == UNMAPPED_INSTANCE_ID) {
dout(5) << "global_image_id=" << global_image_id << ", "
<< "instance_id=" << instance_id << dendl;
- assert(!instance_id.empty());
+ ceph_assert(!instance_id.empty());
m_map[instance_id].erase(global_image_id);
image_state->instance_id = UNMAPPED_INSTANCE_ID;
image_state->mapped_time = {};
}
bool Policy::is_image_shuffling(const std::string &global_image_id) {
- assert(m_map_lock.is_locked());
+ ceph_assert(m_map_lock.is_locked());
auto it = m_image_states.find(global_image_id);
- assert(it != m_image_states.end());
+ ceph_assert(it != m_image_states.end());
auto& image_state = it->second;
// avoid attempting to re-shuffle a pending shuffle
}
bool Policy::can_shuffle_image(const std::string &global_image_id) {
- assert(m_map_lock.is_locked());
+ ceph_assert(m_map_lock.is_locked());
CephContext *cct = reinterpret_cast<CephContext *>(m_ioctx.cct());
int migration_throttle = cct->_conf.get_val<int64_t>(
"rbd_mirror_image_policy_migration_throttle");
auto it = m_image_states.find(global_image_id);
- assert(it != m_image_states.end());
+ ceph_assert(it != m_image_states.end());
auto& image_state = it->second;
utime_t last_shuffled_time = image_state.mapped_time;
image_state->next_state = boost::none;
StateTransition::transit(image_state->state, &image_state->transition);
- assert(image_state->transition.action_type != ACTION_TYPE_NONE);
- assert(!image_state->transition.finish_state);
+ ceph_assert(image_state->transition.action_type != ACTION_TYPE_NONE);
+ ceph_assert(!image_state->transition.finish_state);
return true;
}
typedef std::map<std::string, std::set<std::string> > InstanceToImageMap;
bool is_dead_instance(const std::string instance_id) {
- assert(m_map_lock.is_locked());
+ ceph_assert(m_map_lock.is_locked());
return m_dead_instances.find(instance_id) != m_dead_instances.end();
}
++nr_instances;
}
}
- assert(nr_instances > 0);
+ ceph_assert(nr_instances > 0);
size_t images_per_instance = image_count / nr_instances;
if (images_per_instance == 0) {
const std::string &global_image_id) {
auto min_it = map.end();
for (auto it = map.begin(); it != map.end(); ++it) {
- assert(it->second.find(global_image_id) == it->second.end());
+ ceph_assert(it->second.find(global_image_id) == it->second.end());
if (Policy::is_dead_instance(it->first)) {
continue;
} else if (min_it == map.end()) {
}
}
- assert(min_it != map.end());
+ ceph_assert(min_it != map.end());
dout(20) << "global_image_id=" << global_image_id << " maps to instance_id="
<< min_it->first << dendl;
return min_it->first;
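// do_map() above selects a live instance for the image: dead instances are
// skipped, the leading assert guarantees the image is not already mapped to
// any instance, and the elided comparison presumably keeps the candidate with
// the fewest mapped images as min_it.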
void StateTransition::transit(State state, Transition* transition) {
auto it = s_transition_table.find({state, transition->action_type});
- assert(it != s_transition_table.end());
+ ceph_assert(it != s_transition_table.end());
*transition = it->second;
}
}
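// StateTransition::transit() above is a plain table lookup keyed on the
// (current state, pending action) pair, with ceph_assert enforcing that the
// table covers every pair that can actually occur. A standalone sketch of
// the idiom (the enums, fields, and entries below are illustrative, not the
// real rbd-mirror table):
//
//   #include <map>
//   #include <utility>
//
//   enum class State { Unassociated, Associating, Associated };
//   enum class Action { Acquire, Release };
//   struct Transition { Action action; State next; };
//
//   static const std::map<std::pair<State, Action>, Transition> table = {
//     {{State::Unassociated, Action::Acquire}, {Action::Acquire, State::Associating}},
//     {{State::Associating,  Action::Acquire}, {Action::Acquire, State::Associated}},
//   };
//
//   void transit(State state, Transition *t) {
//     auto it = table.find({state, t->action});
//     ceph_assert(it != table.end());   // every reachable pair must be mapped
//     *t = it->second;
//   }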
void encode(bufferlist& bl) const {
- assert(false);
+ ceph_assert(false);
}
void decode(__u8 version, bufferlist::const_iterator& it) {
librados::AioCompletion *aio_comp = create_rados_callback<
UpdateRequest, &UpdateRequest::handle_update_image_map>(this);
int r = m_ioctx.aio_operate(RBD_MIRROR_LEADER, aio_comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
template <typename I>
BootstrapRequest<I>::~BootstrapRequest() {
- assert(m_remote_image_ctx == nullptr);
+ ceph_assert(m_remote_image_ctx == nullptr);
}
template <typename I>
if (r < 0) {
derr << ": failed to open remote image: " << cpp_strerror(r) << dendl;
- assert(m_remote_image_ctx == nullptr);
+ ceph_assert(m_remote_image_ctx == nullptr);
finish(r);
return;
}
dout(20) << ": r=" << r << dendl;
if (r == -ENOENT) {
- assert(*m_local_image_ctx == nullptr);
+ ceph_assert(*m_local_image_ctx == nullptr);
dout(10) << ": local image missing" << dendl;
unregister_client();
return;
} else if (r == -EREMOTEIO) {
- assert(*m_local_image_ctx == nullptr);
+ ceph_assert(*m_local_image_ctx == nullptr);
dout(10) << "local image is primary -- skipping image replay" << dendl;
m_ret_val = r;
close_remote_image();
return;
} else if (r < 0) {
- assert(*m_local_image_ctx == nullptr);
+ ceph_assert(*m_local_image_ctx == nullptr);
derr << ": failed to open local image: " << cpp_strerror(r) << dendl;
m_ret_val = r;
close_remote_image();
update_progress("REGISTER_CLIENT");
- assert(m_local_image_id.empty());
+ ceph_assert(m_local_image_id.empty());
librbd::journal::MirrorPeerClientMeta mirror_peer_client_meta;
mirror_peer_client_meta.state = librbd::journal::MIRROR_PEER_STATE_REPLAYING;
dout(20) << dendl;
update_progress("UPDATE_CLIENT_IMAGE");
- assert(m_local_image_id.empty());
+ ceph_assert(m_local_image_id.empty());
m_local_image_id = librbd::util::generate_image_id<I>(m_local_io_ctx);
librbd::journal::MirrorPeerClientMeta client_meta{m_local_image_id};
if (m_canceled) {
m_ret_val = -ECANCELED;
} else {
- assert(m_image_sync == nullptr);
+ ceph_assert(m_image_sync == nullptr);
Context *ctx = create_context_callback<
BootstrapRequest<I>, &BootstrapRequest<I>::handle_image_sync>(this);
m_out_bl.clear();
int r = m_remote_parent_io_ctx.aio_operate(RBD_MIRRORING, aio_comp, &op,
&m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
m_out_bl.clear();
int r = m_local_parent_io_ctx.aio_operate(RBD_MIRRORING, aio_comp, &op,
&m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
template <typename I>
EventPreprocessor<I>::~EventPreprocessor() {
- assert(!m_in_progress);
+ ceph_assert(!m_in_progress);
}
template <typename I>
template <typename I>
void EventPreprocessor<I>::preprocess(EventEntry *event_entry,
Context *on_finish) {
- assert(!m_in_progress);
+ ceph_assert(!m_in_progress);
m_in_progress = true;
m_event_entry = event_entry;
m_on_finish = on_finish;
&GetMirrorImageIdRequest<I>::handle_get_image_id>(
this);
int r = m_io_ctx.aio_operate(RBD_MIRRORING, aio_comp, &op, &m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
IsPrimaryRequest<I>, &IsPrimaryRequest<I>::handle_get_mirror_state>(this);
int r = m_image_ctx->md_ctx.aio_operate(RBD_MIRRORING, aio_comp, &op,
&m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
void OpenLocalImageRequest<I>::handle_close_image(int r) {
dout(20) << dendl;
- assert(r == 0);
+ ceph_assert(r == 0);
finish(m_ret_val);
}
PrepareLocalImageRequest<I>,
&PrepareLocalImageRequest<I>::handle_get_local_image_name>(this);
int r = m_io_ctx.aio_operate(RBD_DIRECTORY, aio_comp, &op, &m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
PrepareLocalImageRequest<I>,
&PrepareLocalImageRequest<I>::handle_get_mirror_state>(this);
int r = m_io_ctx.aio_operate(RBD_MIRRORING, aio_comp, &op, &m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
PrepareRemoteImageRequest<I>,
&PrepareRemoteImageRequest<I>::handle_get_remote_mirror_uuid>(this);
int r = m_remote_io_ctx.aio_operate(RBD_MIRRORING, aio_comp, &op, &m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
settings.max_fetch_bytes = g_ceph_context->_conf.get_val<Option::size_t>(
"rbd_mirror_journal_max_fetch_bytes");
- assert(*m_remote_journaler == nullptr);
+ ceph_assert(*m_remote_journaler == nullptr);
*m_remote_journaler = new Journaler(m_threads->work_queue, m_threads->timer,
&m_threads->timer_lock, m_remote_io_ctx,
*m_remote_image_id, m_local_mirror_uuid,
{
Mutex::Locker locker(m_lock);
- assert(m_on_finish == on_finish);
+ ceph_assert(m_on_finish == on_finish);
m_on_finish = nullptr;
}
std::swap(m_on_finish, on_finish);
}
- assert(on_finish);
+ ceph_assert(on_finish);
on_finish->complete(0);
return;
}
: m_remote_image_ctx(remote_image_ctx), m_mirror_uuid(mirror_uuid),
m_journaler(journaler), m_client_meta(client_meta), m_on_finish(on_finish),
m_client_meta_copy(*client_meta) {
- assert(m_client_meta->sync_points.size() < 2);
+ ceph_assert(m_client_meta->sync_points.size() < 2);
// initialize the updated client meta with the new sync point
m_client_meta_copy.sync_points.emplace_back();
void SyncPointPruneRequest<I>::handle_remove_snap(int r) {
dout(20) << ": r=" << r << dendl;
- assert(!m_snap_names.empty());
+ ceph_assert(!m_snap_names.empty());
std::string snap_name = m_snap_names.front();
m_snap_names.pop_front();
RefreshImagesRequest<I>,
&RefreshImagesRequest<I>::handle_mirror_image_list>(this);
int r = m_remote_io_ctx.aio_operate(RBD_MIRRORING, aio_comp, &op, &m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
static void handle_signal(int signum)
{
- assert(signum == SIGINT || signum == SIGTERM);
+ ceph_assert(signum == SIGINT || signum == SIGTERM);
derr << "*** Got signal " << sig_str(signum) << " ***" << dendl;
dout(20) << __func__ << ": " << "sending NBD_DISCONNECT" << dendl;
if (ioctl(nbd, NBD_DISCONNECT) < 0) {
void io_finish(IOContext *ctx)
{
Mutex::Locker l(lock);
- assert(ctx->item.is_on_list());
+ ceph_assert(ctx->item.is_on_list());
ctx->item.remove_myself();
io_finished.push_back(&ctx->item);
cond.Signal();
void wait_clean()
{
- assert(!reader_thread.is_started());
+ ceph_assert(!reader_thread.is_started());
Mutex::Locker l(lock);
while (!io_pending.empty())
cond.Wait(lock);
}
r = image.update_unwatch(handle);
- assert(r == 0);
+ ceph_assert(r == 0);
}
close_nbd:
r = io_ctx.write(oid, bl, bl.length(), 0);
uint64_t objver = io_ctx.get_last_version();
- assert(objver > 0);
+ ceph_assert(objver > 0);
cout << "io_ctx.write returned " << r << " last_ver=" << objver << std::endl;
uint64_t stat_size;
// test assert_version
r = io_ctx.read(oid, bl, 0, 1);
- assert(r >= 0);
+ ceph_assert(r >= 0);
uint64_t v = io_ctx.get_last_version();
cout << oid << " version is " << v << std::endl;
- assert(v > 0);
+ ceph_assert(v > 0);
io_ctx.set_assert_version(v);
r = io_ctx.read(oid, bl, 0, 1);
- assert(r >= 0);
+ ceph_assert(r >= 0);
io_ctx.set_assert_version(v - 1);
r = io_ctx.read(oid, bl, 0, 1);
- assert(r == -ERANGE);
+ ceph_assert(r == -ERANGE);
io_ctx.set_assert_version(v + 1);
r = io_ctx.read(oid, bl, 0, 1);
- assert(r == -EOVERFLOW);
+ ceph_assert(r == -EOVERFLOW);
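// Taken together, the reads above pin down the assert_version semantics being
// exercised: after set_assert_version(v) the next op succeeds when the
// object's version is exactly v, fails with -ERANGE when the asserted version
// is lower than the actual one, and with -EOVERFLOW when it is higher.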
r = io_ctx.exec(oid, "crypto", "sha1", bl, bl2);
cout << "exec returned " << r << std::endl;
bufferlist val;
val.append("foo");
r = io_ctx.setxattr(oid, "foo", val);
- assert(r >= 0);
+ ceph_assert(r >= 0);
{
ObjectReadOperation o;
o.cmpxattr("foo", CEPH_OSD_CMPXATTR_OP_EQ, val);
r = io_ctx.operate(oid, &o, &bl2);
cout << " got " << r << " wanted >= 0" << std::endl;
- assert(r >= 0);
+ ceph_assert(r >= 0);
}
val.append("...");
{
o.cmpxattr("foo", CEPH_OSD_CMPXATTR_OP_EQ, val);
r = io_ctx.operate(oid, &o, &bl2);
cout << " got " << r << " wanted " << -ECANCELED << " (-ECANCELED)" << std::endl;
- assert(r == -ECANCELED);
+ ceph_assert(r == -ECANCELED);
}
io_ctx.locator_set_key(string());