Context **out_on_applied,
Context **out_on_commit,
Context **out_on_applied_sync) {
- assert(out_on_applied);
- assert(out_on_commit);
- assert(out_on_applied_sync);
+ ceph_assert(out_on_applied);
+ ceph_assert(out_on_commit);
+ ceph_assert(out_on_applied_sync);
list<Context *> on_applied, on_commit, on_applied_sync;
for (auto& i : t) {
on_applied.splice(on_applied.end(), i.on_applied);
list<Context*> *out_on_applied,
list<Context*> *out_on_commit,
list<Context*> *out_on_applied_sync) {
- assert(out_on_applied);
- assert(out_on_commit);
- assert(out_on_applied_sync);
+ ceph_assert(out_on_applied);
+ ceph_assert(out_on_commit);
+ ceph_assert(out_on_applied_sync);
for (auto& i : t) {
out_on_applied->splice(out_on_applied->end(), i.on_applied);
out_on_commit->splice(out_on_commit->end(), i.on_commit);
case OP_ZERO:
case OP_TRUNCATE:
case OP_SETALLOCHINT:
- assert(op->cid < cm.size());
- assert(op->oid < om.size());
+ ceph_assert(op->cid < cm.size());
+ ceph_assert(op->oid < om.size());
op->cid = cm[op->cid];
op->oid = om[op->oid];
break;
case OP_CLONERANGE2:
case OP_CLONE:
- assert(op->cid < cm.size());
- assert(op->oid < om.size());
- assert(op->dest_oid < om.size());
+ ceph_assert(op->cid < cm.size());
+ ceph_assert(op->oid < om.size());
+ ceph_assert(op->dest_oid < om.size());
op->cid = cm[op->cid];
op->oid = om[op->oid];
op->dest_oid = om[op->dest_oid];
case OP_COLL_SETATTRS:
case OP_COLL_HINT:
case OP_COLL_SET_BITS:
- assert(op->cid < cm.size());
+ ceph_assert(op->cid < cm.size());
op->cid = cm[op->cid];
break;
case OP_COLL_ADD:
- assert(op->cid < cm.size());
- assert(op->oid < om.size());
- assert(op->dest_cid < om.size());
+ ceph_assert(op->cid < cm.size());
+ ceph_assert(op->oid < om.size());
+ ceph_assert(op->dest_cid < cm.size());
op->cid = cm[op->cid];
op->dest_cid = cm[op->dest_cid];
op->oid = om[op->oid];
break;
case OP_COLL_MOVE_RENAME:
- assert(op->cid < cm.size());
- assert(op->oid < om.size());
- assert(op->dest_cid < cm.size());
- assert(op->dest_oid < om.size());
+ ceph_assert(op->cid < cm.size());
+ ceph_assert(op->oid < om.size());
+ ceph_assert(op->dest_cid < cm.size());
+ ceph_assert(op->dest_oid < om.size());
op->cid = cm[op->cid];
op->oid = om[op->oid];
op->dest_cid = cm[op->dest_cid];
break;
case OP_TRY_RENAME:
- assert(op->cid < cm.size());
- assert(op->oid < om.size());
- assert(op->dest_oid < om.size());
+ ceph_assert(op->cid < cm.size());
+ ceph_assert(op->oid < om.size());
+ ceph_assert(op->dest_oid < om.size());
op->cid = cm[op->cid];
op->oid = om[op->oid];
op->dest_oid = om[op->dest_oid];
break;
case OP_SPLIT_COLLECTION2:
- assert(op->cid < cm.size());
- assert(op->dest_cid < cm.size());
+ ceph_assert(op->cid < cm.size());
+ ceph_assert(op->dest_cid < cm.size());
op->cid = cm[op->cid];
op->dest_cid = cm[op->dest_cid];
break;
default:
- assert(0 == "Unknown OP");
+ ceph_assert(0 == "Unknown OP");
}
}
void _update_op_bl(
std::list<bufferptr>::iterator p;
for(p = list.begin(); p != list.end(); ++p) {
- assert(p->length() % sizeof(Op) == 0);
+ ceph_assert(p->length() % sizeof(Op) == 0);
char* raw_p = p->c_str();
char* raw_end = raw_p + p->length();
return ops > 0;
}
Op* decode_op() {
- assert(ops > 0);
+ ceph_assert(ops > 0);
Op* op = reinterpret_cast<Op*>(op_buffer_p);
op_buffer_p += sizeof(Op);
}
const ghobject_t &get_oid(__le32 oid_id) {
- assert(oid_id < objects.size());
+ ceph_assert(oid_id < objects.size());
return objects[oid_id];
}
const coll_t &get_cid(__le32 cid_id) {
- assert(cid_id < colls.size());
+ ceph_assert(cid_id < colls.size());
return colls[cid_id];
}
uint32_t get_fadvise_flags() const {
_op->len = len;
encode(write_data, data_bl);
- assert(len == write_data.length());
+ ceph_assert(len == write_data.length());
data.fadvise_flags = data.fadvise_flags | flags;
if (write_data.length() > data.largest_data_len) {
data.largest_data_len = write_data.length();
const char *ldata, size_t llen,
const char *rdata, size_t rlen,
std::string *new_value) override {
- assert(llen == rlen);
+ ceph_assert(llen == rlen);
*new_value = std::string(ldata, llen);
for (size_t i = 0; i < rlen; ++i) {
(*new_value)[i] ^= rdata[i];
KeyValueDB::Transaction txn)
{
bytes_per_block = granularity;
- assert(isp2(bytes_per_block));
+ ceph_assert(isp2(bytes_per_block));
size = p2align(new_size, bytes_per_block);
blocks_per_key = cct->_conf->bluestore_freelist_blocks_per_key;
enumerate_p->lower_bound(string());
// we assert that the first block is always allocated; it's true,
// and it simplifies our lives a bit.
- assert(enumerate_p->valid());
+ ceph_assert(enumerate_p->valid());
string k = enumerate_p->key();
const char *p = k.c_str();
_key_decode_u64(p, &enumerate_offset);
enumerate_bl = enumerate_p->value();
- assert(enumerate_offset == 0);
- assert(get_next_set_bit(enumerate_bl, 0) == 0);
+ ceph_assert(enumerate_offset == 0);
+ ceph_assert(get_next_set_bit(enumerate_bl, 0) == 0);
}
if (enumerate_offset >= size) {
first_key += bytes_per_key;
}
}
- assert(first_key == last_key);
+ ceph_assert(first_key == last_key);
{
string k;
make_offset_key(first_key, &k);
}
if (errors) {
derr << __func__ << " saw " << errors << " errors" << dendl;
- assert(0 == "bitmap freelist errors");
+ ceph_assert(0 == "bitmap freelist errors");
}
}
KeyValueDB::Transaction txn)
{
// must be block aligned
- assert((offset & block_mask) == offset);
- assert((length & block_mask) == length);
+ ceph_assert((offset & block_mask) == offset);
+ ceph_assert((length & block_mask) == length);
uint64_t first_key = offset & key_mask;
uint64_t last_key = (offset + length - 1) & key_mask;
txn->merge(bitmap_prefix, k, all_set_bl);
first_key += bytes_per_key;
}
- assert(first_key == last_key);
+ ceph_assert(first_key == last_key);
{
bufferptr p(blocks_per_key >> 3);
p.zero();
void IOContext::release_running_aios()
{
- assert(!num_running);
+ ceph_assert(!num_running);
#ifdef HAVE_LIBAIO
// release aio contexts (including pinned buffers).
running_aios.clear();
std::lock_guard<std::mutex> l(lock);
cond.notify_all();
--num_running;
- assert(num_running >= 0);
+ ceph_assert(num_running >= 0);
} else {
--num_running;
}
int BlueFS::add_block_device(unsigned id, const string& path, bool trim)
{
dout(10) << __func__ << " bdev " << id << " path " << path << dendl;
- assert(id < bdev.size());
- assert(bdev[id] == NULL);
+ ceph_assert(id < bdev.size());
+ ceph_assert(bdev[id] == NULL);
BlockDevice *b = BlockDevice::create(cct, path, NULL, NULL, discard_cb[id], static_cast<void*>(this));
int r = b->open(path);
if (r < 0) {
bool BlueFS::bdev_support_label(unsigned id)
{
- assert(id < bdev.size());
- assert(bdev[id]);
+ ceph_assert(id < bdev.size());
+ ceph_assert(bdev[id]);
return bdev[id]->supported_bdev_label();
}
dout(1) << __func__ << " bdev " << id
<< " 0x" << std::hex << offset << "~" << length << std::dec
<< dendl;
- assert(id < bdev.size());
- assert(bdev[id]);
- assert(bdev[id]->get_size() >= offset + length);
+ ceph_assert(id < bdev.size());
+ ceph_assert(bdev[id]);
+ ceph_assert(bdev[id]->get_size() >= offset + length);
block_all[id].insert(offset, length);
if (id < alloc.size() && alloc[id]) {
log_t.op_alloc_add(id, offset, length);
int r = _flush_and_sync_log(l);
- assert(r == 0);
+ ceph_assert(r == 0);
alloc[id]->init_add_free(offset, length);
}
std::unique_lock<std::mutex> l(lock);
dout(1) << __func__ << " bdev " << id
<< " want 0x" << std::hex << want << std::dec << dendl;
- assert(id < alloc.size());
- assert(alloc[id]);
+ ceph_assert(id < alloc.size());
+ ceph_assert(alloc[id]);
int64_t got = alloc[id]->allocate(want, cct->_conf->bluefs_alloc_size, 0,
extents);
- assert(got != 0);
+ ceph_assert(got != 0);
if (got < 0) {
derr << __func__ << " failed to allocate space to return to bluestore"
<< dendl;
flush_bdev();
int r = _flush_and_sync_log(l);
- assert(r == 0);
+ ceph_assert(r == 0);
logger->inc(l_bluefs_reclaim_bytes, got);
dout(1) << __func__ << " bdev " << id << " want 0x" << std::hex << want
void BlueFS::handle_discard(unsigned id, interval_set<uint64_t>& to_release)
{
dout(10) << __func__ << " bdev " << id << dendl;
- assert(alloc[id]);
+ ceph_assert(alloc[id]);
alloc[id]->release(to_release);
}
uint64_t BlueFS::get_total(unsigned id)
{
std::lock_guard<std::mutex> l(lock);
- assert(id < block_all.size());
+ ceph_assert(id < block_all.size());
return block_all[id].size();
}
uint64_t BlueFS::get_free(unsigned id)
{
std::lock_guard<std::mutex> l(lock);
- assert(id < alloc.size());
+ ceph_assert(id < alloc.size());
return alloc[id]->get_free();
}
log_file->fnode.prefer_bdev,
cct->_conf->bluefs_max_log_runway,
&log_file->fnode);
- assert(r == 0);
+ ceph_assert(r == 0);
log_writer = _create_writer(log_file);
// initial txn
if (!bdev[id]) {
continue;
}
- assert(bdev[id]->get_size());
+ ceph_assert(bdev[id]->get_size());
alloc[id] = Allocator::create(cct, cct->_conf->bluefs_allocator,
bdev[id]->get_size(),
cct->_conf->bluefs_alloc_size);
// set up the log for future writes
log_writer = _create_writer(_get_file(1));
- assert(log_writer->file->fnode.ino == 1);
+ ceph_assert(log_writer->file->fnode.ino == 1);
log_writer->pos = log_writer->file->fnode.size;
dout(10) << __func__ << " log write pos set to 0x"
<< std::hex << log_writer->pos << std::dec
dout(10) << __func__ << " super block length(encoded): " << bl.length() << dendl;
dout(10) << __func__ << " superblock " << super.version << dendl;
dout(10) << __func__ << " log_fnode " << super.log_fnode << dendl;
- assert(bl.length() <= get_super_length());
+ ceph_assert(bl.length() <= get_super_length());
bl.append_zero(get_super_length() - bl.length());
bdev[BDEV_DB]->write(get_super_offset(), bl, false);
} else {
// do not use fnode from superblock in 'noop' mode - log_file's one should
// be fine and up-to-date
- assert(log_file->fnode.ino == 1);
- assert(log_file->fnode.extents.size() != 0);
+ ceph_assert(log_file->fnode.ino == 1);
+ ceph_assert(log_file->fnode.extents.size() != 0);
}
dout(10) << __func__ << " log_fnode " << super.log_fnode << dendl;
if (unlikely(to_stdout)) {
false, // !random
true); // ignore eof
while (true) {
- assert((log_reader->buf.pos & ~super.block_mask()) == 0);
+ ceph_assert((log_reader->buf.pos & ~super.block_mask()) == 0);
uint64_t pos = log_reader->buf.pos;
uint64_t read_pos = pos;
bufferlist bl;
{
int r = _read(log_reader, &log_reader->buf, read_pos, super.block_size,
&bl, NULL);
- assert(r == (int)super.block_size);
+ ceph_assert(r == (int)super.block_size);
read_pos += r;
}
uint64_t more = 0;
<< ", which is past eof" << dendl;
break;
}
- assert(r == (int)more);
+ ceph_assert(r == (int)more);
bl.claim_append(t);
read_pos += r;
}
delete log_reader;
return -EIO;
}
- assert(seq == t.seq);
+ ceph_assert(seq == t.seq);
dout(10) << __func__ << " 0x" << std::hex << pos << std::dec
<< ": " << t << dendl;
if (unlikely(to_stdout)) {
<< ": op_init" << std::endl;
}
- assert(t.seq == 1);
+ ceph_assert(t.seq == 1);
break;
case bluefs_transaction_t::OP_JUMP:
<< std::endl;
}
- assert(next_seq >= log_seq);
+ ceph_assert(next_seq >= log_seq);
log_seq = next_seq - 1; // we will increment it below
uint64_t skip = offset - read_pos;
if (skip) {
dout(10) << __func__ << " 0x" << std::hex << read_pos
<< ": stop: failed to skip to " << offset
<< std::dec << dendl;
- assert(0 == "problem with op_jump");
+ ceph_assert(0 == "problem with op_jump");
}
}
}
<< ": op_jump_seq " << next_seq << std::endl;
}
- assert(next_seq >= log_seq);
+ ceph_assert(next_seq >= log_seq);
log_seq = next_seq - 1; // we will increment it below
}
break;
if (!noop) {
FileRef file = _get_file(ino);
- assert(file->fnode.ino);
+ ceph_assert(file->fnode.ino);
map<string,DirRef>::iterator q = dir_map.find(dirname);
- assert(q != dir_map.end());
+ ceph_assert(q != dir_map.end());
map<string,FileRef>::iterator r = q->second->file_map.find(filename);
- assert(r == q->second->file_map.end());
+ ceph_assert(r == q->second->file_map.end());
q->second->file_map[filename] = file;
++file->refs;
}
if (!noop) {
map<string,DirRef>::iterator q = dir_map.find(dirname);
- assert(q != dir_map.end());
+ ceph_assert(q != dir_map.end());
map<string,FileRef>::iterator r = q->second->file_map.find(filename);
- assert(r != q->second->file_map.end());
- assert(r->second->refs > 0);
+ ceph_assert(r != q->second->file_map.end());
+ ceph_assert(r->second->refs > 0);
--r->second->refs;
q->second->file_map.erase(r);
}
if (!noop) {
map<string,DirRef>::iterator q = dir_map.find(dirname);
- assert(q == dir_map.end());
+ ceph_assert(q == dir_map.end());
dir_map[dirname] = new Dir;
}
}
if (!noop) {
map<string,DirRef>::iterator q = dir_map.find(dirname);
- assert(q != dir_map.end());
- assert(q->second->file_map.empty());
+ ceph_assert(q != dir_map.end());
+ ceph_assert(q->second->file_map.empty());
dir_map.erase(q);
}
}
if (!noop) {
auto p = file_map.find(ino);
- assert(p != file_map.end());
+ ceph_assert(p != file_map.end());
file_map.erase(p);
}
}
return -EIO;
}
}
- assert(p.end());
+ ceph_assert(p.end());
// we successfully replayed the transaction; bump the seq and log size
++log_seq;
{
dout(20) << __func__ << " had refs " << file->refs
<< " on " << file->fnode << dendl;
- assert(file->refs > 0);
+ ceph_assert(file->refs > 0);
--file->refs;
if (file->refs == 0) {
dout(20) << __func__ << " destroying " << file->fnode << dendl;
- assert(file->num_reading.load() == 0);
+ ceph_assert(file->num_reading.load() == 0);
log_t.op_file_remove(file->fnode.ino);
for (auto& r : file->fnode.extents) {
pending_release[r.bdev].insert(r.offset, r.length);
file->deleted = true;
if (file->dirty_seq) {
- assert(file->dirty_seq > log_seq_stable);
- assert(dirty_files.count(file->dirty_seq));
+ ceph_assert(file->dirty_seq > log_seq_stable);
+ ceph_assert(dirty_files.count(file->dirty_seq));
auto it = dirty_files[file->dirty_seq].iterator_to(*file);
dirty_files[file->dirty_seq].erase(it);
file->dirty_seq = 0;
<< " of " << *p << dendl;
int r = bdev[p->bdev]->read_random(p->offset + x_off, l, out,
cct->_conf->bluefs_buffered_io);
- assert(r == 0);
+ ceph_assert(r == 0);
off += l;
len -= l;
ret += l;
<< " of " << *p << dendl;
int r = bdev[p->bdev]->read(p->offset + x_off, l, &buf->bl, ioc[p->bdev],
cct->_conf->bluefs_buffered_io);
- assert(r == 0);
+ ceph_assert(r == 0);
}
left = buf->get_buf_remaining(off);
dout(20) << __func__ << " left 0x" << std::hex << left
}
dout(20) << __func__ << " got " << ret << dendl;
- assert(!outbl || (int)outbl->length() == ret);
+ ceph_assert(!outbl || (int)outbl->length() == ret);
--h->file->num_reading;
return ret;
}
if (p.first == 1)
continue;
dout(20) << __func__ << " op_file_update " << p.second->fnode << dendl;
- assert(p.first > 1);
+ ceph_assert(p.first > 1);
t->op_file_update(p.second->fnode);
}
for (auto& p : dir_map) {
uint64_t old_allocated = 0;
log_file->fnode.swap_extents(old_extents, old_allocated);
int r = _allocate(log_file->fnode.prefer_bdev, need, &log_file->fnode);
- assert(r == 0);
+ ceph_assert(r == 0);
_close_writer(log_writer);
log_writer = _create_writer(log_file);
log_writer->append(bl);
r = _flush(log_writer, true);
- assert(r == 0);
+ ceph_assert(r == 0);
#ifdef HAVE_LIBAIO
if (!cct->_conf->bluefs_sync_write) {
list<aio_t> completed_ios;
{
dout(10) << __func__ << dendl;
File *log_file = log_writer->file.get();
- assert(!new_log);
- assert(!new_log_writer);
+ ceph_assert(!new_log);
+ ceph_assert(!new_log_writer);
// create a new log [writer] so that we know compaction is in progress
// (see _should_compact_log)
<< " need 0x" << (old_log_jump_to + cct->_conf->bluefs_max_log_runway) << std::dec << dendl;
int r = _allocate(log_file->fnode.prefer_bdev,
cct->_conf->bluefs_max_log_runway, &log_file->fnode);
- assert(r == 0);
+ ceph_assert(r == 0);
dout(10) << __func__ << " log extents " << log_file->fnode.extents << dendl;
// update the log file change and log a jump to the offset where we want to
// allocate
r = _allocate(BlueFS::BDEV_DB, new_log_jump_to,
&new_log->fnode);
- assert(r == 0);
+ ceph_assert(r == 0);
new_log_writer = _create_writer(new_log);
new_log_writer->append(bl);
// 3. flush
r = _flush(new_log_writer, true);
- assert(r == 0);
+ ceph_assert(r == 0);
// 4. wait
_flush_bdev_safely(new_log_writer);
uint64_t discarded = 0;
mempool::bluefs::vector<bluefs_extent_t> old_extents;
while (discarded < old_log_jump_to) {
- assert(!log_file->fnode.extents.empty());
+ ceph_assert(!log_file->fnode.extents.empty());
bluefs_extent_t& e = log_file->fnode.extents.front();
bluefs_extent_t temp = e;
if (discarded + e.length <= old_log_jump_to) {
// delete the new log, remove from the dirty files list
_close_writer(new_log_writer);
if (new_log->dirty_seq) {
- assert(dirty_files.count(new_log->dirty_seq));
+ ceph_assert(dirty_files.count(new_log->dirty_seq));
auto it = dirty_files[new_log->dirty_seq].iterator_to(*new_log);
dirty_files[new_log->dirty_seq].erase(it);
}
while (log_flushing) {
dout(10) << __func__ << " want_seq " << want_seq
<< " log is currently flushing, waiting" << dendl;
- assert(!jump_to);
+ ceph_assert(!jump_to);
log_cond.wait(l);
}
if (want_seq && want_seq <= log_seq_stable) {
dout(10) << __func__ << " want_seq " << want_seq << " <= log_seq_stable "
<< log_seq_stable << ", done" << dendl;
- assert(!jump_to);
+ ceph_assert(!jump_to);
return 0;
}
if (log_t.empty() && dirty_files.empty()) {
dout(10) << __func__ << " want_seq " << want_seq
<< " " << log_t << " not dirty, dirty_files empty, no-op" << dendl;
- assert(!jump_to);
+ ceph_assert(!jump_to);
return 0;
}
to_release.swap(pending_release);
uint64_t seq = log_t.seq = ++log_seq;
- assert(want_seq == 0 || want_seq <= seq);
+ ceph_assert(want_seq == 0 || want_seq <= seq);
log_t.uuid = super.uuid;
// log dirty files
}
dout(10) << __func__ << " " << log_t << dendl;
- assert(!log_t.empty());
+ ceph_assert(!log_t.empty());
// allocate some more space (before we run out)?
int64_t runway = log_writer->file->fnode.get_allocated() -
int r = _allocate(log_writer->file->fnode.prefer_bdev,
cct->_conf->bluefs_max_log_runway,
&log_writer->file->fnode);
- assert(r == 0);
+ ceph_assert(r == 0);
log_t.op_file_update(log_writer->file->fnode);
}
log_flushing = true;
int r = _flush(log_writer, true);
- assert(r == 0);
+ ceph_assert(r == 0);
if (jump_to) {
dout(10) << __func__ << " jumping log offset from 0x" << std::hex
auto l = p->second.begin();
while (l != p->second.end()) {
File *file = &*l;
- assert(file->dirty_seq > 0);
- assert(file->dirty_seq <= log_seq_stable);
+ ceph_assert(file->dirty_seq > 0);
+ ceph_assert(file->dirty_seq <= log_seq_stable);
dout(20) << __func__ << " cleaned file " << file->fnode << dendl;
file->dirty_seq = 0;
p->second.erase(l++);
}
- assert(p->second.empty());
+ ceph_assert(p->second.empty());
dirty_files.erase(p++);
}
} else {
dout(10) << __func__ << " " << h << " pos 0x" << std::hex << h->pos
<< " 0x" << offset << "~" << length << std::dec
<< " to " << h->file->fnode << dendl;
- assert(!h->file->deleted);
- assert(h->file->num_readers.load() == 0);
+ ceph_assert(!h->file->deleted);
+ ceph_assert(h->file->num_readers.load() == 0);
h->buffer_appender.flush();
<< std::hex << offset << "~" << length << std::dec
<< dendl;
}
- assert(offset <= h->file->fnode.size);
+ ceph_assert(offset <= h->file->fnode.size);
uint64_t allocated = h->file->fnode.get_allocated();
if (allocated < offset + length) {
// we should never run out of log space here; see the min runway check
// in _flush_and_sync_log.
- assert(h->file->fnode.ino != 1);
+ ceph_assert(h->file->fnode.ino != 1);
int r = _allocate(h->file->fnode.prefer_bdev,
offset + length - allocated,
&h->file->fnode);
derr << __func__ << " allocated: 0x" << std::hex << allocated
<< " offset: 0x" << offset << " length: 0x" << length << std::dec
<< dendl;
- assert(0 == "bluefs enospc");
+ ceph_assert(0 == "bluefs enospc");
return r;
}
if (cct->_conf->bluefs_preextend_wal_files &&
}
if (must_dirty) {
h->file->fnode.mtime = ceph_clock_now();
- assert(h->file->fnode.ino >= 1);
+ ceph_assert(h->file->fnode.ino >= 1);
if (h->file->dirty_seq == 0) {
h->file->dirty_seq = log_seq + 1;
dirty_files[h->file->dirty_seq].push_back(*h->file);
} else {
if (h->file->dirty_seq != log_seq + 1) {
// need re-dirty, erase from list first
- assert(dirty_files.count(h->file->dirty_seq));
+ ceph_assert(dirty_files.count(h->file->dirty_seq));
auto it = dirty_files[h->file->dirty_seq].iterator_to(*h->file);
dirty_files[h->file->dirty_seq].erase(it);
h->file->dirty_seq = log_seq + 1;
uint64_t x_off = 0;
auto p = h->file->fnode.seek(offset, &x_off);
- assert(p != h->file->fnode.extents.end());
+ ceph_assert(p != h->file->fnode.extents.end());
dout(20) << __func__ << " in " << *p << " x_off 0x"
<< std::hex << x_off << std::dec << dendl;
if (partial) {
dout(20) << __func__ << " using partial tail 0x"
<< std::hex << partial << std::dec << dendl;
- assert(h->tail_block.length() == partial);
+ ceph_assert(h->tail_block.length() == partial);
bl.claim_append_piecewise(h->tail_block);
x_off -= partial;
offset -= partial;
dout(20) << " leaving 0x" << std::hex << h->buffer.length() << std::dec
<< " unflushed" << dendl;
}
- assert(bl.length() == length);
+ ceph_assert(bl.length() == length);
switch (h->writer_type) {
case WRITER_WAL:
const bufferptr &last = t.back();
if (last.unused_tail_length() < zlen) {
derr << " wtf, last is " << last << " from " << t << dendl;
- assert(last.unused_tail_length() >= zlen);
+ ceph_assert(last.unused_tail_length() >= zlen);
}
bufferptr z = last;
z.set_offset(last.offset() + last.length());
logger->inc(l_bluefs_bytes_written_slow, bytes_written_slow);
for (unsigned i = 0; i < MAX_BDEV; ++i) {
if (bdev[i]) {
- assert(h->iocv[i]);
+ ceph_assert(h->iocv[i]);
if (h->iocv[i]->has_pending_aios()) {
bdev[i]->aio_submit(h->iocv[i]);
}
dout(10) << __func__ << " " << h << " 0x"
<< std::hex << offset << "~" << length << std::dec
<< " to " << h->file->fnode << dendl;
- assert(h->pos <= h->file->fnode.size);
+ ceph_assert(h->pos <= h->file->fnode.size);
return _flush_range(h, offset, length);
}
}
// we never truncate internal log files
- assert(h->file->fnode.ino > 1);
+ ceph_assert(h->file->fnode.ino > 1);
h->buffer_appender.flush();
<< " unflushed bytes" << dendl;
t.substr_of(h->buffer, 0, offset - h->pos);
h->buffer.swap(t);
- assert(0 == "actually this shouldn't happen");
+ ceph_assert(0 == "actually this shouldn't happen");
}
if (h->buffer.length()) {
int r = _flush(h, true);
return 0; // no-op!
}
if (offset > h->file->fnode.size) {
- assert(0 == "truncate up not supported");
+ ceph_assert(0 == "truncate up not supported");
}
- assert(h->file->fnode.size >= offset);
+ ceph_assert(h->file->fnode.size >= offset);
h->file->fnode.size = offset;
log_t.op_file_update(h->file->fnode);
return 0;
dout(20) << __func__ << " file metadata was dirty (" << old_dirty_seq
<< ") on " << h->file->fnode << ", flushing log" << dendl;
_flush_and_sync_log(l, old_dirty_seq);
- assert(h->file->dirty_seq == 0 || // cleaned
+ ceph_assert(h->file->dirty_seq == 0 || // cleaned
h->file->dirty_seq > s); // or redirtied by someone else
}
return 0;
{
dout(10) << __func__ << " len 0x" << std::hex << len << std::dec
<< " from " << (int)id << dendl;
- assert(id < alloc.size());
+ ceph_assert(id < alloc.size());
uint64_t min_alloc_size = cct->_conf->bluefs_alloc_size;
uint64_t left = round_up_to(len, min_alloc_size);
dout(10) << __func__ << " deleted, no-op" << dendl;
return 0;
}
- assert(f->fnode.ino > 1);
+ ceph_assert(f->fnode.ino > 1);
uint64_t allocated = f->fnode.get_allocated();
if (off + len > allocated) {
uint64_t want = off + len - allocated;
file->fnode.clear_extents();
}
}
- assert(file->fnode.ino > 1);
+ ceph_assert(file->fnode.ino > 1);
file->fnode.mtime = ceph_clock_now();
file->fnode.prefer_bdev = BlueFS::BDEV_DB;
dout(10) << __func__ << " " << h << " type " << h->writer_type << dendl;
for (unsigned i=0; i<MAX_BDEV; ++i) {
if (bdev[i]) {
- assert(h->iocv[i]);
+ ceph_assert(h->iocv[i]);
h->iocv[i]->aio_wait();
bdev[i]->queue_reap_ioc(h->iocv[i]);
}
dout(20) << __func__ << " dir " << new_dirname << " (" << old_dir
<< ") file " << new_filename
<< " already exists, unlinking" << dendl;
- assert(q->second != file);
+ ceph_assert(q->second != file);
log_t.op_dir_unlink(new_dirname, new_filename);
_drop_link(q->second);
}
{
std::lock_guard<std::mutex> l(lock);
dout(10) << __func__ << " " << fl << " on " << fl->file->fnode << dendl;
- assert(fl->file->locked);
+ ceph_assert(fl->file->locked);
fl->file->locked = false;
delete fl;
return 0;
num_reading(0)
{}
~File() override {
- assert(num_readers.load() == 0);
- assert(num_writers.load() == 0);
- assert(num_reading.load() == 0);
- assert(!locked);
+ ceph_assert(num_readers.load() == 0);
+ ceph_assert(num_writers.load() == 0);
+ ceph_assert(num_reading.load() == 0);
+ ceph_assert(!locked);
}
friend void intrusive_ptr_add_ref(File *f) {
return rocksdb::Status::IOError(strerror(r));
default:
// FIXME :(
- assert(0 == "unrecognized error code");
+ ceph_assert(0 == "unrecognized error code");
return rocksdb::Status::NotSupported(rocksdb::Status::kNone);
}
}
// REQUIRES: External synchronization
rocksdb::Status Read(size_t n, rocksdb::Slice* result, char* scratch) override {
int r = fs->read(h, &h->buf, h->buf.pos, n, NULL, scratch);
- assert(r >= 0);
+ ceph_assert(r >= 0);
*result = rocksdb::Slice(scratch, r);
return rocksdb::Status::OK();
}
rocksdb::Status Read(uint64_t offset, size_t n, rocksdb::Slice* result,
char* scratch) const override {
int r = fs->read_random(h, offset, n, scratch);
- assert(r >= 0);
+ ceph_assert(r >= 0);
*result = rocksdb::Slice(scratch, r);
return rocksdb::Status::OK();
}
derr << "key " << pretty_binary_string(*key) << dendl;
derr << "oid " << oid << dendl;
derr << " t " << t << dendl;
- assert(r == 0 && t == oid);
+ ceph_assert(r == 0 && t == oid);
}
}
}
static void rewrite_extent_shard_key(uint32_t offset, string *key)
{
- assert(key->size() > sizeof(uint32_t) + 1);
- assert(*key->rbegin() == EXTENT_SHARD_KEY_SUFFIX);
+ ceph_assert(key->size() > sizeof(uint32_t) + 1);
+ ceph_assert(*key->rbegin() == EXTENT_SHARD_KEY_SUFFIX);
_key_encode_u32(offset, key->size() - sizeof(uint32_t) - 1, key);
}
std::function<void(const string& final_key)> apply)
{
if (key->empty()) { // make full key
- assert(!onode_key.empty());
+ ceph_assert(!onode_key.empty());
get_extent_shard_key(onode_key, offset, key);
} else {
rewrite_extent_shard_key(offset, key);
int get_key_extent_shard(const string& key, string *onode_key, uint32_t *offset)
{
- assert(key.size() > sizeof(uint32_t) + 1);
- assert(*key.rbegin() == EXTENT_SHARD_KEY_SUFFIX);
+ ceph_assert(key.size() > sizeof(uint32_t) + 1);
+ ceph_assert(*key.rbegin() == EXTENT_SHARD_KEY_SUFFIX);
int okey_len = key.size() - sizeof(uint32_t) - 1;
*onode_key = key.substr(0, okey_len);
const char *p = key.data() + okey_len;
const char *ldata, size_t llen,
const char *rdata, size_t rlen,
std::string *new_value) override {
- assert(llen == rlen);
- assert((rlen % 8) == 0);
+ ceph_assert(llen == rlen);
+ ceph_assert((rlen % 8) == 0);
new_value->resize(rlen);
const __le64* lv = (const __le64*)ldata;
const __le64* rv = (const __le64*)rdata;
uint64_t end_touch_offset,
uint64_t min_alloc_size)
{
- assert(start_offset <= start_touch_offset && end_offset>= end_touch_offset);
+ ceph_assert(start_offset <= start_touch_offset && end_offset>= end_touch_offset);
uint64_t lookup_start_offset = p2align(start_offset, min_alloc_size);
uint64_t lookup_end_offset = round_up_to(end_offset, min_alloc_size);
blob_info_counted = &bi;
used_alloc_unit = alloc_unit_end;
- assert(it->length <= bi.referenced_bytes);
+ ceph_assert(it->length <= bi.referenced_bytes);
bi.referenced_bytes -= it->length;
dout(30) << __func__ << " affected_blob:" << *b
<< " unref 0x" << std::hex << it->length
else if (type == "2q")
c = new TwoQCache(cct);
else
- assert(0 == "unrecognized cache type");
+ ceph_assert(0 == "unrecognized cache type");
c->logger = logger;
return c;
}
Buffer *b = &*i;
- assert(b->is_clean());
+ ceph_assert(b->is_clean());
dout(20) << __func__ << " rm " << *b << dendl;
b->space->_rm_buffer(this, b);
}
uint64_t num = onode_lru.size() - onode_max;
auto p = onode_lru.end();
- assert(p != onode_lru.begin());
+ ceph_assert(p != onode_lru.begin());
--p;
int skipped = 0;
int max_skipped = g_conf()->bluestore_cache_trim_max_skip_pinned;
onode_lru.erase(p--);
} else {
onode_lru.erase(p);
- assert(num == 1);
+ ceph_assert(num == 1);
}
o->get(); // paranoia
o->c->onode_map.remove(o->oid);
for (auto i = buffer_lru.begin(); i != buffer_lru.end(); ++i) {
derr << __func__ << " " << *i << dendl;
}
- assert(s == buffer_size);
+ ceph_assert(s == buffer_size);
}
dout(20) << __func__ << " " << when << " buffer_size " << buffer_size
<< " ok" << dendl;
buffer_warm_in.insert(buffer_warm_in.iterator_to(*near), *b);
break;
case BUFFER_WARM_OUT:
- assert(b->is_empty());
+ ceph_assert(b->is_empty());
buffer_warm_out.insert(buffer_warm_out.iterator_to(*near), *b);
break;
case BUFFER_HOT:
buffer_hot.insert(buffer_hot.iterator_to(*near), *b);
break;
default:
- assert(0 == "bad cache_private");
+ ceph_assert(0 == "bad cache_private");
}
} else if (b->cache_private == BUFFER_NEW) {
b->cache_private = BUFFER_WARM_IN;
buffer_hot.push_front(*b);
break;
default:
- assert(0 == "bad cache_private");
+ ceph_assert(0 == "bad cache_private");
}
}
if (!b->is_empty()) {
{
dout(20) << __func__ << " " << *b << dendl;
if (!b->is_empty()) {
- assert(buffer_bytes >= b->length);
+ ceph_assert(buffer_bytes >= b->length);
buffer_bytes -= b->length;
- assert(buffer_list_bytes[b->cache_private] >= b->length);
+ ceph_assert(buffer_list_bytes[b->cache_private] >= b->length);
buffer_list_bytes[b->cache_private] -= b->length;
}
switch (b->cache_private) {
buffer_hot.erase(buffer_hot.iterator_to(*b));
break;
default:
- assert(0 == "bad cache_private");
+ ceph_assert(0 == "bad cache_private");
}
}
// preserve which list we're on (even if we can't preserve the order!)
switch (b->cache_private) {
case BUFFER_WARM_IN:
- assert(!b->is_empty());
+ ceph_assert(!b->is_empty());
buffer_warm_in.push_back(*b);
break;
case BUFFER_WARM_OUT:
- assert(b->is_empty());
+ ceph_assert(b->is_empty());
buffer_warm_out.push_back(*b);
break;
case BUFFER_HOT:
- assert(!b->is_empty());
+ ceph_assert(!b->is_empty());
buffer_hot.push_back(*b);
break;
default:
- assert(0 == "bad cache_private");
+ ceph_assert(0 == "bad cache_private");
}
if (!b->is_empty()) {
buffer_bytes += b->length;
{
dout(20) << __func__ << " delta " << delta << " on " << *b << dendl;
if (!b->is_empty()) {
- assert((int64_t)buffer_bytes + delta >= 0);
+ ceph_assert((int64_t)buffer_bytes + delta >= 0);
buffer_bytes += delta;
- assert((int64_t)buffer_list_bytes[b->cache_private] + delta >= 0);
+ ceph_assert((int64_t)buffer_list_bytes[b->cache_private] + delta >= 0);
buffer_list_bytes[b->cache_private] += delta;
}
}
uint64_t buffer_num = buffer_hot.size() + buffer_warm_in.size();
if (buffer_num) {
uint64_t buffer_avg_size = buffer_bytes / buffer_num;
- assert(buffer_avg_size);
+ ceph_assert(buffer_avg_size);
uint64_t calculated_buffer_num = buffer_max / buffer_avg_size;
kout = calculated_buffer_num * cct->_conf->bluestore_2q_cache_kout_ratio;
}
}
Buffer *b = &*p;
- assert(b->is_clean());
+ ceph_assert(b->is_clean());
dout(20) << __func__ << " buffer_warm_in -> out " << *b << dendl;
- assert(buffer_bytes >= b->length);
+ ceph_assert(buffer_bytes >= b->length);
buffer_bytes -= b->length;
- assert(buffer_list_bytes[BUFFER_WARM_IN] >= b->length);
+ ceph_assert(buffer_list_bytes[BUFFER_WARM_IN] >= b->length);
buffer_list_bytes[BUFFER_WARM_IN] -= b->length;
to_evict_bytes -= b->length;
evicted += b->length;
Buffer *b = &*p;
dout(20) << __func__ << " buffer_hot rm " << *b << dendl;
- assert(b->is_clean());
+ ceph_assert(b->is_clean());
// adjust evict size before buffer goes invalid
to_evict_bytes -= b->length;
evicted += b->length;
int64_t num = buffer_warm_out.size() - kout;
while (num-- > 0) {
Buffer *b = &*buffer_warm_out.rbegin();
- assert(b->is_empty());
+ ceph_assert(b->is_empty());
dout(20) << __func__ << " buffer_warm_out rm " << *b << dendl;
b->space->_rm_buffer(this, b);
}
uint64_t num = onode_lru.size() - onode_max;
auto p = onode_lru.end();
- assert(p != onode_lru.begin());
+ ceph_assert(p != onode_lru.begin());
--p;
int skipped = 0;
int max_skipped = g_conf()->bluestore_cache_trim_max_skip_pinned;
onode_lru.erase(p--);
} else {
onode_lru.erase(p);
- assert(num == 1);
+ ceph_assert(num == 1);
}
o->get(); // paranoia
o->c->onode_map.remove(o->oid);
<< buffer_list_bytes[BUFFER_HOT]
<< " != actual " << hot_bytes
<< dendl;
- assert(hot_bytes == buffer_list_bytes[BUFFER_HOT]);
+ ceph_assert(hot_bytes == buffer_list_bytes[BUFFER_HOT]);
}
for (auto i = buffer_warm_in.begin(); i != buffer_warm_in.end(); ++i) {
<< buffer_list_bytes[BUFFER_WARM_IN]
<< " != actual " << warm_in_bytes
<< dendl;
- assert(warm_in_bytes == buffer_list_bytes[BUFFER_WARM_IN]);
+ ceph_assert(warm_in_bytes == buffer_list_bytes[BUFFER_WARM_IN]);
}
if (s != buffer_bytes) {
derr << __func__ << " buffer_bytes " << buffer_bytes << " actual " << s
<< dendl;
- assert(s == buffer_bytes);
+ ceph_assert(s == buffer_bytes);
}
dout(20) << __func__ << " " << when << " buffer_bytes " << buffer_bytes
i != buffer_map.end() && offset < end && i->first < end;
++i) {
Buffer *b = i->second.get();
- assert(b->end() > offset);
+ ceph_assert(b->end() > offset);
if (b->is_writing() || b->is_clean()) {
if (b->offset < offset) {
uint32_t skip = offset - b->offset;
}
uint64_t hit_bytes = res_intervals.size();
- assert(hit_bytes <= want_bytes);
+ ceph_assert(hit_bytes <= want_bytes);
uint64_t miss_bytes = want_bytes - hit_bytes;
cache->logger->inc(l_bluestore_buffer_hit_bytes, hit_bytes);
cache->logger->inc(l_bluestore_buffer_miss_bytes, miss_bytes);
}
Buffer *b = &*i;
- assert(b->is_writing());
+ ceph_assert(b->is_writing());
if (b->flags & Buffer::FLAG_NOCACHE) {
writing.erase(i++);
break;
}
- assert(p->second->end() > pos);
+ ceph_assert(p->second->end() > pos);
ldout(cache->cct, 30) << __func__ << " move " << *p->second << dendl;
if (p->second->data.length()) {
r._add_buffer(cache, new Buffer(&r, p->second->state, p->second->seq,
_rm_buffer(cache, p--);
}
}
- assert(writing.empty());
+ ceph_assert(writing.empty());
}
// OnodeSpace
ceph::unordered_map<ghobject_t,OnodeRef>::iterator po, pn;
po = onode_map.find(old_oid);
pn = onode_map.find(new_oid);
- assert(po != pn);
- assert(po != onode_map.end());
+ ceph_assert(po != pn);
+ ceph_assert(po != onode_map.end());
if (pn != onode_map.end()) {
ldout(cache->cct, 30) << __func__ << " removing target " << pn->second
<< dendl;
BlueStore::SharedBlob::SharedBlob(uint64_t i, Collection *_coll)
: coll(_coll), sbid_unloaded(i)
{
- assert(sbid_unloaded > 0);
+ ceph_assert(sbid_unloaded > 0);
if (get_cache()) {
get_cache()->add_blob();
}
void BlueStore::SharedBlob::get_ref(uint64_t offset, uint32_t length)
{
- assert(persistent);
+ ceph_assert(persistent);
persistent->ref_map.get(offset, length);
}
PExtentVector *r,
bool *unshare)
{
- assert(persistent);
+ ceph_assert(persistent);
persistent->ref_map.put(offset, length, r,
unshare && !*unshare ? unshare : nullptr);
}
all_invalid = false;
}
}
- assert(discard == all_invalid); // in case of compressed blob all
+ ceph_assert(discard == all_invalid); // in case of compressed blob all
// or none pextents are invalid.
if (discard) {
shared_blob->bc.discard(shared_blob->get_cache(), 0,
// references. Otherwise one is unable to determine the required
// amount of counters in case of per-au tracking, or to obtain min_release_size
// for single counter mode.
- assert(get_blob().get_logical_length() != 0);
+ ceph_assert(get_blob().get_logical_length() != 0);
auto cct = coll->store->cct;
dout(20) << __func__ << " 0x" << std::hex << offset << "~" << length
<< std::dec << " " << *this << dendl;
uint32_t target_blob_size,
uint32_t b_offset,
uint32_t *length0) {
- assert(min_alloc_size);
- assert(target_blob_size);
+ ceph_assert(min_alloc_size);
+ ceph_assert(target_blob_size);
if (!get_blob().is_mutable()) {
return false;
}
auto cct = coll->store->cct; //used by dout
dout(10) << __func__ << " 0x" << std::hex << blob_offset << std::dec
<< " start " << *this << dendl;
- assert(blob.can_split());
- assert(used_in_blob.can_split());
+ ceph_assert(blob.can_split());
+ ceph_assert(used_in_blob.can_split());
bluestore_blob_t &lb = dirty_blob();
bluestore_blob_t &rb = r->dirty_blob();
dirty_range_begin == 0 && dirty_range_end == 0) {
dirty_range_begin = e.logical_offset;
}
- assert(e.logical_end() > 0);
+ ceph_assert(e.logical_end() > 0);
// -1 to exclude next potential shard
dirty_range_end = e.logical_end() - 1;
} else {
// we need to encode inline_bl to measure encoded length
bool never_happen = encode_some(0, OBJECT_MAX_SIZE, inline_bl, &n);
inline_bl.reassign_to_mempool(mempool::mempool_bluestore_cache_other);
- assert(!never_happen);
+ ceph_assert(!never_happen);
size_t len = inline_bl.length();
dout(20) << __func__ << " inline shard " << len << " bytes from " << n
<< " extents" << dendl;
auto p = shards.begin();
auto prev_p = p;
while (p != shards.end()) {
- assert(p->shard_info->offset >= prev_p->shard_info->offset);
+ ceph_assert(p->shard_info->offset >= prev_p->shard_info->offset);
auto n = p;
++n;
if (p->dirty) {
bl, &p->extents)) {
if (force) {
derr << __func__ << " encode_some needs reshard" << dendl;
- assert(!force);
+ ceph_assert(!force);
}
}
size_t len = bl.length();
// avoid resharding the trailing shard, even if it is small
else if (n != shards.end() &&
len < g_conf()->bluestore_extent_map_shard_min_size) {
- assert(endoff != OBJECT_MAX_SIZE);
+ ceph_assert(endoff != OBJECT_MAX_SIZE);
if (p == shards.begin()) {
// we are the first shard, combine with next shard
request_reshard(p->shard_info->offset, endoff + 1);
if (bid < 0) bid = 0;
}
} while (bid != begin_bid);
- assert(0 == "no available blob id");
+ ceph_assert(0 == "no available blob id");
}
void BlueStore::ExtentMap::reshard(
shards.insert(shards.begin() + si_begin, new_shard_info.size(), Shard());
si_end = si_begin + new_shard_info.size();
- assert(sv.size() == shards.size());
+ ceph_assert(sv.size() == shards.size());
// note that we need to update every shard_info of shards here,
// as sv might have been totally re-allocated above
dout(30) << " extent " << *e << dendl;
while (e->logical_offset >= shard_end) {
shard_start = shard_end;
- assert(sp != esp);
+ ceph_assert(sp != esp);
++sp;
if (sp == esp) {
shard_end = OBJECT_MAX_SIZE;
for (auto p = start;
p != extent_map.end() && p->logical_offset < end;
++p, ++n) {
- assert(p->logical_offset >= offset);
+ ceph_assert(p->logical_offset >= offset);
p->blob->last_encoded_id = -1;
if (!p->blob->is_spanning() && p->blob_escapes_range(offset, length)) {
dout(30) << __func__ << " 0x" << std::hex << offset << "~" << length
*_dout << dendl;
*/
- assert(bl.get_num_buffers() <= 1);
+ ceph_assert(bl.get_num_buffers() <= 1);
auto p = bl.front().begin_deep();
__u8 struct_v;
denc(struct_v, p);
// Version 2 differs from v1 in blob's ref_map
// serialization only. Hence there is no specific
// handling at ExtentMap level below.
- assert(struct_v == 1 || struct_v == 2);
+ ceph_assert(struct_v == 1 || struct_v == 2);
uint32_t num;
denc_varint(num, p);
blobid >>= BLOBID_SHIFT_BITS;
if (blobid) {
le->assign_blob(blobs[blobid - 1]);
- assert(le->blob);
+ ceph_assert(le->blob);
} else {
Blob *b = new Blob();
uint64_t sbid = 0;
extent_map.insert(*le);
}
- assert(n == num);
+ ceph_assert(n == num);
return num;
}
// Version 2 differs from v1 in blob's ref_map
// serialization only. Hence there is no specific
// handling at ExtentMap level.
- assert(struct_v == 1 || struct_v == 2);
+ ceph_assert(struct_v == 1 || struct_v == 2);
unsigned n;
denc_varint(n, p);
if (start < 0)
return;
- assert(last >= start);
+ ceph_assert(last >= start);
string key;
while (start <= last) {
- assert((size_t)start < shards.size());
+ ceph_assert((size_t)start < shards.size());
auto p = &shards[start];
if (!p->loaded) {
dout(30) << __func__ << " opening shard 0x" << std::hex
derr << __func__ << " missing shard 0x" << std::hex
<< p->shard_info->offset << std::dec << " for " << onode->oid
<< dendl;
- assert(r >= 0);
+ ceph_assert(r >= 0);
}
}
);
dout(20) << __func__ << " open shard 0x" << std::hex
<< p->shard_info->offset << std::dec
<< " (" << v.length() << " bytes)" << dendl;
- assert(p->dirty == false);
- assert(v.length() == p->shard_info->bytes);
+ ceph_assert(p->dirty == false);
+ ceph_assert(v.length() == p->shard_info->bytes);
onode->c->store->logger->inc(l_bluestore_onode_shard_misses);
} else {
onode->c->store->logger->inc(l_bluestore_onode_shard_hits);
if (start < 0)
return;
- assert(last >= start);
+ ceph_assert(last >= start);
while (start <= last) {
- assert((size_t)start < shards.size());
+ ceph_assert((size_t)start < shards.size());
auto p = &shards[start];
if (!p->loaded) {
derr << __func__ << "on write 0x" << std::hex << offset
<< "~" << length << " shard 0x" << p->shard_info->offset
<< std::dec << " is not loaded, can't mark dirty" << dendl;
- assert(0 == "can't mark unloaded shard dirty");
+ ceph_assert(0 == "can't mark unloaded shard dirty");
}
if (!p->dirty) {
dout(20) << __func__ << " mark shard 0x" << std::hex
--p; // start to the left of offset
}
// the caller should have just written to this region
- assert(p != extent_map.end());
+ ceph_assert(p != extent_map.end());
// identify the *next* shard
auto pshard = shards.begin();
break;
}
if (n->logical_offset >= shard_end) {
- assert(pshard != shards.end());
+ ceph_assert(pshard != shards.end());
++pshard;
if (pshard != shards.end()) {
shard_end = pshard->shard_info->offset;
break;
} else {
// deref tail
- assert(p->logical_end() > offset); // else seek_lextent bug
+ ceph_assert(p->logical_end() > offset); // else seek_lextent bug
uint64_t keep = offset - p->logical_offset;
OldExtent* oe = OldExtent::create(c, offset, p->blob_offset + keep,
p->length - keep, p->blob);
old_extent_map_t *old_extents)
{
// We need to have completely initialized Blob to increment its ref counters.
- assert(b->get_blob().get_logical_length() != 0);
+ ceph_assert(b->get_blob().get_logical_length() != 0);
// Do get_ref prior to punch_hole to prevent from putting reused blob into
// old_extents list if we overwrite the blob totally
dout(30) << __func__ << " to " << *ne << dendl;
} else {
// switch blob
- assert(ep->blob_offset >= blob_offset);
+ ceph_assert(ep->blob_offset >= blob_offset);
ep->blob = rb;
ep->blob_offset -= blob_offset;
uint64_t loffs_end,
uint64_t min_alloc_size)
{
- assert((loffs % min_alloc_size) == 0);
- assert((loffs_end % min_alloc_size) == 0);
+ ceph_assert((loffs % min_alloc_size) == 0);
+ ceph_assert((loffs_end % min_alloc_size) == 0);
for (auto w : writes) {
if (b == w.b) {
auto loffs2 = p2align(w.logical_offset, min_alloc_size);
{
_discard(cct, offset, length);
auto i = iomap.insert(make_pair(offset, deferred_io()));
- assert(i.second); // this should be a new insertion
+ ceph_assert(i.second); // this should be a new insertion
i.first->second.seq = seq;
blp.copy(length, i.first->second.bl);
i.first->second.bl.reassign_to_mempool(
<< " 0x" << std::hex << p->first << "~" << p->second.bl.length()
<< " -> 0x" << head.length() << std::dec << dendl;
auto i = seq_bytes.find(p->second.seq);
- assert(i != seq_bytes.end());
+ ceph_assert(i != seq_bytes.end());
if (end > offset + length) {
bufferlist tail;
tail.substr_of(p->second.bl, offset + length - p->first,
} else {
i->second -= end - offset;
}
- assert(i->second >= 0);
+ ceph_assert(i->second >= 0);
p->second.bl.swap(head);
}
++p;
break;
}
auto i = seq_bytes.find(p->second.seq);
- assert(i != seq_bytes.end());
+ ceph_assert(i != seq_bytes.end());
auto end = p->first + p->second.bl.length();
if (end > offset + length) {
unsigned drop_front = offset + length - p->first;
<< std::dec << dendl;
i->second -= p->second.bl.length();
}
- assert(i->second >= 0);
+ ceph_assert(i->second >= 0);
p = iomap.erase(p);
}
}
}
uint64_t pos = 0;
for (auto& p : iomap) {
- assert(p.first >= pos);
+ ceph_assert(p.first >= pos);
sb[p.second.seq] += p.second.bl.length();
pos = p.first + p.second.bl.length();
}
- assert(sb == seq_bytes);
+ ceph_assert(sb == seq_bytes);
}
store->zombie_osr_set.erase(p);
ldout(store->cct, 10) << "resurrecting zombie osr " << osr << dendl;
osr->zombie = false;
- assert(osr->shard == cid.hash_to_shard(store->m_finisher_num));
+ ceph_assert(osr->shard == cid.hash_to_shard(store->m_finisher_num));
}
}
}
void BlueStore::Collection::open_shared_blob(uint64_t sbid, BlobRef b)
{
- assert(!b->shared_blob);
+ ceph_assert(!b->shared_blob);
const bluestore_blob_t& blob = b->get_blob();
if (!blob.is_shared()) {
b->shared_blob = new SharedBlob(this);
lderr(store->cct) << __func__ << " sbid 0x" << std::hex << sbid
<< std::dec << " not found at key "
<< pretty_binary_string(key) << dendl;
- assert(0 == "uh oh, missing shared_blob");
+ ceph_assert(0 == "uh oh, missing shared_blob");
}
sb->loaded = true;
void BlueStore::Collection::make_blob_shared(uint64_t sbid, BlobRef b)
{
ldout(store->cct, 10) << __func__ << " " << *b << dendl;
- assert(!b->shared_blob->is_loaded());
+ ceph_assert(!b->shared_blob->is_loaded());
// update blob
bluestore_blob_t& blob = b->dirty_blob();
uint64_t BlueStore::Collection::make_blob_unshared(SharedBlob *sb)
{
ldout(store->cct, 10) << __func__ << " " << *sb << dendl;
- assert(sb->is_loaded());
+ ceph_assert(sb->is_loaded());
uint64_t sbid = sb->get_sbid();
shared_blob_set.remove(sb);
const ghobject_t& oid,
bool create)
{
- assert(create ? lock.is_wlocked() : lock.is_locked());
+ ceph_assert(create ? lock.is_wlocked() : lock.is_locked());
spg_t pgid;
if (cid.is_pg(&pgid)) {
ldout(store->cct, 20) << " r " << r << " v.len " << v.length() << dendl;
Onode *on;
if (v.length() == 0) {
- assert(r == -ENOENT);
+ ceph_assert(r == -ENOENT);
if (!store->cct->_conf->bluestore_debug_misc &&
!create)
return OnodeRef();
on = new Onode(this, oid, key);
} else {
// loaded
- assert(r >= 0);
+ ceph_assert(r >= 0);
on = new Onode(this, oid, key);
on->exists = true;
auto p = v.front().begin_deep();
int destbits = dest->cnode.bits;
spg_t destpg;
bool is_pg = dest->cid.is_pg(&destpg);
- assert(is_pg);
+ ceph_assert(is_pg);
auto p = onode_map.onode_map.begin();
while (p != onode_map.onode_map.end()) {
}
}
// assert if we assigned more memory than is available.
- assert(mem_avail >= 0);
+ ceph_assert(mem_avail >= 0);
// Finally commit the new cache sizes
for (auto it = caches.begin(); it != caches.end(); it++) {
string BlueStore::OmapIteratorImpl::key()
{
RWLock::RLocker l(c->lock);
- assert(it->valid());
+ ceph_assert(it->valid());
string db_key = it->raw_key().second;
string user_key;
decode_omap_key(db_key, &user_key);
bufferlist BlueStore::OmapIteratorImpl::value()
{
RWLock::RLocker l(c->lock);
- assert(it->valid());
+ ceph_assert(it->valid());
return it->value();
}
void BlueStore::handle_discard(interval_set<uint64_t>& to_release)
{
dout(10) << __func__ << dendl;
- assert(alloc);
+ ceph_assert(alloc);
alloc->release(to_release);
}
cct->_conf.remove_observer(this);
_shutdown_logger();
- assert(!mounted);
- assert(db == NULL);
- assert(bluefs == NULL);
- assert(fsid_fd < 0);
- assert(path_fd < 0);
+ ceph_assert(!mounted);
+ ceph_assert(db == NULL);
+ ceph_assert(bluefs == NULL);
+ ceph_assert(fsid_fd < 0);
+ ceph_assert(path_fd < 0);
for (auto i : cache_shards) {
delete i;
}
if (cct->_conf->bluestore_compression_min_blob_size) {
comp_min_blob_size = cct->_conf->bluestore_compression_min_blob_size;
} else {
- assert(bdev);
+ ceph_assert(bdev);
if (bdev->is_rotational()) {
comp_min_blob_size = cct->_conf->bluestore_compression_min_blob_size_hdd;
} else {
if (cct->_conf->bluestore_compression_max_blob_size) {
comp_max_blob_size = cct->_conf->bluestore_compression_max_blob_size;
} else {
- assert(bdev);
+ ceph_assert(bdev);
if (bdev->is_rotational()) {
comp_max_blob_size = cct->_conf->bluestore_compression_max_blob_size_hdd;
} else {
if (cct->_conf->bluestore_throttle_cost_per_io) {
throttle_cost_per_io = cct->_conf->bluestore_throttle_cost_per_io;
} else {
- assert(bdev);
+ ceph_assert(bdev);
if (bdev->is_rotational()) {
throttle_cost_per_io = cct->_conf->bluestore_throttle_cost_per_io_hdd;
} else {
if (cct->_conf->bluestore_max_blob_size) {
max_blob_size = cct->_conf->bluestore_max_blob_size;
} else {
- assert(bdev);
+ ceph_assert(bdev);
if (bdev->is_rotational()) {
max_blob_size = cct->_conf->bluestore_max_blob_size_hdd;
} else {
if (cct->_conf->osd_op_num_shards) {
m_finisher_num = cct->_conf->osd_op_num_shards;
} else {
- assert(bdev);
+ ceph_assert(bdev);
if (bdev->is_rotational()) {
m_finisher_num = cct->_conf->osd_op_num_shards_hdd;
} else {
}
}
}
- assert(m_finisher_num != 0);
+ ceph_assert(m_finisher_num != 0);
}
int BlueStore::_set_cache_sizes()
{
- assert(bdev);
+ ceph_assert(bdev);
cache_autotune = cct->_conf.get_val<bool>("bluestore_cache_autotune");
cache_autotune_chunk_size =
cct->_conf.get_val<Option::size_t>("bluestore_cache_autotune_chunk_size");
}
label.meta[key] = value;
r = _write_bdev_label(cct, p, label);
- assert(r == 0);
+ ceph_assert(r == 0);
return ObjectStore::write_meta(key, value);
}
<< "; BlueStore has hard limit of 0x" << OBJECT_MAX_SIZE << "." << std::dec << dendl;
return -EINVAL;
}
- assert(path_fd < 0);
+ ceph_assert(path_fd < 0);
path_fd = TEMP_FAILURE_RETRY(::open(path.c_str(), O_DIRECTORY));
if (path_fd < 0) {
int r = -errno;
encode(label, bl);
uint32_t crc = bl.crc32c(-1);
encode(crc, bl);
- assert(bl.length() <= BDEV_LABEL_BLOCK_SIZE);
+ ceph_assert(bl.length() <= BDEV_LABEL_BLOCK_SIZE);
bufferptr z(BDEV_LABEL_BLOCK_SIZE - bl.length());
z.zero();
bl.append(std::move(z));
if (cct->_conf->bluestore_prefer_deferred_size) {
prefer_deferred_size = cct->_conf->bluestore_prefer_deferred_size;
} else {
- assert(bdev);
+ ceph_assert(bdev);
if (bdev->is_rotational()) {
prefer_deferred_size = cct->_conf->bluestore_prefer_deferred_size_hdd;
} else {
if (cct->_conf->bluestore_deferred_batch_ops) {
deferred_batch_ops = cct->_conf->bluestore_deferred_batch_ops;
} else {
- assert(bdev);
+ ceph_assert(bdev);
if (bdev->is_rotational()) {
deferred_batch_ops = cct->_conf->bluestore_deferred_batch_ops_hdd;
} else {
int BlueStore::_open_bdev(bool create)
{
- assert(bdev == NULL);
+ ceph_assert(bdev == NULL);
string p = path + "/block";
bdev = BlockDevice::create(cct, p, aio_cb, static_cast<void*>(this), discard_cb, static_cast<void*>(this));
int r = bdev->open(p);
block_size = bdev->get_block_size();
block_mask = ~(block_size - 1);
block_size_order = ctz(block_size);
- assert(block_size == 1u << block_size_order);
+ ceph_assert(block_size == 1u << block_size_order);
// and set cache_size based on device type
r = _set_cache_sizes();
if (r < 0) {
void BlueStore::_validate_bdev()
{
- assert(bdev);
- assert(min_alloc_size); // _get_odisk_reserved depends on that
+ ceph_assert(bdev);
+ ceph_assert(min_alloc_size); // _get_odisk_reserved depends on that
uint64_t dev_size = bdev->get_size();
if (dev_size <
_get_ondisk_reserved() + cct->_conf->bluestore_bluefs_min) {
dout(1) << __func__ << " main device size " << byte_u_t(dev_size)
<< " is too small, disable bluestore_bluefs_min for now"
<< dendl;
- assert(dev_size >= _get_ondisk_reserved());
+ ceph_assert(dev_size >= _get_ondisk_reserved());
int r = cct->_conf.set_val("bluestore_bluefs_min", "0");
- assert(r == 0);
+ ceph_assert(r == 0);
}
}
void BlueStore::_close_bdev()
{
- assert(bdev);
+ ceph_assert(bdev);
bdev->close();
delete bdev;
bdev = NULL;
int BlueStore::_open_fm(bool create)
{
- assert(fm == NULL);
+ ceph_assert(fm == NULL);
fm = FreelistManager::create(cct, freelist_type, db, PREFIX_ALLOC);
if (create) {
}
// being able to allocate in units less than bdev block size
// seems to be a bad idea.
- assert( cct->_conf->bdev_block_size <= (int64_t)min_alloc_size);
+ ceph_assert( cct->_conf->bdev_block_size <= (int64_t)min_alloc_size);
fm->create(bdev->get_size(), (int64_t)min_alloc_size, t);
// allocate superblock reserved space. note that we do not mark
fm->allocate(0, reserved, t);
if (cct->_conf->bluestore_bluefs) {
- assert(bluefs_extents.num_intervals() == 1);
+ ceph_assert(bluefs_extents.num_intervals() == 1);
interval_set<uint64_t>::iterator p = bluefs_extents.begin();
reserved = round_up_to(p.get_start() + p.get_len(), min_alloc_size);
dout(20) << __func__ << " reserved 0x" << std::hex << reserved << std::dec
l = end - start;
l = p2align(l, min_alloc_size);
}
- assert(start + l <= end);
+ ceph_assert(start + l <= end);
uint64_t u = 1 + (uint64_t)(r * (double)l);
u = p2roundup(u, min_alloc_size);
u = p2align(u, min_alloc_size);
stop = true;
}
- assert(start + l + u <= end);
+ ceph_assert(start + l + u <= end);
dout(20) << __func__ << " free 0x" << std::hex << start << "~" << l
<< " use 0x" << u << std::dec << dendl;
void BlueStore::_close_fm()
{
dout(10) << __func__ << dendl;
- assert(fm);
+ ceph_assert(fm);
fm->shutdown();
delete fm;
fm = NULL;
int BlueStore::_open_alloc()
{
- assert(alloc == NULL);
- assert(bdev->get_size());
+ ceph_assert(alloc == NULL);
+ ceph_assert(bdev->get_size());
alloc = Allocator::create(cct, cct->_conf->bluestore_allocator,
bdev->get_size(),
min_alloc_size);
void BlueStore::_close_alloc()
{
- assert(bdev);
+ ceph_assert(bdev);
bdev->discard_drain();
- assert(alloc);
+ ceph_assert(alloc);
alloc->shutdown();
delete alloc;
alloc = NULL;
int BlueStore::_open_fsid(bool create)
{
- assert(fsid_fd < 0);
+ ceph_assert(fsid_fd < 0);
int flags = O_RDWR;
if (create)
flags |= O_CREAT;
int BlueStore::_open_db(bool create, bool to_repair_db)
{
int r;
- assert(!db);
+ ceph_assert(!db);
string fn = path + "/db";
string options;
stringstream err;
uint64_t start = p2align((bdev->get_size() - initial) / 2,
cct->_conf->bluefs_alloc_size);
//avoiding superblock overwrite
- assert(cct->_conf->bluefs_alloc_size > _get_ondisk_reserved());
+ ceph_assert(cct->_conf->bluefs_alloc_size > _get_ondisk_reserved());
start = std::max(cct->_conf->bluefs_alloc_size, start);
bluefs->add_block_extent(bluefs_shared_bdev, start, initial);
return 0;
free_bluefs:
- assert(bluefs);
+ ceph_assert(bluefs);
delete bluefs;
bluefs = NULL;
return r;
void BlueStore::_close_db()
{
- assert(db);
+ ceph_assert(db);
delete db;
db = NULL;
if (bluefs) {
dout(10) << __func__ << dendl;
interval_set<uint64_t> bset;
int r = bluefs->get_block_extents(bluefs_shared_bdev, &bset);
- assert(r == 0);
+ ceph_assert(r == 0);
if (bset == bluefs_extents) {
dout(10) << __func__ << " we agree bluefs has 0x" << std::hex << bset
<< std::dec << dendl;
int BlueStore::_balance_bluefs_freespace(PExtentVector *extents)
{
int ret = 0;
- assert(bluefs);
+ ceph_assert(bluefs);
vector<pair<uint64_t,uint64_t>> bluefs_usage; // <free, total> ...
bluefs->get_usage(&bluefs_usage);
- assert(bluefs_usage.size() > bluefs_shared_bdev);
+ ceph_assert(bluefs_usage.size() > bluefs_shared_bdev);
// fixme: look at primary bdev only for now
uint64_t bluefs_free = bluefs_usage[bluefs_shared_bdev].first;
int BlueStore::_open_collections(int *errors)
{
dout(10) << __func__ << dendl;
- assert(coll_map.empty());
+ ceph_assert(coll_map.empty());
KeyValueDB::Iterator it = db->get_iterator(PREFIX_COLL);
for (it->upper_bound(string());
it->valid();
}
string serial_number = epath.substr(strlen(SPDK_PREFIX));
r = ::write(fd, serial_number.c_str(), serial_number.size());
- assert(r == (int)serial_number.size());
+ ceph_assert(r == (int)serial_number.size());
dout(1) << __func__ << " created " << name << " symlink to "
<< epath << dendl;
VOID_TEMP_FAILURE_RETRY(::close(fd));
if (cct->_conf->bluestore_min_alloc_size) {
min_alloc_size = cct->_conf->bluestore_min_alloc_size;
} else {
- assert(bdev);
+ ceph_assert(bdev);
if (bdev->is_rotational()) {
min_alloc_size = cct->_conf->bluestore_min_alloc_size_hdd;
} else {
{
dout(10) << __func__ << " " << num << dendl;
size_t old = cache_shards.size();
- assert(num >= old);
+ ceph_assert(num >= old);
cache_shards.resize(num);
for (unsigned i = old; i < num; ++i) {
cache_shards[i] = Cache::create(cct, cct->_conf->bluestore_cache_type,
int BlueStore::umount()
{
- assert(_kv_only || mounted);
+ ceph_assert(_kv_only || mounted);
dout(1) << __func__ << dendl;
_osr_drain_all();
apply(
e.offset, e.length, granularity, used_blocks,
[&](uint64_t pos, mempool_dynamic_bitset &bs) {
- assert(pos < bs.size());
+ ceph_assert(pos < bs.size());
if (bs.test(pos)) {
if (repairer) {
repairer->note_misreference(
apply(
0, std::max<uint64_t>(min_alloc_size, SUPER_RESERVED), fm->get_alloc_size(), used_blocks,
[&](uint64_t pos, mempool_dynamic_bitset &bs) {
- assert(pos < bs.size());
+ ceph_assert(pos < bs.size());
bs.set(pos);
}
);
if (bluefs) {
interval_set<uint64_t> bset;
r = bluefs->get_block_extents(bluefs_shared_bdev, &bset);
- assert(r == 0);
+ ceph_assert(r == 0);
if (!(bset == bluefs_extents)) {
dout(10) << __func__ << " bluefs says 0x" << std::hex << bset << std::dec
<< dendl;
apply(
e.get_start(), e.get_len(), fm->get_alloc_size(), used_blocks,
[&](uint64_t pos, mempool_dynamic_bitset &bs) {
- assert(pos < bs.size());
+ ceph_assert(pos < bs.size());
bs.set(pos);
}
);
}
pos = l.logical_offset + l.length;
expected_statfs.data_stored += l.length;
- assert(l.blob);
+ ceph_assert(l.blob);
const bluestore_blob_t& blob = l.blob->get_blob();
auto& ref = ref_map[l.blob];
pu = &p->second;
}
uint64_t blob_len = blob.get_logical_length();
- assert((blob_len % (sizeof(*pu)*8)) == 0);
- assert(l.blob_offset + l.length <= blob_len);
+ ceph_assert((blob_len % (sizeof(*pu)*8)) == 0);
+ ceph_assert(l.blob_offset + l.length <= blob_len);
uint64_t chunk_size = blob_len / (sizeof(*pu)*8);
uint64_t start = l.blob_offset / chunk_size;
uint64_t end =
++errors;
}
sb_info_t& sbi = sb_info[i.first->shared_blob->get_sbid()];
- assert(sbi.cid == coll_t() || sbi.cid == c->cid);
+ ceph_assert(sbi.cid == coll_t() || sbi.cid == c->cid);
sbi.cid = c->cid;
sbi.sb = i.first->shared_blob;
sbi.oids.push_back(oid);
if (r < 0) {
derr << __func__ << " failed to read from 0x" << std::hex << e->offset
<<"~" << e->length << std::dec << dendl;
- assert(0 == "read failed, wtf");
+ ceph_assert(0 == "read failed, wtf");
}
pext_to_release.push_back(*e);
e = pextents.erase(e);
b_off, bl,
[&](uint64_t offset, bufferlist& t) {
int r = bdev->write(offset, t, false);
- assert(r == 0);
+ ceph_assert(r == 0);
});
e += exts.size() - 1;
for (auto& p : exts) {
b->dirty_blob().clear_flag(bluestore_blob_t::FLAG_SHARED);
auto sb_it = sb_info.find(b->shared_blob->get_sbid());
- assert(sb_it != sb_info.end());
+ ceph_assert(sb_it != sb_info.end());
sb_info_t& sbi = sb_it->second;
for (auto& r : sbi.ref_map.ref_map) {
if (repair && (!sbi.passed || sbi.updated)) {
auto sbid = p.first;
if (sbi.ref_map.empty()) {
- assert(sbi.passed);
+ ceph_assert(sbi.passed);
dout(20) << __func__ << " " << *sbi.sb
<< " is empty, removing" << dendl;
repairer.fix_shared_blob(db, sbid, nullptr);
apply(
e.get_start(), e.get_len(), fm->get_alloc_size(), used_blocks,
[&](uint64_t pos, mempool_dynamic_bitset &bs) {
- assert(pos < bs.size());
+ ceph_assert(pos < bs.size());
bs.set(pos);
}
);
apply(
e.get_start(), e.get_len(), fm->get_alloc_size(), used_blocks,
[&](uint64_t pos, mempool_dynamic_bitset &bs) {
- assert(pos < bs.size());
+ ceph_assert(pos < bs.size());
bs.reset(pos);
}
);
apply(
offset, length, fm->get_alloc_size(), used_blocks,
[&](uint64_t pos, mempool_dynamic_bitset &bs) {
- assert(pos < bs.size());
+ ceph_assert(pos < bs.size());
if (bs.test(pos)) {
if (offset == SUPER_RESERVED &&
length == min_alloc_size - SUPER_RESERVED) {
fm->enumerate_reset();
size_t count = used_blocks.count();
if (used_blocks.size() != count) {
- assert(used_blocks.size() > count);
+ ceph_assert(used_blocks.size() > count);
used_blocks.flip();
size_t start = used_blocks.find_first();
while (start != decltype(used_blocks)::npos) {
PExtentVector exts;
int64_t alloc_len = alloc->allocate(len, min_alloc_size,
min_alloc_size * 256, 0, &exts);
- assert(alloc_len >= (int64_t)len);
+ ceph_assert(alloc_len >= (int64_t)len);
for (auto& p : exts) {
fm->allocate(p.offset, p.length, txn);
}
KeyValueDB::Transaction txn;
OnodeRef o;
CollectionRef c = _get_collection(cid);
- assert(c);
+ ceph_assert(c);
{
RWLock::WLocker l(c->lock); // just to avoid internal asserts
o = c->get_onode(oid, false);
- assert(o);
+ ceph_assert(o);
o->extent_map.fault_range(db, 0, OBJECT_MAX_SIZE);
}
}
}
}
- assert(injected);
+ ceph_assert(injected);
db->submit_transaction_sync(txn);
}
{
OnodeRef o1;
CollectionRef c1 = _get_collection(cid1);
- assert(c1);
+ ceph_assert(c1);
{
RWLock::WLocker l(c1->lock); // just to avoid internal asserts
o1 = c1->get_onode(oid1, false);
- assert(o1);
+ ceph_assert(o1);
o1->extent_map.fault_range(db, offset, OBJECT_MAX_SIZE);
}
OnodeRef o2;
CollectionRef c2 = _get_collection(cid2);
- assert(c2);
+ ceph_assert(c2);
{
RWLock::WLocker l(c2->lock); // just to avoid internal asserts
o2 = c2->get_onode(oid2, false);
- assert(o2);
+ ceph_assert(o2);
o2->extent_map.fault_range(db, offset, OBJECT_MAX_SIZE);
}
Extent& e1 = *(o1->extent_map.seek_lextent(offset));
// require onode/extent layout to be the same (and simple)
// to make things easier
- assert(o1->onode.extent_map_shards.empty());
- assert(o2->onode.extent_map_shards.empty());
- assert(o1->extent_map.spanning_blob_map.size() == 0);
- assert(o2->extent_map.spanning_blob_map.size() == 0);
- assert(e1.logical_offset == e2.logical_offset);
- assert(e1.length == e2.length);
- assert(e1.blob_offset == e2.blob_offset);
+ ceph_assert(o1->onode.extent_map_shards.empty());
+ ceph_assert(o2->onode.extent_map_shards.empty());
+ ceph_assert(o1->extent_map.spanning_blob_map.size() == 0);
+ ceph_assert(o2->extent_map.spanning_blob_map.size() == 0);
+ ceph_assert(e1.logical_offset == e2.logical_offset);
+ ceph_assert(e1.length == e2.length);
+ ceph_assert(e1.blob_offset == e2.blob_offset);
KeyValueDB::Transaction txn;
txn = db->get_transaction();
CollectionRef c = *p;
dout(10) << __func__ << " " << c << " " << c->cid << dendl;
if (c->onode_map.map_any([&](OnodeRef o) {
- assert(!o->exists);
+ ceph_assert(!o->exists);
if (o->flushing_count.load()) {
dout(10) << __func__ << " " << c << " " << c->cid << " " << o->oid
<< " flush_txns " << o->flushing_count << dendl;
} else {
l = b_len;
if (pc != cache_res.end()) {
- assert(pc->first > b_off);
+ ceph_assert(pc->first > b_off);
l = pc->first - b_off;
}
dout(30) << __func__ << " will read 0x" << std::hex << pos << ": 0x"
// propagate EIO to caller
return r;
}
- assert(r == 0);
+ ceph_assert(r == 0);
}
} else {
// read the pieces
// propagate EIO to caller
return r;
}
- assert(r == 0);
+ ceph_assert(r == 0);
}
- assert(reg.bl.length() == r_len);
+ ceph_assert(reg.bl.length() == r_len);
}
}
}
ioc.aio_wait();
r = ioc.get_return_value();
if (r < 0) {
- assert(r == -EIO); // no other errors allowed
+ ceph_assert(r == -EIO); // no other errors allowed
return -EIO;
}
}
dout(20) << __func__ << " blob " << *bptr << std::hex
<< " need 0x" << b2r_it->second << std::dec << dendl;
if (bptr->get_blob().is_compressed()) {
- assert(p != compressed_blob_bls.end());
+ ceph_assert(p != compressed_blob_bls.end());
bufferlist& compressed_bl = *p++;
if (_verify_csum(o, &bptr->get_blob(), 0, compressed_bl,
b2r_it->second.front().logical_offset) < 0) {
} else {
uint64_t l = length - pos;
if (pr != pr_end) {
- assert(pr->first > pos + offset);
+ ceph_assert(pr->first > pos + offset);
l = pr->first - (pos + offset);
}
dout(30) << __func__ << " assemble 0x" << std::hex << pos
pos += l;
}
}
- assert(bl.length() == length);
- assert(pos == length);
- assert(pr == pr_end);
+ ceph_assert(bl.length() == length);
+ ceph_assert(pos == length);
+ ceph_assert(pr == pr_end);
r = bl.length();
return r;
}
get_object_key(cct, start, &k);
if (start.hobj.is_temp()) {
temp = true;
- assert(k >= temp_start_key && k < temp_end_key);
+ ceph_assert(k >= temp_start_key && k < temp_end_key);
} else {
temp = false;
- assert(k >= start_key && k < end_key);
+ ceph_assert(k >= start_key && k < end_key);
}
dout(20) << __func__ << " start from " << pretty_binary_string(k)
<< " temp=" << (int)temp << dendl;
}
ghobject_t oid;
int r = get_key_object(it->key(), &oid);
- assert(r == 0);
+ ceph_assert(r == 0);
dout(20) << __func__ << " oid " << oid << " end " << end << dendl;
if (ls->size() >= (unsigned)max) {
dout(20) << __func__ << " reached max " << max << dendl;
dout(10) << __func__ << " ondisk_format " << ondisk_format
<< " min_compat_ondisk_format " << min_compat_ondisk_format
<< dendl;
- assert(ondisk_format == latest_ondisk_format);
+ ceph_assert(ondisk_format == latest_ondisk_format);
{
bufferlist bl;
encode(ondisk_format, bl);
freelist_type = std::string(bl.c_str(), bl.length());
dout(10) << __func__ << " freelist_type " << freelist_type << dendl;
} else {
- assert("Not Support extent freelist manager" == 0);
+ ceph_assert("Not Support extent freelist manager" == 0);
}
}
bl.clear();
{
r = db->get(PREFIX_SUPER, "min_compat_ondisk_format", &bl);
- assert(!r);
+ ceph_assert(!r);
auto p = bl.cbegin();
try {
decode(compat_ondisk_format, p);
decode(val, p);
min_alloc_size = val;
min_alloc_size_order = ctz(val);
- assert(min_alloc_size == 1u << min_alloc_size_order);
+ ceph_assert(min_alloc_size == 1u << min_alloc_size_order);
} catch (buffer::error& e) {
derr << __func__ << " unable to read min_alloc_size" << dendl;
return -EIO;
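A worked check of the power-of-two guard above, assuming ctz counts trailing zero bits (values hypothetical):

// val = 4096 (0x1000): ctz = 12, 1u << 12 == 4096         -> assert passes
// val = 6144 (0x1800): ctz = 11, 1u << 11 == 2048 != 6144 -> assert fires,
//                      so a non-power-of-two min_alloc_size is rejected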
{
dout(1) << __func__ << " from " << ondisk_format << ", latest "
<< latest_ondisk_format << dendl;
- assert(ondisk_format > 0);
- assert(ondisk_format < latest_ondisk_format);
+ ceph_assert(ondisk_format > 0);
+ ceph_assert(ondisk_format < latest_ondisk_format);
if (ondisk_format == 1) {
// changes:
ondisk_format = 2;
_prepare_ondisk_format_super(t);
int r = db->submit_transaction_sync(t);
- assert(r == 0);
+ ceph_assert(r == 0);
}
// done
void BlueStore::_assign_nid(TransContext *txc, OnodeRef o)
{
if (o->onode.nid) {
- assert(o->exists);
+ ceph_assert(o->exists);
return;
}
uint64_t nid = ++nid_last;
} else {
txc->state = TransContext::STATE_KV_SUBMITTED;
int r = cct->_conf->bluestore_debug_omit_kv_commit ? 0 : db->submit_transaction(txc->t);
- assert(r == 0);
+ ceph_assert(r == 0);
_txc_applied_kv(txc);
}
}
default:
derr << __func__ << " unexpected txc " << txc
<< " state " << txc->get_state_name() << dendl;
- assert(0 == "unexpected txc state");
+ ceph_assert(0 == "unexpected txc state");
return;
}
}
void BlueStore::_txc_finish(TransContext *txc)
{
dout(20) << __func__ << " " << txc << " onodes " << txc->onodes << dendl;
- assert(txc->state == TransContext::STATE_FINISHING);
+ ceph_assert(txc->state == TransContext::STATE_FINISHING);
for (auto& sb : txc->shared_blobs_written) {
sb->finish_write(txc->seq);
dout(10) << __func__ << " " << osr << " " << osr->cid << dendl;
osr->zombie = true;
auto i = zombie_osr_set.emplace(osr->cid, osr);
- assert(i.second); // this should be a new insertion
+ ceph_assert(i.second); // this should be a new insertion
}
void BlueStore::_osr_drain_preceding(TransContext *txc)
for (auto& osr : zombies) {
if (zombie_osr_set.erase(osr->cid)) {
dout(10) << __func__ << " reaping empty zombie osr " << osr << dendl;
- assert(osr->q.empty());
+ ceph_assert(osr->q.empty());
} else if (osr->zombie) {
dout(10) << __func__ << " empty zombie osr " << osr
<< " already reaped" << dendl;
- assert(osr->q.empty());
+ ceph_assert(osr->q.empty());
} else {
dout(10) << __func__ << " empty zombie osr " << osr
<< " resurrected" << dendl;
}
kv_sync_thread.join();
kv_finalize_thread.join();
- assert(removed_collections.empty());
+ ceph_assert(removed_collections.empty());
{
std::lock_guard<std::mutex> l(kv_lock);
kv_stop = false;
dout(10) << __func__ << " start" << dendl;
deque<DeferredBatch*> deferred_stable_queue; ///< deferred ios done + stable
std::unique_lock<std::mutex> l(kv_lock);
- assert(!kv_sync_started);
+ ceph_assert(!kv_sync_started);
kv_sync_started = true;
kv_cond.notify_all();
while (true) {
- assert(kv_committing.empty());
+ ceph_assert(kv_committing.empty());
if (kv_queue.empty() &&
((deferred_done_queue.empty() && deferred_stable_queue.empty()) ||
!deferred_aggressive)) {
if (txc->state == TransContext::STATE_KV_QUEUED) {
txc->log_state_latency(logger, l_bluestore_state_kv_queued_lat);
int r = cct->_conf->bluestore_debug_omit_kv_commit ? 0 : db->submit_transaction(txc->t);
- assert(r == 0);
+ ceph_assert(r == 0);
_txc_applied_kv(txc);
--txc->osr->kv_committing_serially;
txc->state = TransContext::STATE_KV_SUBMITTED;
}
} else {
- assert(txc->state == TransContext::STATE_KV_SUBMITTED);
+ ceph_assert(txc->state == TransContext::STATE_KV_SUBMITTED);
txc->log_state_latency(logger, l_bluestore_state_kv_queued_lat);
}
if (txc->had_ios) {
ceph::make_timespan(cct->_conf->bluestore_bluefs_balance_interval)) {
bluefs_last_balance = after_flush;
int r = _balance_bluefs_freespace(&bluefs_gift_extents);
- assert(r >= 0);
+ ceph_assert(r >= 0);
if (r > 0) {
for (auto& p : bluefs_gift_extents) {
bluefs_extents.insert(p.offset, p.length);
for (auto b : deferred_stable) {
for (auto& txc : b->txcs) {
bluestore_deferred_transaction_t& wt = *txc.deferred_txn;
- assert(wt.released.empty()); // only kraken did this
+ ceph_assert(wt.released.empty()); // only kraken did this
string key;
get_deferred_key(wt.seq, &key);
synct->rm_single_key(PREFIX_DEFERRED, key);
// submit synct synchronously (block and wait for it to commit)
int r = cct->_conf->bluestore_debug_omit_kv_commit ? 0 : db->submit_transaction_sync(synct);
- assert(r == 0);
+ ceph_assert(r == 0);
{
std::unique_lock<std::mutex> m(kv_finalize_lock);
deque<DeferredBatch*> deferred_stable;
dout(10) << __func__ << " start" << dendl;
std::unique_lock<std::mutex> l(kv_finalize_lock);
- assert(!kv_finalize_started);
+ ceph_assert(!kv_finalize_started);
kv_finalize_started = true;
kv_finalize_cond.notify_all();
while (true) {
- assert(kv_committed.empty());
- assert(deferred_stable.empty());
+ ceph_assert(kv_committed.empty());
+ ceph_assert(deferred_stable.empty());
if (kv_committing_to_finalize.empty() &&
deferred_stable_to_finalize.empty()) {
if (kv_finalize_stop)
while (!kv_committed.empty()) {
TransContext *txc = kv_committed.front();
- assert(txc->state == TransContext::STATE_KV_SUBMITTED);
+ ceph_assert(txc->state == TransContext::STATE_KV_SUBMITTED);
_txc_state_proc(txc);
kv_committed.pop_front();
}
bluestore_deferred_transaction_t& wt = *txc->deferred_txn;
for (auto opi = wt.ops.begin(); opi != wt.ops.end(); ++opi) {
const auto& op = *opi;
- assert(op.op == bluestore_deferred_op_t::OP_WRITE);
+ ceph_assert(op.op == bluestore_deferred_op_t::OP_WRITE);
bufferlist::const_iterator p = op.data.begin();
for (auto e : op.extents) {
txc->osr->deferred_pending->prepare_write(
dout(10) << __func__ << " osr " << osr
<< " " << osr->deferred_pending->iomap.size() << " ios pending "
<< dendl;
- assert(osr->deferred_pending);
- assert(!osr->deferred_running);
+ ceph_assert(osr->deferred_pending);
+ ceph_assert(!osr->deferred_running);
auto b = osr->deferred_pending;
deferred_queue_size -= b->seq_bytes.size();
- assert(deferred_queue_size >= 0);
+ ceph_assert(deferred_queue_size >= 0);
osr->deferred_running = osr->deferred_pending;
osr->deferred_pending = nullptr;
logger->inc(l_bluestore_deferred_write_ops);
logger->inc(l_bluestore_deferred_write_bytes, bl.length());
int r = bdev->aio_write(start, bl, &b->ioc, false);
- assert(r == 0);
+ ceph_assert(r == 0);
}
}
if (i == b->iomap.end()) {
void BlueStore::_deferred_aio_finish(OpSequencer *osr)
{
dout(10) << __func__ << " osr " << osr << dendl;
- assert(osr->deferred_running);
+ ceph_assert(osr->deferred_running);
DeferredBatch *b = osr->deferred_running;
{
std::lock_guard<std::mutex> l(deferred_lock);
- assert(osr->deferred_running == b);
+ ceph_assert(osr->deferred_running == b);
osr->deferred_running = nullptr;
if (!osr->deferred_pending) {
dout(20) << __func__ << " dequeueing" << dendl;
case Transaction::OP_MKCOLL:
{
- assert(!c);
+ ceph_assert(!c);
const coll_t &cid = i.get_cid(op->cid);
r = _create_collection(txc, cid, op->split_bits, &c);
if (!r)
break;
case Transaction::OP_SPLIT_COLLECTION:
- assert(0 == "deprecated");
+ ceph_assert(0 == "deprecated");
break;
case Transaction::OP_SPLIT_COLLECTION2:
break;
case Transaction::OP_COLL_RENAME:
- assert(0 == "not implemented");
+ ceph_assert(0 == "not implemented");
break;
}
if (r < 0) {
<< " not handled on operation " << op->op
<< " (op " << pos << ", counting from 0)" << dendl;
_dump_transaction<0>(t);
- assert(0 == "unexpected error");
+ ceph_assert(0 == "unexpected error");
}
// these operations implicitly create the object
break;
case Transaction::OP_CLONERANGE:
- assert(0 == "deprecated");
+ ceph_assert(0 == "deprecated");
break;
case Transaction::OP_CLONERANGE2:
break;
case Transaction::OP_COLL_ADD:
- assert(0 == "not implemented");
+ ceph_assert(0 == "not implemented");
break;
case Transaction::OP_COLL_REMOVE:
- assert(0 == "not implemented");
+ ceph_assert(0 == "not implemented");
break;
case Transaction::OP_COLL_MOVE:
- assert(0 == "deprecated");
+ ceph_assert(0 == "deprecated");
break;
case Transaction::OP_COLL_MOVE_RENAME:
case Transaction::OP_TRY_RENAME:
{
- assert(op->cid == op->dest_cid);
+ ceph_assert(op->cid == op->dest_cid);
const ghobject_t& noid = i.get_oid(op->dest_oid);
OnodeRef& no = ovec[op->dest_oid];
if (!no) {
<< dendl;
derr << msg << dendl;
_dump_transaction<0>(t);
- assert(0 == "unexpected error");
+ ceph_assert(0 == "unexpected error");
}
}
}
}
for (auto& e : em.extent_map) {
dout(LogLevelV) << __func__ << " " << e << dendl;
- assert(e.logical_offset >= pos);
+ ceph_assert(e.logical_offset >= pos);
pos = e.logical_offset + e.length;
const bluestore_blob_t& blob = e.blob->get_blob();
if (blob.has_csum()) {
uint64_t end = *offset + length;
unsigned back_copy = end % chunk_size;
if (back_copy) {
- assert(back_pad == 0);
+ ceph_assert(back_pad == 0);
back_pad = chunk_size - back_copy;
- assert(back_copy <= length);
+ ceph_assert(back_copy <= length);
bufferptr tail(chunk_size);
bl->copy(length - back_copy, back_copy, tail.c_str());
tail.zero(back_copy, back_pad, false);
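A worked example of the tail padding above (chunk_size and offsets hypothetical): with chunk_size = 4096 and a write ending at offset 10000:

// back_copy = 10000 % 4096 = 1808   (payload bytes falling into the last chunk)
// back_pad  = 4096 - 1808  = 2288   (zero fill appended after them)
// tail[0..1807]    <- last 1808 payload bytes  (bl->copy(...))
// tail[1808..4095] <- zeros                    (tail.zero(back_copy, back_pad, false))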
*_dout << dendl;
if (pad_count)
logger->inc(l_bluestore_write_pad_bytes, pad_count);
- assert(bl->length() == length);
+ ceph_assert(bl->length() == length);
}
void BlueStore::_do_write_small(
{
dout(10) << __func__ << " 0x" << std::hex << offset << "~" << length
<< std::dec << dendl;
- assert(length < min_alloc_size);
+ ceph_assert(length < min_alloc_size);
uint64_t end_offs = offset + length;
logger->inc(l_bluestore_write_small);
bufferlist head_bl;
int r = _do_read(c.get(), o, offset - head_pad - head_read, head_read,
head_bl, 0);
- assert(r >= 0 && r <= (int)head_read);
+ ceph_assert(r >= 0 && r <= (int)head_read);
size_t zlen = head_read - r;
if (zlen) {
head_bl.append_zero(zlen);
bufferlist tail_bl;
int r = _do_read(c.get(), o, offset + length + tail_pad, tail_read,
tail_bl, 0);
- assert(r >= 0 && r <= (int)tail_read);
+ ceph_assert(r >= 0 && r <= (int)tail_read);
size_t zlen = tail_read - r;
if (zlen) {
tail_bl.append_zero(zlen);
op->extents.emplace_back(bluestore_pextent_t(offset, length));
return 0;
});
- assert(r == 0);
+ ceph_assert(r == 0);
if (b->get_blob().csum_type) {
b->dirty_blob().calc_csum(b_off, bl);
}
max_bsize,
offset0 - bstart,
&alloc_len)) {
- assert(alloc_len == min_alloc_size); // expecting data always
+ ceph_assert(alloc_len == min_alloc_size); // expecting data always
// fit into reused blob
// Need to check for pending writes desiring to
// reuse the same pextent. The rationale is that during GC two chunks
max_bsize,
offset0 - bstart,
&alloc_len)) {
- assert(alloc_len == min_alloc_size); // expecting data always
+ ceph_assert(alloc_len == min_alloc_size); // expecting data always
// fit into reused blob
// Need to check for pending writes desiring to
// reuse the same pextent. The rationale is that during GC two chunks
auto start = mono_clock::now();
// compress
- assert(wi.b_off == 0);
- assert(wi.blob_length == wi.bl.length());
+ ceph_assert(wi.b_off == 0);
+ ceph_assert(wi.blob_length == wi.bl.length());
// FIXME: memory alignment here is bad
bufferlist t;
int r = c->compress(wi.bl, t);
- assert(r == 0);
+ ceph_assert(r == 0);
bluestore_compression_header_t chdr;
chdr.type = c->get_type();
<< dendl;
return -ENOSPC;
}
- assert(prealloc_left == (int64_t)need);
+ ceph_assert(prealloc_left == (int64_t)need);
dout(20) << __func__ << " prealloc " << prealloc << dendl;
auto prealloc_pos = prealloc.begin();
dblob.set_compressed(wi.blob_length, wi.compressed_len);
} else if (wi.new_blob) {
// initialize newly created blob only
- assert(dblob.is_mutable());
+ ceph_assert(dblob.is_mutable());
unsigned csum_order;
if (l->length() != wi.blob_length) {
// hrm, maybe we could do better here, but let's not bother.
suggested_boff > b_off) {
dout(20) << __func__ << " forcing blob_offset to 0x"
<< std::hex << suggested_boff << std::dec << dendl;
- assert(suggested_boff >= b_off);
+ ceph_assert(suggested_boff >= b_off);
csum_length += suggested_boff - b_off;
b_off = suggested_boff;
}
PExtentVector extents;
int64_t left = final_length;
while (left > 0) {
- assert(prealloc_left > 0);
+ ceph_assert(prealloc_left > 0);
if (prealloc_pos->length <= left) {
prealloc_left -= prealloc_pos->length;
left -= prealloc_pos->length;
op->extents.emplace_back(bluestore_pextent_t(offset, length));
return 0;
});
- assert(r == 0);
+ ceph_assert(r == 0);
op->data = *l;
} else {
b->get_blob().map_bl(
}
}
}
- assert(prealloc_pos == prealloc.end());
- assert(prealloc_left == 0);
+ ceph_assert(prealloc_pos == prealloc.end());
+ ceph_assert(prealloc_left == 0);
return 0;
}
unshare_ptr);
}
if (unshare) {
- assert(maybe_unshared_blobs);
+ ceph_assert(maybe_unshared_blobs);
maybe_unshared_blobs->insert(b->shared_blob.get());
}
dout(20) << __func__ << " shared_blob release " << final
++it) {
bufferlist bl;
int r = _do_read(c.get(), o, it->offset, it->length, bl, 0);
- assert(r == (int)it->length);
+ ceph_assert(r == (int)it->length);
_do_write_data(txc, c, o, it->offset, it->length, bl, &wctx_gc);
logger->inc(l_bluestore_gc_merged, it->length);
r = -EEXIST;
goto out;
}
- assert(txc->onodes.count(newo) == 0);
+ ceph_assert(txc->onodes.count(newo) == 0);
}
txc->t->rmkey(PREFIX_OBJ, oldo->key.c_str(), oldo->key.size());
goto out;
}
auto p = new_coll_map.find(cid);
- assert(p != new_coll_map.end());
+ ceph_assert(p != new_coll_map.end());
*c = p->second;
(*c)->cnode.bits = bits;
coll_map[cid] = *c;
goto out;
}
size_t nonexistent_count = 0;
- assert((*c)->exists);
+ ceph_assert((*c)->exists);
if ((*c)->onode_map.map_any([&](OnodeRef o) {
if (o->exists) {
dout(10) << __func__ << " " << o->oid << " " << o
spg_t pgid, dest_pgid;
bool is_pg = c->cid.is_pg(&pgid);
- assert(is_pg);
+ ceph_assert(is_pg);
is_pg = d->cid.is_pg(&dest_pgid);
- assert(is_pg);
+ ceph_assert(is_pg);
// the destination should initially be empty.
- assert(d->onode_map.empty());
- assert(d->shared_blob_set.empty());
- assert(d->cnode.bits == bits);
+ ceph_assert(d->onode_map.empty());
+ ceph_assert(d->shared_blob_set.empty());
+ ceph_assert(d->cnode.bits == bits);
c->split_cache(d.get());
// adjust bits. note that this will be redundant for all but the first
// split call for this parent (first child).
c->cnode.bits = bits;
- assert(d->cnode.bits == bits);
+ ceph_assert(d->cnode.bits == bits);
r = 0;
bufferlist bl;
dout(10) << __func__ << dendl;
for (auto i : cache_shards) {
i->trim_all();
- assert(i->empty());
+ ceph_assert(i->empty());
}
for (auto& p : coll_map) {
if (!p.second->onode_map.empty()) {
derr << __func__ << " stray shared blobs on " << p.first << dendl;
p.second->shared_blob_set.dump<0>(cct);
}
- assert(p.second->onode_map.empty());
- assert(p.second->shared_blob_set.empty());
+ ceph_assert(p.second->onode_map.empty());
+ ceph_assert(p.second->shared_blob_set.empty());
}
coll_map.clear();
}
size_t BlueStoreRepairer::StoreSpaceTracker::filter_out(
const interval_set<uint64_t>& extents)
{
- assert(granularity); // initialized
+ ceph_assert(granularity); // initialized
// can't be called a second time
- assert(!was_filtered_out);
- assert(collections_bfs.size() == objects_bfs.size());
+ ceph_assert(!was_filtered_out);
+ ceph_assert(collections_bfs.size() == objects_bfs.size());
uint64_t prev_pos = 0;
uint64_t npos = collections_bfs.size();
uint64_t pos = max(e.first / granularity, prev_pos);
uint64_t end_pos = 1 + (e.first + e.second - 1) / granularity;
while (pos != npos && pos < end_pos) {
- assert( collections_bfs[pos].element_count() ==
+ ceph_assert( collections_bfs[pos].element_count() ==
objects_bfs[pos].element_count());
if (collections_bfs[pos].element_count()) {
collections_reduced.push_back(std::move(collections_bfs[pos]));
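For illustration, the slot range computed above with a hypothetical granularity of 1 MiB and an extent at offset 3.5 MiB, length 2 MiB (assuming prev_pos <= 3):

// pos     = max(3670016 / 1048576, prev_pos)       = 3
// end_pos = 1 + (3670016 + 2097152 - 1) / 1048576  = 6
// slots 3, 4 and 5 are examined; the non-empty bloom filters among them are
// moved into the *_reduced vectors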
{
if (misreferenced_extents.size()) {
size_t n = space_usage_tracker.filter_out(misreferenced_extents);
- assert(n > 0);
+ ceph_assert(n > 0);
if (!fix_misreferences_txn) {
fix_misreferences_txn = db->get_transaction();
}
}
void truncate(uint32_t newlen) {
- assert(newlen < length);
+ ceph_assert(newlen < length);
if (data.length()) {
bufferlist t;
t.substr_of(data, 0, newlen);
state_list_t writing; ///< writing buffers, sorted by seq, ascending
~BufferSpace() {
- assert(buffer_map.empty());
- assert(writing.empty());
+ ceph_assert(buffer_map.empty());
+ ceph_assert(writing.empty());
}
void _add_buffer(Cache* cache, Buffer *b, int level, Buffer *near) {
++it;
}
- assert(it->seq >= b->seq);
+ ceph_assert(it->seq >= b->seq);
// note that this will insert b before it
// hence the order is maintained
writing.insert(it, *b);
}
void _rm_buffer(Cache* cache,
map<uint32_t, std::unique_ptr<Buffer>>::iterator p) {
- assert(p != buffer_map.end());
+ ceph_assert(p != buffer_map.end());
cache->_audit("_rm_buffer start");
if (p->second->is_writing()) {
writing.erase(writing.iterator_to(*p->second));
f->open_array_section("buffers");
for (auto& i : buffer_map) {
f->open_object_section("buffer");
- assert(i.first == i.second->offset);
+ ceph_assert(i.first == i.second->offset);
i.second->dump(f);
f->close_section();
}
void remove(SharedBlob *sb) {
std::lock_guard<std::mutex> l(lock);
- assert(sb->get_parent() == this);
+ ceph_assert(sb->get_parent() == this);
// only remove if it still points to us
auto p = sb_map.find(sb->get_sbid());
if (p != sb_map.end() &&
if (blob_bl.length() == 0 ) {
encode(blob, blob_bl);
} else {
- assert(blob_bl.length());
+ ceph_assert(blob_bl.length());
}
}
void bound_encode(
}
void assign_blob(const BlobRef& b) {
- assert(!blob);
+ ceph_assert(!blob);
blob = b;
blob->shared_blob->get_cache()->add_extent();
}
BlobRef get_spanning_blob(int id) {
auto p = spanning_blob_map.find(id);
- assert(p != spanning_blob_map.end());
+ ceph_assert(p != spanning_blob_map.end());
return p->second;
}
return false;
}
int s = seek_shard(offset);
- assert(s >= 0);
+ ceph_assert(s >= 0);
if (s == (int)shards.size() - 1) {
return false; // last shard
}
buffer_size += b->length;
}
void _rm_buffer(Buffer *b) override {
- assert(buffer_size >= b->length);
+ ceph_assert(buffer_size >= b->length);
buffer_size -= b->length;
auto q = buffer_lru.iterator_to(*b);
buffer_lru.erase(q);
_add_buffer(b, 0, nullptr);
}
void _adjust_buffer_size(Buffer *b, int64_t delta) override {
- assert((int64_t)buffer_size + delta >= 0);
+ ceph_assert((int64_t)buffer_size + delta >= 0);
buffer_size += delta;
}
void _touch_buffer(Buffer *b) override {
break;
case BUFFER_WARM_OUT:
// move from warm_out to hot LRU
- assert(0 == "this happens via discard hint");
+ ceph_assert(0 == "this happens via discard hint");
break;
case BUFFER_HOT:
// move to front of hot LRU
store(store), cid(c) {
}
~OpSequencer() {
- assert(q.empty());
+ ceph_assert(q.empty());
}
void queue_new(TransContext *txc) {
bool _is_all_kv_submitted() {
// caller must hold qlock & q.empty() must not empty
- assert(!q.empty());
+ ceph_assert(!q.empty());
TransContext *txc = &q.back();
if (txc->state >= TransContext::STATE_KV_SUBMITTED) {
return true;
void *entry() override;
void init() {
- assert(stop == false);
+ ceph_assert(stop == false);
create("bstore_mempool");
}
void shutdown() {
uint64_t offset);
void compact() override {
- assert(db);
+ ceph_assert(db);
db->compact();
}
bool has_builtin_csum() const override {
void init(uint64_t total,
uint64_t min_alloc_size,
uint64_t mem_cap = DEF_MEM_CAP) {
- assert(!granularity); // not initialized yet
- assert(min_alloc_size && isp2(min_alloc_size));
- assert(mem_cap);
+ ceph_assert(!granularity); // not initialized yet
+ ceph_assert(min_alloc_size && isp2(min_alloc_size));
+ ceph_assert(mem_cap);
total = round_up_to(total, min_alloc_size);
granularity = total * BLOOM_FILTER_TABLE_SIZE * 2 / mem_cap;
}
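Read purely as algebra on the expression above (no values taken from the source, and assuming each bloom filter's footprint is proportional to BLOOM_FILTER_TABLE_SIZE):

// total / granularity  ==  mem_cap / (2 * BLOOM_FILTER_TABLE_SIZE)
// i.e. each of the two tables (collections and objects) gets one bloom filter
// per granularity-sized stripe of the device, sized so that the whole tracker
// stays roughly within mem_cap.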
inline void set_used(uint64_t offset, uint64_t len,
const coll_t& cid, const ghobject_t& oid) {
- assert(granularity); // initialized
+ ceph_assert(granularity); // initialized
// can't call this func after filter_out has been applied
- assert(!was_filtered_out);
+ ceph_assert(!was_filtered_out);
if (!len) {
return;
}
// determines if collection's present after filtering-out
inline bool is_used(const coll_t& cid) const {
- assert(was_filtered_out);
+ ceph_assert(was_filtered_out);
for(auto& bf : collections_bfs) {
if (bf.contains(get_hash(cid))) {
return true;
}
// determines if object's present after filtering-out
inline bool is_used(const ghobject_t& oid) const {
- assert(was_filtered_out);
+ ceph_assert(was_filtered_out);
for(auto& bf : objects_bfs) {
if (bf.contains(oid.hobj.get_hash())) {
return true;
}
// determines if collection's present before filtering-out
inline bool is_used(const coll_t& cid, uint64_t offs) const {
- assert(granularity); // initialized
- assert(!was_filtered_out);
+ ceph_assert(granularity); // initialized
+ ceph_assert(!was_filtered_out);
auto &bf = collections_bfs[offs / granularity];
if (bf.contains(get_hash(cid))) {
return true;
}
// determines if object's present before filtering-out
inline bool is_used(const ghobject_t& oid, uint64_t offs) const {
- assert(granularity); // initialized
- assert(!was_filtered_out);
+ ceph_assert(granularity); // initialized
+ ceph_assert(!was_filtered_out);
auto &bf = objects_bfs[offs / granularity];
if (bf.contains(oid.hobj.get_hash())) {
return true;
// put the freelistmanagers in different prefixes because the merge
// op is per prefix, has to be done pre-db-open, and we don't know the
// freelist type until after we open the db.
- assert(prefix == "B");
+ ceph_assert(prefix == "B");
if (type == "bitmap")
return new BitmapFreelistManager(cct, kvdb, "B", "b");
return NULL;
dio = true;
aio = cct->_conf->bdev_aio;
if (!aio) {
- assert(0 == "non-aio not supported");
+ ceph_assert(0 == "non-aio not supported");
}
// disable readahead as it will wreak havoc on our mix of
vdo_fd = -1;
}
- assert(fd_direct >= 0);
+ ceph_assert(fd_direct >= 0);
VOID_TEMP_FAILURE_RETRY(::close(fd_direct));
fd_direct = -1;
- assert(fd_buffered >= 0);
+ ceph_assert(fd_buffered >= 0);
VOID_TEMP_FAILURE_RETRY(::close(fd_buffered));
fd_buffered = -1;
aio, max);
if (r < 0) {
derr << __func__ << " got " << cpp_strerror(r) << dendl;
- assert(0 == "got unexpected error from io_getevents");
+ ceph_assert(0 == "got unexpected error from io_getevents");
}
if (r > 0) {
dout(30) << __func__ << " got " << r << " completed aios" << dendl;
<< dendl;
ioc->set_return_value(-EIO);
} else {
- assert(0 == "got unexpected error from aio_t::get_return_value. "
+ ceph_assert(0 == "got unexpected error from aio_t::get_return_value. "
"This may suggest HW issue. Please check your dmesg!");
}
} else if (aio[i]->length != (uint64_t)r) {
derr << "aio to " << aio[i]->offset << "~" << aio[i]->length
<< " but returned: " << r << dendl;
- assert(0 == "unexpected aio error");
+ ceph_assert(0 == "unexpected aio error");
}
dout(10) << __func__ << " finished aio " << aio[i] << " r " << r
<< " since " << debug_stall_since << ", timeout is "
<< cct->_conf->bdev_debug_aio_suicide_timeout
<< "s, suicide" << dendl;
- assert(0 == "stalled aio... buggy kernel or bad device?");
+ ceph_assert(0 == "stalled aio... buggy kernel or bad device?");
}
}
}
void KernelDevice::_discard_thread()
{
std::unique_lock<std::mutex> l(discard_lock);
- assert(!discard_started);
+ ceph_assert(!discard_started);
discard_started = true;
discard_cond.notify_all();
while (true) {
- assert(discard_finishing.empty());
+ ceph_assert(discard_finishing.empty());
if (discard_queued.empty()) {
if (discard_stop)
break;
int pending = ioc->num_pending.load();
ioc->num_running += pending;
ioc->num_pending -= pending;
- assert(ioc->num_pending.load() == 0); // we should be only thread doing this
- assert(ioc->pending_aios.size() == 0);
+ ceph_assert(ioc->num_pending.load() == 0); // we should be only thread doing this
+ ceph_assert(ioc->pending_aios.size() == 0);
if (cct->_conf->bdev_debug_aio) {
list<aio_t>::iterator p = ioc->running_aios.begin();
derr << __func__ << " retries " << retries << dendl;
if (r < 0) {
derr << " aio submit got " << cpp_strerror(r) << dendl;
- assert(r == 0);
+ ceph_assert(r == 0);
}
}
dout(20) << __func__ << " 0x" << std::hex << off << "~" << len << std::dec
<< (buffered ? " (buffered)" : " (direct)")
<< dendl;
- assert(is_valid_io(off, len));
+ ceph_assert(is_valid_io(off, len));
if ((!buffered || bl.get_num_buffers() >= IOV_MAX) &&
bl.rebuild_aligned_size_and_memory(block_size, block_size, IOV_MAX)) {
dout(20) << __func__ << " 0x" << std::hex << off << "~" << len << std::dec
<< (buffered ? " (buffered)" : " (direct)")
<< dendl;
- assert(is_valid_io(off, len));
+ ceph_assert(is_valid_io(off, len));
if ((!buffered || bl.get_num_buffers() >= IOV_MAX) &&
bl.rebuild_aligned_size_and_memory(block_size, block_size, IOV_MAX)) {
dout(5) << __func__ << " 0x" << std::hex << off << "~" << len << std::dec
<< (buffered ? " (buffered)" : " (direct)")
<< dendl;
- assert(is_valid_io(off, len));
+ ceph_assert(is_valid_io(off, len));
_aio_log_start(ioc, off, len);
r = -errno;
goto out;
}
- assert((uint64_t)r == len);
+ ceph_assert((uint64_t)r == len);
pbl->push_back(std::move(p));
dout(40) << "data: ";
int r = 0;
#ifdef HAVE_LIBAIO
if (aio && dio) {
- assert(is_valid_io(off, len));
+ ceph_assert(is_valid_io(off, len));
_aio_log_start(ioc, off, len);
ioc->pending_aios.push_back(aio_t(ioc, fd_direct));
++ioc->num_pending;
<< " error: " << cpp_strerror(r) << dendl;
goto out;
}
- assert((uint64_t)r == aligned_len);
+ ceph_assert((uint64_t)r == aligned_len);
memcpy(buf, p.c_str() + (off - aligned_off), len);
dout(40) << __func__ << " data: ";
{
dout(5) << __func__ << " 0x" << std::hex << off << "~" << len << std::dec
<< dendl;
- assert(len > 0);
- assert(off < size);
- assert(off + len <= size);
+ ceph_assert(len > 0);
+ ceph_assert(off < size);
+ ceph_assert(off + len <= size);
int r = 0;
// if it's direct io and unaligned, we have to use an internal buffer
<< dendl;
goto out;
}
- assert((uint64_t)r == len);
+ ceph_assert((uint64_t)r == len);
}
dout(40) << __func__ << " data: ";
{
dout(5) << __func__ << " 0x" << std::hex << off << "~" << len << std::dec
<< dendl;
- assert(off % block_size == 0);
- assert(len % block_size == 0);
+ ceph_assert(off % block_size == 0);
+ ceph_assert(len % block_size == 0);
int r = posix_fadvise(fd_buffered, off, len, POSIX_FADV_DONTNEED);
if (r) {
r = -r;
// usable queue depth should be reduced by 1 to avoid overflow.
max_queue_depth = opts.io_queue_size - 1;
qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
- assert(qpair != NULL);
+ ceph_assert(qpair != NULL);
// allocate spdk dma memory
for (uint16_t i = 0; i < data_buffer_default_num; i++) {
void *b = spdk_dma_zmalloc(data_buffer_size, CEPH_PAGE_SIZE, NULL);
if (!b) {
derr << __func__ << " failed to create memory pool for nvme data buffer" << dendl;
- assert(b);
+ ceph_assert(b);
}
data_buf_mempool.push_back(b);
}
if (!data_buf_mempool.empty()) {
for (uint16_t i = 0; i < data_buffer_default_num; i++) {
void *b = data_buf_mempool[i];
- assert(b);
+ ceph_assert(b);
spdk_dma_free(b);
}
data_buf_mempool.clear();
return_code(rc),
start(ceph::coarse_real_clock::now()) {}
~Task() {
- assert(!io_request.nseg);
+ ceph_assert(!io_request.nseg);
}
void release_segs(SharedDriverQueueData *queue_data) {
if (io_request.extra_segs) {
Task *t = static_cast<Task*>(cb_arg);
uint32_t i = sgl_offset / data_buffer_size;
uint32_t offset = i * data_buffer_size;
- assert(i <= t->io_request.nseg);
+ ceph_assert(i <= t->io_request.nseg);
for (; i < t->io_request.nseg; i++) {
offset += data_buffer_size;
int try_get(const string &sn_tag, SharedDriverData **driver);
void register_ctrlr(const string &sn_tag, spdk_nvme_ctrlr *c, struct spdk_pci_device *pci_dev,
SharedDriverData **driver) {
- assert(lock.is_locked());
+ ceph_assert(lock.is_locked());
spdk_nvme_ns *ns;
int num_ns = spdk_nvme_ctrlr_get_num_ns(c);
- assert(num_ns >= 1);
+ ceph_assert(num_ns >= 1);
if (num_ns > 1) {
dout(0) << __func__ << " namespace count larger than 1, currently only use the first namespace" << dendl;
}
<< ":" << spdk_pci_device_get_dev(pci_dev) << ":" << spdk_pci_device_get_func(pci_dev) << dendl;
// only support one device per osd now!
- assert(shared_driver_datas.empty());
+ ceph_assert(shared_driver_datas.empty());
// index 0 is occupied by the master thread
shared_driver_datas.push_back(new SharedDriverData(shared_driver_datas.size()+1, sn_tag, c, ns));
*driver = shared_driver_datas.back();
pci_dev = spdk_pci_get_device(&pci_addr);
if (!pci_dev) {
dout(0) << __func__ << " failed to get pci device" << dendl;
- assert(pci_dev);
+ ceph_assert(pci_dev);
}
NVMEManager::ProbeContext *ctx = static_cast<NVMEManager::ProbeContext*>(cb_ctx);
probe_queue.pop_front();
r = spdk_nvme_probe(NULL, ctxt, probe_cb, attach_cb, NULL);
if (r < 0) {
- assert(!ctxt->driver);
+ ceph_assert(!ctxt->driver);
derr << __func__ << " device probe nvme failed" << dendl;
}
ctxt->done = true;
IOContext *ctx = task->ctx;
SharedDriverQueueData *queue = task->queue;
- assert(queue != NULL);
- assert(ctx != NULL);
+ ceph_assert(queue != NULL);
+ ceph_assert(ctx != NULL);
--queue->current_queue_depth;
auto dur = std::chrono::duration_cast<std::chrono::nanoseconds>(
ceph::coarse_real_clock::now() - task->start);
if (task->command == IOCommand::WRITE_COMMAND) {
queue->logger->tinc(l_bluestore_nvmedevice_write_lat, dur);
- assert(!spdk_nvme_cpl_is_error(completion));
+ ceph_assert(!spdk_nvme_cpl_is_error(completion));
dout(20) << __func__ << " write/zero op successfully, left "
<< queue->queue_op_seq - queue->completed_op_seq << dendl;
// check waiting count before doing callback (which may
delete task;
} else if (task->command == IOCommand::READ_COMMAND) {
queue->logger->tinc(l_bluestore_nvmedevice_read_lat, dur);
- assert(!spdk_nvme_cpl_is_error(completion));
+ ceph_assert(!spdk_nvme_cpl_is_error(completion));
dout(20) << __func__ << " read op successfully" << dendl;
task->fill_cb();
task->release_segs(queue);
ctx->try_aio_wake();
}
} else {
- assert(task->command == IOCommand::FLUSH_COMMAND);
- assert(!spdk_nvme_cpl_is_error(completion));
+ ceph_assert(task->command == IOCommand::FLUSH_COMMAND);
+ ceph_assert(!spdk_nvme_cpl_is_error(completion));
queue->logger->tinc(l_bluestore_nvmedevice_flush_lat, dur);
dout(20) << __func__ << " flush op successfully" << dendl;
task->return_code = 0;
if (pending && t) {
ioc->num_running += pending;
ioc->num_pending -= pending;
- assert(ioc->num_pending.load() == 0); // we should be only thread doing this
+ ceph_assert(ioc->num_pending.load() == 0); // we should be only thread doing this
// Only need to push the first entry
ioc->nvme_task_first = ioc->nvme_task_last = nullptr;
if (!queue_t)
uint64_t len = bl.length();
dout(20) << __func__ << " " << off << "~" << len << " ioc " << ioc
<< " buffered " << buffered << dendl;
- assert(is_valid_io(off, len));
+ ceph_assert(is_valid_io(off, len));
write_split(this, off, bl, ioc);
dout(5) << __func__ << " " << off << "~" << len << dendl;
uint64_t len = bl.length();
dout(20) << __func__ << " " << off << "~" << len << " buffered "
<< buffered << dendl;
- assert(off % block_size == 0);
- assert(len % block_size == 0);
- assert(len > 0);
- assert(off < size);
- assert(off + len <= size);
+ ceph_assert(off % block_size == 0);
+ ceph_assert(len % block_size == 0);
+ ceph_assert(len > 0);
+ ceph_assert(off < size);
+ ceph_assert(off + len <= size);
IOContext ioc(cct, NULL);
write_split(this, off, bl, &ioc);
bool buffered)
{
dout(5) << __func__ << " " << off << "~" << len << " ioc " << ioc << dendl;
- assert(is_valid_io(off, len));
+ ceph_assert(is_valid_io(off, len));
Task *t = new Task(this, IOCommand::READ_COMMAND, off, len, 1);
bufferptr p = buffer::create_page_aligned(len);
IOContext *ioc)
{
dout(20) << __func__ << " " << off << "~" << len << " ioc " << ioc << dendl;
- assert(is_valid_io(off, len));
+ ceph_assert(is_valid_io(off, len));
Task *t = new Task(this, IOCommand::READ_COMMAND, off, len);
int NVMEDevice::read_random(uint64_t off, uint64_t len, char *buf, bool buffered)
{
- assert(len > 0);
- assert(off < size);
- assert(off + len <= size);
+ ceph_assert(len > 0);
+ ceph_assert(off < size);
+ ceph_assert(off + len <= size);
uint64_t aligned_off = align_down(off, block_size);
uint64_t aligned_len = align_up(off+len, block_size) - aligned_off;
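A worked example of the alignment above (block_size and offsets hypothetical): block_size = 4096, off = 5000, len = 1000:

// aligned_off = align_down(5000, 4096)             = 4096
// aligned_len = align_up(5000 + 1000, 4096) - 4096 = 4096
// the device read covers [4096, 8192); the caller's 1000 bytes start at
// offset off - aligned_off = 904 within that aligned buffer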
{
dout(1) << __func__ << dendl;
- assert(addr != NULL);
+ ceph_assert(addr != NULL);
pmem_unmap(addr, size);
- assert(fd >= 0);
+ ceph_assert(fd >= 0);
VOID_TEMP_FAILURE_RETRY(::close(fd));
fd = -1;
void PMEMDevice::aio_submit(IOContext *ioc)
{
if (ioc->priv) {
- assert(ioc->num_running == 0);
+ ceph_assert(ioc->num_running == 0);
aio_callback(aio_callback_priv, ioc->priv);
} else {
ioc->try_aio_wake();
{
uint64_t len = bl.length();
dout(20) << __func__ << " " << off << "~" << len << dendl;
- assert(is_valid_io(off, len));
+ ceph_assert(is_valid_io(off, len));
dout(40) << "data: ";
bl.hexdump(*_dout);
bool buffered)
{
dout(5) << __func__ << " " << off << "~" << len << dendl;
- assert(is_valid_io(off, len));
+ ceph_assert(is_valid_io(off, len));
bufferptr p = buffer::create_page_aligned(len);
memcpy(p.c_str(), addr + off, len);
int PMEMDevice::read_random(uint64_t off, uint64_t len, char *buf, bool buffered)
{
dout(5) << __func__ << " " << off << "~" << len << dendl;
- assert(is_valid_io(off, len));
+ ceph_assert(is_valid_io(off, len));
memcpy(buf, addr + off, len);
return 0;
}
num_free -= *length;
- assert(num_free >= 0);
+ ceph_assert(num_free >= 0);
last_alloc = *offset + *length;
return 0;
}
double StupidAllocator::get_fragmentation(uint64_t alloc_unit)
{
- assert(alloc_unit);
+ ceph_assert(alloc_unit);
double res;
uint64_t max_intervals = 0;
uint64_t intervals = 0;
}
ldout(cct, 30) << __func__ << " " << intervals << "/" << max_intervals
<< dendl;
- assert(intervals <= max_intervals);
+ ceph_assert(intervals <= max_intervals);
if (!intervals || max_intervals <= 1) {
return 0.0;
}
rm.subtract(overlap);
}
}
- assert(rm.empty());
+ ceph_assert(rm.empty());
num_free -= length;
- assert(num_free >= 0);
+ ceph_assert(num_free >= 0);
}
++left;
++cur;
}
- assert(aios_size >= left);
+ ceph_assert(aios_size >= left);
int done = 0;
while (left > 0) {
int r = io_submit(ctx, std::min(left, max_iodepth), piocb + done);
}
return r;
}
- assert(r > 0);
+ ceph_assert(r > 0);
done += r;
left -= r;
attempts = 16;
ctx(0) {
}
~aio_queue_t() {
- assert(ctx == 0);
+ ceph_assert(ctx == 0);
}
int init() {
- assert(ctx == 0);
+ ceph_assert(ctx == 0);
int r = io_setup(max_iodepth, &ctx);
if (r < 0) {
if (ctx) {
void shutdown() {
if (ctx) {
int r = io_destroy(ctx);
- assert(r == 0);
+ ceph_assert(r == 0);
ctx = 0;
}
}
unsigned refs = 0;
for (const auto &p : ref_map) {
if (p.first < pos)
- assert(0 == "overlap");
+ ceph_assert(0 == "overlap");
if (p.first == pos && p.second.refs == refs)
- assert(0 == "unmerged");
+ ceph_assert(0 == "unmerged");
pos = p.first + p.second.length;
refs = p.second.refs;
}
}
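A sketch of the two consistency checks above on hypothetical ref_map contents:

// { 0: {length 4096, refs 2}, 4096: {length 4096, refs 2} }  -> "unmerged":
//     adjacent records with equal refcounts should have been coalesced
// { 0: {length 8192, refs 2}, 4096: {length 4096, refs 1} }  -> "overlap":
//     the second record starts before the previous one ends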
if (p->first < offset) {
// split off the portion before offset
- assert(p->first + p->second.length > offset);
+ ceph_assert(p->first + p->second.length > offset);
uint64_t left = p->first + p->second.length - offset;
p->second.length = offset - p->first;
p = ref_map.insert(map<uint64_t,record_t>::value_type(
offset, record_t(left, p->second.refs))).first;
// continue below
}
- assert(p->first == offset);
+ ceph_assert(p->first == offset);
if (length < p->second.length) {
ref_map.insert(make_pair(offset + length,
record_t(p->second.length - length,
auto p = ref_map.lower_bound(offset);
if (p == ref_map.end() || p->first > offset) {
if (p == ref_map.begin()) {
- assert(0 == "put on missing extent (nothing before)");
+ ceph_assert(0 == "put on missing extent (nothing before)");
}
--p;
if (p->first + p->second.length <= offset) {
- assert(0 == "put on missing extent (gap)");
+ ceph_assert(0 == "put on missing extent (gap)");
}
}
if (p->first < offset) {
offset, record_t(left, p->second.refs))).first;
}
while (length > 0) {
- assert(p->first == offset);
+ ceph_assert(p->first == offset);
if (length < p->second.length) {
if (p->second.refs != 1) {
unshared = false;
void bluestore_blob_use_tracker_t::allocate()
{
- assert(num_au != 0);
+ ceph_assert(num_au != 0);
bytes_per_au = new uint32_t[num_au];
for (uint32_t i = 0; i < num_au; ++i) {
bytes_per_au[i] = 0;
void bluestore_blob_use_tracker_t::init(
uint32_t full_length, uint32_t _au_size) {
- assert(!au_size || is_empty());
- assert(_au_size > 0);
- assert(full_length > 0);
+ ceph_assert(!au_size || is_empty());
+ ceph_assert(_au_size > 0);
+ ceph_assert(full_length > 0);
clear();
uint32_t _num_au = round_up_to(full_length, _au_size) / _au_size;
au_size = _au_size;
void bluestore_blob_use_tracker_t::get(
uint32_t offset, uint32_t length)
{
- assert(au_size);
+ ceph_assert(au_size);
if (!num_au) {
total_bytes += length;
} else {
uint32_t offset, uint32_t length,
PExtentVector *release_units)
{
- assert(au_size);
+ ceph_assert(au_size);
if (release_units) {
release_units->clear();
}
bool maybe_empty = true;
if (!num_au) {
- assert(total_bytes >= length);
+ ceph_assert(total_bytes >= length);
total_bytes -= length;
} else {
auto end = offset + length;
auto phase = offset % au_size;
size_t pos = offset / au_size;
auto diff = std::min(au_size - phase, end - offset);
- assert(diff <= bytes_per_au[pos]);
+ ceph_assert(diff <= bytes_per_au[pos]);
bytes_per_au[pos] -= diff;
offset += (phase ? au_size - phase : au_size);
if (bytes_per_au[pos] == 0) {
bool bluestore_blob_use_tracker_t::can_split_at(uint32_t blob_offset) const
{
- assert(au_size);
+ ceph_assert(au_size);
return (blob_offset % au_size) == 0 &&
blob_offset < num_au * au_size;
}
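For illustration, with au_size = 4096 and num_au = 4 (hypothetical values) the predicate above gives:

// can_split_at(8192)  -> true   (AU aligned and inside the 16384-byte tracked range)
// can_split_at(6144)  -> false  (not a multiple of au_size)
// can_split_at(16384) -> false  (at or beyond the end of the tracked range)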
uint32_t blob_offset,
bluestore_blob_use_tracker_t* r)
{
- assert(au_size);
- assert(can_split());
- assert(can_split_at(blob_offset));
- assert(r->is_empty());
+ ceph_assert(au_size);
+ ceph_assert(can_split());
+ ceph_assert(can_split_at(blob_offset));
+ ceph_assert(r->is_empty());
uint32_t new_num_au = blob_offset / au_size;
r->init( (num_au - new_num_au) * au_size, au_size);
if (extents.size() == 0) {
// if the blob is compressed then the logical length must already be set;
// otherwise it must be unset.
- assert((is_compressed() && logical_length != 0) ||
+ ceph_assert((is_compressed() && logical_length != 0) ||
(!is_compressed() && logical_length == 0));
extents.reserve(allocs.size() + (b_off ? 1 : 0));
logical_length = new_len;
}
} else {
- assert(!is_compressed()); // partial allocations are forbidden when
+ ceph_assert(!is_compressed()); // partial allocations are forbidden when
// compressed
- assert(b_off < logical_length);
+ ceph_assert(b_off < logical_length);
uint32_t cur_offs = 0;
auto start_it = extents.begin();
size_t pos = 0;
while (true) {
- assert(start_it != extents.end());
+ ceph_assert(start_it != extents.end());
if (cur_offs + start_it->length > b_off) {
break;
}
auto end_it = start_it;
while (true) {
- assert(end_it != extents.end());
- assert(!end_it->is_valid());
+ ceph_assert(end_it != extents.end());
+ ceph_assert(!end_it->is_valid());
if (cur_offs + end_it->length >= end_off) {
break;
}
cur_offs += end_it->length;
++end_it;
}
- assert(cur_offs + end_it->length >= end_off);
+ ceph_assert(cur_offs + end_it->length >= end_off);
uint32_t tail = cur_offs + end_it->length - end_off;
start_it = extents.erase(start_it, end_it + 1);
}
pos += e.length;
}
- assert(is_compressed() || get_logical_length() == pos);
+ ceph_assert(is_compressed() || get_logical_length() == pos);
extents.resize(1);
extents[0].offset = bluestore_pextent_t::INVALID_OFFSET;
extents[0].length = pos;
if (loffs_it == lend ||
pext_loffs_start + pext_it->length <= loffs_it->offset) {
int delta0 = pext_loffs - pext_loffs_start;
- assert(delta0 >= 0);
+ ceph_assert(delta0 >= 0);
if ((uint32_t)delta0 < pext_it->length) {
vb.add(pext_it->offset + delta0, pext_it->length - delta0);
}
else {
//assert(pext_loffs == pext_loffs_start);
int delta0 = pext_loffs - pext_loffs_start;
- assert(delta0 >= 0);
+ ceph_assert(delta0 >= 0);
int delta = loffs_it->offset - pext_loffs;
- assert(delta >= 0);
+ ceph_assert(delta >= 0);
if (delta > 0) {
vb.add(pext_it->offset + delta0, delta);
pext_loffs += delta;
rb.csum_type = csum_type;
rb.csum_chunk_order = csum_chunk_order;
size_t csum_order = get_csum_chunk_size();
- assert(blob_offset % csum_order == 0);
+ ceph_assert(blob_offset % csum_order == 0);
size_t pos = (blob_offset / csum_order) * get_csum_value_size();
// deep copy csum data
bufferptr old;
if (num_au) {
new_len = round_up_to(new_len, au_size);
uint32_t _num_au = new_len / au_size;
- assert(_num_au <= num_au);
+ ceph_assert(_num_au <= num_au);
if (_num_au) {
num_au = _num_au; // bytes_per_au array is left unmodified
}
void add_tail(uint32_t new_len, uint32_t _au_size) {
auto full_size = au_size * (num_au ? num_au : 1);
- assert(new_len >= full_size);
+ ceph_assert(new_len >= full_size);
if (new_len == full_size) {
return;
}
uint32_t old_total = total_bytes;
total_bytes = 0;
init(new_len, _au_size);
- assert(num_au);
+ ceph_assert(num_au);
bytes_per_au[0] = old_total;
} else {
- assert(_au_size == au_size);
+ ceph_assert(_au_size == au_size);
new_len = round_up_to(new_len, au_size);
uint32_t _num_au = new_len / au_size;
- assert(_num_au >= num_au);
+ ceph_assert(_num_au >= num_au);
if (_num_au > num_au) {
auto old_bytes = bytes_per_au;
auto old_num_au = num_au;
DENC_HELPERS;
void bound_encode(size_t& p, uint64_t struct_v) const {
- assert(struct_v == 1 || struct_v == 2);
+ ceph_assert(struct_v == 1 || struct_v == 2);
denc(extents, p);
denc_varint(flags, p);
denc_varint_lowz(logical_length, p);
}
void encode(bufferlist::contiguous_appender& p, uint64_t struct_v) const {
- assert(struct_v == 1 || struct_v == 2);
+ ceph_assert(struct_v == 1 || struct_v == 2);
denc(extents, p);
denc_varint(flags, p);
if (is_compressed()) {
}
void decode(bufferptr::const_iterator& p, uint64_t struct_v) {
- assert(struct_v == 1 || struct_v == 2);
+ ceph_assert(struct_v == 1 || struct_v == 2);
denc(extents, p);
denc_varint(flags, p);
if (is_compressed()) {
}
uint64_t calc_offset(uint64_t x_off, uint64_t *plen) const {
auto p = extents.begin();
- assert(p != extents.end());
+ ceph_assert(p != extents.end());
while (x_off >= p->length) {
x_off -= p->length;
++p;
- assert(p != extents.end());
+ ceph_assert(p != extents.end());
}
if (plen)
*plen = p->length - x_off;
bool _validate_range(uint64_t b_off, uint64_t b_len,
bool require_allocated) const {
auto p = extents.begin();
- assert(p != extents.end());
+ ceph_assert(p != extents.end());
while (b_off >= p->length) {
b_off -= p->length;
++p;
- assert(p != extents.end());
+ ceph_assert(p != extents.end());
}
b_len += b_off;
while (b_len) {
- assert(p != extents.end());
+ ceph_assert(p != extents.end());
if (require_allocated != p->is_valid()) {
return false;
}
b_len -= p->length;
++p;
}
- assert(0 == "we should not get here");
+ ceph_assert(0 == "we should not get here");
return false;
}
return false;
}
uint64_t blob_len = get_logical_length();
- assert((blob_len % (sizeof(unused)*8)) == 0);
- assert(offset + length <= blob_len);
+ ceph_assert((blob_len % (sizeof(unused)*8)) == 0);
+ ceph_assert(offset + length <= blob_len);
uint64_t chunk_size = blob_len / (sizeof(unused)*8);
uint64_t start = offset / chunk_size;
uint64_t end = round_up_to(offset + length, chunk_size) / chunk_size;
/// mark a range that has never been used
void add_unused(uint64_t offset, uint64_t length) {
uint64_t blob_len = get_logical_length();
- assert((blob_len % (sizeof(unused)*8)) == 0);
- assert(offset + length <= blob_len);
+ ceph_assert((blob_len % (sizeof(unused)*8)) == 0);
+ ceph_assert(offset + length <= blob_len);
uint64_t chunk_size = blob_len / (sizeof(unused)*8);
uint64_t start = round_up_to(offset, chunk_size) / chunk_size;
uint64_t end = (offset + length) / chunk_size;
void mark_used(uint64_t offset, uint64_t length) {
if (has_unused()) {
uint64_t blob_len = get_logical_length();
- assert((blob_len % (sizeof(unused)*8)) == 0);
- assert(offset + length <= blob_len);
+ ceph_assert((blob_len % (sizeof(unused)*8)) == 0);
+ ceph_assert(offset + length <= blob_len);
uint64_t chunk_size = blob_len / (sizeof(unused)*8);
uint64_t start = offset / chunk_size;
uint64_t end = round_up_to(offset + length, chunk_size) / chunk_size;
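A worked example of the chunking above, assuming a 16-bit unused bitmap and a 64 KiB blob, so chunk_size = 65536 / 16 = 4096:

// add_unused(1000, 10000): start = round_up(1000)/4096 = 1, end = 11000/4096 = 2
//   -> only chunk [4096, 8192) is marked unused; partially covered chunks are skipped
// mark_used(5000, 100):    start = 5000/4096 = 1, end = round_up(5100)/4096 = 2
//   -> chunk [4096, 8192) is cleared; any chunk touched at all counts as used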
static_assert(std::is_invocable_r_v<int, F, uint64_t, uint64_t>);
auto p = extents.begin();
- assert(p != extents.end());
+ ceph_assert(p != extents.end());
while (x_off >= p->length) {
x_off -= p->length;
++p;
- assert(p != extents.end());
+ ceph_assert(p != extents.end());
}
while (x_len > 0) {
- assert(p != extents.end());
+ ceph_assert(p != extents.end());
uint64_t l = std::min(p->length - x_off, x_len);
int r = f(p->offset + x_off, l);
if (r < 0)
static_assert(std::is_invocable_v<F, uint64_t, bufferlist&>);
auto p = extents.begin();
- assert(p != extents.end());
+ ceph_assert(p != extents.end());
while (x_off >= p->length) {
x_off -= p->length;
++p;
- assert(p != extents.end());
+ ceph_assert(p != extents.end());
}
bufferlist::iterator it = bl.begin();
uint64_t x_len = bl.length();
while (x_len > 0) {
- assert(p != extents.end());
+ ceph_assert(p != extents.end());
uint64_t l = std::min(p->length - x_off, x_len);
bufferlist t;
it.copy(l, t);
const char *p = csum_data.c_str();
switch (cs) {
case 0:
- assert(0 == "no csum data, bad index");
+ ceph_assert(0 == "no csum data, bad index");
case 1:
return reinterpret_cast<const uint8_t*>(p)[i];
case 2:
case 8:
return reinterpret_cast<const __le64*>(p)[i];
default:
- assert(0 == "unrecognized csum word size");
+ ceph_assert(0 == "unrecognized csum word size");
}
}
const char *get_csum_item_ptr(unsigned i) const {
}
}
void add_tail(uint32_t new_len) {
- assert(is_mutable());
- assert(!has_unused());
- assert(new_len > logical_length);
+ ceph_assert(is_mutable());
+ ceph_assert(!has_unused());
+ ceph_assert(new_len > logical_length);
extents.emplace_back(
bluestore_pextent_t(
bluestore_pextent_t::INVALID_OFFSET,
interval_t res_candidate;
if (tail->length != 0) {
- assert((tail->offset % l0_granularity) == 0);
- assert((tail->length % l0_granularity) == 0);
+ ceph_assert((tail->offset % l0_granularity) == 0);
+ ceph_assert((tail->length % l0_granularity) == 0);
res_candidate.offset = tail->offset / l0_granularity;
res_candidate.length = tail->length / l0_granularity;
}
search_ctx_t* ctx)
{
auto d = CHILD_PER_SLOT;
- assert((pos_start % d) == 0);
- assert((pos_end % d) == 0);
+ ceph_assert((pos_start % d) == 0);
+ ceph_assert((pos_end % d) == 0);
uint64_t l0_w = slotset_width * CHILD_PER_SLOT_L0;
auto d0 = bits_per_slotset;
uint64_t l1_w = CHILD_PER_SLOT;
// this should be aligned with slotset boundaries
- assert(0 == (l0_pos % d0));
- assert(0 == (l0_pos_end % d0));
+ ceph_assert(0 == (l0_pos % d0));
+ ceph_assert(0 == (l0_pos_end % d0));
int64_t idx = l0_pos / bits_per_slot;
int64_t idx_end = l0_pos_end / bits_per_slot;
idx = p2roundup(idx, int64_t(slotset_width));
}
if ((idx % slotset_width) == 0) {
- assert(mask_to_apply != L1_ENTRY_NOT_USED);
+ ceph_assert(mask_to_apply != L1_ENTRY_NOT_USED);
uint64_t shift = (l1_pos % l1_w) * L1_ENTRY_WIDTH;
slot_t& slot_val = l1[l1_pos / l1_w];
auto mask = slot_t(L1_ENTRY_MASK) << shift;
// full length match required.
if (ctx.affordable_len) {
// allocate as specified
- assert(ctx.affordable_len >= length);
+ ceph_assert(ctx.affordable_len >= length);
auto pos = ctx.affordable_offs / l0_granularity;
_mark_alloc_l1_l0(pos, pos + 1);
res = interval_t(ctx.affordable_offs, length);
// allocate from free slot sets
if (ctx.free_count) {
auto l = std::min(length, ctx.free_count * l1_granularity);
- assert((l % l0_granularity) == 0);
+ ceph_assert((l % l0_granularity) == 0);
auto pos_end = ctx.free_l1_pos * l0_w + l / l0_granularity;
_mark_alloc_l1_l0(ctx.free_l1_pos * l0_w, pos_end);
if (ctx.free_count) {
auto l = std::min(length, ctx.free_count * l1_granularity);
- assert((l % l0_granularity) == 0);
+ ceph_assert((l % l0_granularity) == 0);
auto pos_end = ctx.free_l1_pos * l0_w + l / l0_granularity;
_mark_alloc_l1_l0(ctx.free_l1_pos * l0_w, pos_end);
}
// we can terminate early only on a free entry
- assert(ctx.fully_processed);
+ ceph_assert(ctx.fully_processed);
// check partially free slot sets first (including neighboring),
// full length match required.
if (ctx.affordable_len) {
- assert(ctx.affordable_len >= length);
- assert((length % l0_granularity) == 0);
+ ceph_assert(ctx.affordable_len >= length);
+ ceph_assert((length % l0_granularity) == 0);
auto pos_start = ctx.affordable_offs + length / l0_granularity;
auto pos_end = (ctx.affordable_offs + length) / l0_granularity;
_mark_alloc_l1_l0(pos_start, pos_end);
} else {
search_ctx_t ctx;
_analyze_partials(pos_start, pos_end, length, min_length, NO_STOP, &ctx);
- assert(ctx.fully_processed);
+ ceph_assert(ctx.fully_processed);
// check partially free slot sets first (including neighboring),
// full length match required.
if (ctx.affordable_len) {
- assert(ctx.affordable_len >= length);
- assert((length % l0_granularity) == 0);
+ ceph_assert(ctx.affordable_len >= length);
+ ceph_assert((length % l0_granularity) == 0);
auto pos_start = ctx.affordable_offs / l0_granularity;
auto pos_end = (ctx.affordable_offs + length) / l0_granularity;
_mark_alloc_l1_l0(pos_start, pos_end);
if (aligned_extent.length > 0) {
aligned_extent.length = std::min(length,
uint64_t(aligned_extent.length));
- assert((aligned_extent.offset % l0_granularity) == 0);
- assert((aligned_extent.length % l0_granularity) == 0);
+ ceph_assert((aligned_extent.offset % l0_granularity) == 0);
+ ceph_assert((aligned_extent.length % l0_granularity) == 0);
auto pos_start = aligned_extent.offset / l0_granularity;
auto pos_end = (aligned_extent.offset + aligned_extent.length) / l0_granularity;
uint64_t d0 = CHILD_PER_SLOT_L0;
uint64_t d1 = CHILD_PER_SLOT;
- assert(0 == (l1_pos_start % (slotset_width * d1)));
- assert(0 == (l1_pos_end % (slotset_width * d1)));
+ ceph_assert(0 == (l1_pos_start % (slotset_width * d1)));
+ ceph_assert(0 == (l1_pos_end % (slotset_width * d1)));
if (min_length != l0_granularity) {
// probably not the most efficient way but
// don't care much about that at the moment
continue;
}
auto free_pos = find_next_set_bit(slot_val, 0);
- assert(free_pos < bits_per_slot);
+ ceph_assert(free_pos < bits_per_slot);
do {
- assert(length > *allocated);
+ ceph_assert(length > *allocated);
bool empty;
empty = _allocate_l0(length, max_length,
++l0_dives;
- assert(l0_pos0 < l0_pos1);
- assert(length > *allocated);
- assert(0 == (l0_pos0 % (slotset_width * d0)));
- assert(0 == (l0_pos1 % (slotset_width * d0)));
- assert(((length - *allocated) % l0_granularity) == 0);
+ ceph_assert(l0_pos0 < l0_pos1);
+ ceph_assert(length > *allocated);
+ ceph_assert(0 == (l0_pos0 % (slotset_width * d0)));
+ ceph_assert(0 == (l0_pos1 % (slotset_width * d0)));
+ ceph_assert(((length - *allocated) % l0_granularity) == 0);
uint64_t need_entries = (length - *allocated) / l0_granularity;
}
auto free_pos = find_next_set_bit(slot_val, 0);
- assert(free_pos < bits_per_slot);
+ ceph_assert(free_pos < bits_per_slot);
auto next_pos = free_pos + 1;
while (next_pos < bits_per_slot &&
(next_pos - free_pos) < need_entries) {
{
bool no_free = true;
uint64_t d = slotset_width * CHILD_PER_SLOT_L0;
- assert(0 == (l0_pos % d));
- assert(0 == (l0_pos_end % d));
+ ceph_assert(0 == (l0_pos % d));
+ ceph_assert(0 == (l0_pos_end % d));
auto idx = l0_pos / CHILD_PER_SLOT_L0;
auto idx_end = l0_pos_end / CHILD_PER_SLOT_L0;
{
bool no_free = true;
uint64_t d = slotset_width * _children_per_slot();
- assert(0 == (l1_pos % d));
- assert(0 == (l1_pos_end % d));
+ ceph_assert(0 == (l1_pos % d));
+ ceph_assert(0 == (l1_pos_end % d));
auto idx = l1_pos / CHILD_PER_SLOT;
auto idx_end = l1_pos_end / CHILD_PER_SLOT;
uint64_t debug_get_free(uint64_t l1_pos0 = 0, uint64_t l1_pos1 = 0)
{
- assert(0 == (l1_pos0 % CHILD_PER_SLOT));
- assert(0 == (l1_pos1 % CHILD_PER_SLOT));
+ ceph_assert(0 == (l1_pos0 % CHILD_PER_SLOT));
+ ceph_assert(0 == (l1_pos1 % CHILD_PER_SLOT));
auto idx0 = l1_pos0 * slotset_width;
auto idx1 = l1_pos1 * slotset_width;
void _init(uint64_t capacity, uint64_t _alloc_unit, bool mark_as_free = true)
{
- assert(isp2(_alloc_unit));
+ ceph_assert(isp2(_alloc_unit));
l1._init(capacity, _alloc_unit, mark_as_free);
l2_granularity =
void _mark_l2_allocated(int64_t l2_pos, int64_t l2_pos_end)
{
auto d = CHILD_PER_SLOT;
- assert(0 <= l2_pos_end);
- assert((int64_t)l2.size() >= (l2_pos_end / d));
+ ceph_assert(0 <= l2_pos_end);
+ ceph_assert((int64_t)l2.size() >= (l2_pos_end / d));
while (l2_pos < l2_pos_end) {
l2[l2_pos / d] &= ~(slot_t(1) << (l2_pos % d));
void _mark_l2_free(int64_t l2_pos, int64_t l2_pos_end)
{
auto d = CHILD_PER_SLOT;
- assert(0 <= l2_pos_end);
- assert((int64_t)l2.size() >= (l2_pos_end / d));
+ ceph_assert(0 <= l2_pos_end);
+ ceph_assert((int64_t)l2.size() >= (l2_pos_end / d));
while (l2_pos < l2_pos_end) {
l2[l2_pos / d] |= (slot_t(1) << (l2_pos % d));
void _mark_l2_on_l1(int64_t l2_pos, int64_t l2_pos_end)
{
auto d = CHILD_PER_SLOT;
- assert(0 <= l2_pos_end);
- assert((int64_t)l2.size() >= (l2_pos_end / d));
+ ceph_assert(0 <= l2_pos_end);
+ ceph_assert((int64_t)l2.size() >= (l2_pos_end / d));
auto idx = l2_pos * slotset_width;
auto idx_end = l2_pos_end * slotset_width;
{
uint64_t prev_allocated = *allocated;
uint64_t d = CHILD_PER_SLOT;
- assert(isp2(min_length));
- assert(min_length <= l2_granularity);
- assert(max_length == 0 || max_length >= min_length);
- assert(max_length == 0 || (max_length % min_length) == 0);
- assert(length >= min_length);
- assert((length % min_length) == 0);
+ ceph_assert(isp2(min_length));
+ ceph_assert(min_length <= l2_granularity);
+ ceph_assert(max_length == 0 || max_length >= min_length);
+ ceph_assert(max_length == 0 || (max_length % min_length) == 0);
+ ceph_assert(length >= min_length);
+ ceph_assert((length % min_length) == 0);
uint64_t l1_w = slotset_width * l1._children_per_slot();
all_set = true;
} else {
free_pos = find_next_set_bit(slot_val, 0);
- assert(free_pos < bits_per_slot);
+ ceph_assert(free_pos < bits_per_slot);
}
do {
- assert(length > *allocated);
+ ceph_assert(length > *allocated);
bool empty = l1._allocate_l1(length,
min_length,
max_length,
++l2_allocs;
auto allocated_here = *allocated - prev_allocated;
- assert(available >= allocated_here);
+ ceph_assert(available >= allocated_here);
available -= allocated_here;
}
std::lock_guard<std::mutex> l(lock);
auto allocated = l1._mark_alloc_l1(o, len);
- assert(available >= allocated);
+ ceph_assert(available >= allocated);
available -= allocated;
_mark_l2_on_l1(l2_pos, l2_pos_end);
}
if (ready) {
return 0;
}
- assert(!parent_iter);
+ ceph_assert(!parent_iter);
if (header->parent) {
Header parent = map->lookup_parent(header);
if (!parent) {
parent_iter = std::make_shared<DBObjectMapIteratorImpl>(map, parent);
}
key_iter = map->db->get_iterator(map->user_prefix(header));
- assert(key_iter);
+ ceph_assert(key_iter);
complete_iter = map->db->get_iterator(map->complete_prefix(header));
- assert(complete_iter);
+ ceph_assert(complete_iter);
cur_iter = key_iter;
- assert(cur_iter);
+ ceph_assert(cur_iter);
ready = true;
return 0;
}
bool DBObjectMap::DBObjectMapIteratorImpl::valid()
{
bool valid = !invalid && ready;
- assert(!valid || cur_iter->valid());
+ ceph_assert(!valid || cur_iter->valid());
return valid;
}
int DBObjectMap::DBObjectMapIteratorImpl::next(bool validate)
{
- assert(cur_iter->valid());
- assert(valid());
+ ceph_assert(cur_iter->valid());
+ ceph_assert(valid());
cur_iter->next();
return adjust();
}
if (r < 0)
return r;
while (parent_iter && parent_iter->valid() && !on_parent()) {
- assert(valid());
+ ceph_assert(valid());
r = lower_bound(parent_iter->key());
if (r < 0)
return r;
return false;
}
- assert(complete_iter->key() <= to_test);
- assert(complete_iter->value().length() >= 1);
+ ceph_assert(complete_iter->key() <= to_test);
+ ceph_assert(complete_iter->value().length() >= 1);
string _end(complete_iter->value().c_str(),
complete_iter->value().length() - 1);
if (_end.empty() || _end > to_test) {
return true;
} else {
complete_iter->next();
- assert(!complete_iter->valid() || complete_iter->key() > to_test);
+ ceph_assert(!complete_iter->valid() || complete_iter->key() > to_test);
return false;
}
}
} else {
invalid = true;
}
- assert(invalid || cur_iter->valid());
+ ceph_assert(invalid || cur_iter->valid());
return 0;
}
if (check_spos(oid, header, spos))
return 0;
remove_map_header(hl, oid, header, t);
- assert(header->num_children > 0);
+ ceph_assert(header->num_children > 0);
header->num_children--;
int r = _clear(header, t);
if (r < 0)
if (!parent) {
return -EINVAL;
}
- assert(parent->num_children > 0);
+ ceph_assert(parent->num_children > 0);
parent->num_children--;
header.swap(parent);
}
return db->submit_transaction(t);
}
- assert(state.legacy);
+ ceph_assert(state.legacy);
{
// We only get here for legacy (v2) stores
// remove current header
remove_map_header(hl, oid, header, t);
- assert(header->num_children > 0);
+ ceph_assert(header->num_children > 0);
header->num_children--;
int r = _clear(header, t);
if (r < 0)
KeyValueDB::Transaction t = db->get_transaction();
write_state(t);
int ret = db->submit_transaction_sync(t);
- assert(ret == 0);
+ ceph_assert(ret == 0);
dout(1) << __func__ << " done" << dendl;
return;
}
const SequencerPosition *spos) {
KeyValueDB::Transaction t = db->get_transaction();
if (oid) {
- assert(spos);
+ ceph_assert(spos);
MapHeaderLock hl(this, *oid);
Header header = lookup_map_header(hl, *oid);
if (header) {
}
int DBObjectMap::write_state(KeyValueDB::Transaction _t) {
- assert(header_lock.is_locked_by_me());
+ ceph_assert(header_lock.is_locked_by_me());
dout(20) << "dbobjectmap: seq is " << state.seq << dendl;
KeyValueDB::Transaction t = _t ? _t : db->get_transaction();
bufferlist bl;
const MapHeaderLock &l,
const ghobject_t &oid)
{
- assert(l.get_locked() == oid);
+ ceph_assert(l.get_locked() == oid);
_Header *header = new _Header();
{
Mutex::Locker l(cache_lock);
if (caches.lookup(oid, header)) {
- assert(!in_use.count(header->seq));
+ ceph_assert(!in_use.count(header->seq));
in_use.insert(header->seq);
return Header(header, RemoveOnDelete(this));
}
caches.add(oid, *ret);
}
- assert(!in_use.count(header->seq));
+ ceph_assert(!in_use.count(header->seq));
in_use.insert(header->seq);
return ret;
}
}
header->num_children = 1;
header->oid = oid;
- assert(!in_use.count(header->seq));
+ ceph_assert(!in_use.count(header->seq));
in_use.insert(header->seq);
write_state();
Header header = Header(new _Header(), RemoveOnDelete(this));
auto iter = out.begin()->second.cbegin();
header->decode(iter);
- assert(header->seq == input->parent);
+ ceph_assert(header->seq == input->parent);
dout(20) << "lookup_parent: parent seq is " << header->seq << " with parent "
<< header->parent << dendl;
in_use.insert(header->seq);
Header header,
KeyValueDB::Transaction t)
{
- assert(l.get_locked() == oid);
+ ceph_assert(l.get_locked() == oid);
dout(20) << "remove_map_header: removing " << header->seq
<< " oid " << oid << dendl;
set<string> to_remove;
const ghobject_t &oid, _Header header,
KeyValueDB::Transaction t)
{
- assert(l.get_locked() == oid);
+ ceph_assert(l.get_locked() == oid);
dout(20) << "set_map_header: setting " << header.seq
<< " oid " << oid << " parent seq "
<< header.parent << dendl;
}
const ghobject_t &get_locked() const {
- assert(locked);
+ ceph_assert(locked);
return *locked;
}
void swap(MapHeaderLock &o) {
- assert(db == o.db);
+ ceph_assert(db == o.db);
// centos6's boost optional doesn't seem to have swap :(
boost::optional<ghobject_t> _locked = o.locked;
~MapHeaderLock() {
if (locked) {
Mutex::Locker l(db->header_lock);
- assert(db->map_header_in_use.count(*locked));
+ ceph_assert(db->map_header_in_use.count(*locked));
db->map_header_cond.Signal();
db->map_header_in_use.erase(*locked);
}
int sync(const ghobject_t *oid=0, const SequencerPosition *spos=0) override;
void compact() override {
- assert(db);
+ ceph_assert(db);
db->compact();
}
db(db) {}
void operator() (_Header *header) {
Mutex::Locker l(db->header_lock);
- assert(db->in_use.count(header->seq));
+ ceph_assert(db->in_use.count(header->seq));
db->in_use.erase(header->seq);
db->header_cond.Signal();
delete header;
public:
const int fd;
explicit FD(int _fd) : fd(_fd) {
- assert(_fd >= 0);
+ ceph_assert(_fd >= 0);
}
int operator*() const {
return fd;
public:
explicit FDCache(CephContext *cct) : cct(cct),
registry_shards(std::max<int64_t>(cct->_conf->filestore_fd_cache_shards, 1)) {
- assert(cct);
+ ceph_assert(cct);
cct->_conf.add_observer(this);
registry = new SharedLRU<ghobject_t, FD>[registry_shards];
for (int i = 0; i < registry_shards; ++i) {
{
int ret;
- assert(fd == -1);
+ ceph_assert(fd == -1);
ret = _open(false, false);
if (ret)
return ret;
// This can not be used on an active journal
int FileJournal::peek_fsid(uuid_d& fsid)
{
- assert(fd == -1);
+ ceph_assert(fd == -1);
int r = _open(false, false);
if (r)
return r;
stop_writer();
// close
- assert(writeq_empty());
- assert(!must_write_header);
- assert(fd >= 0);
+ ceph_assert(writeq_empty());
+ ceph_assert(!must_write_header);
+ ceph_assert(fd >= 0);
_close(fd);
fd = -1;
}
{
dout(10) << "_fdump" << dendl;
- assert(fd == -1);
+ ceph_assert(fd == -1);
int err = _open(false, false);
if (err)
return err;
#ifdef HAVE_LIBAIO
{
Mutex::Locker locker(aio_lock);
- assert(aio_write_queue_ops > 0);
+ ceph_assert(aio_write_queue_ops > 0);
aio_write_queue_ops--;
- assert(aio_write_queue_bytes >= bytes);
+ ceph_assert(aio_write_queue_bytes >= bytes);
aio_write_queue_bytes -= bytes;
}
#else
out:
dout(20) << "prepare_multi_write queue_pos now " << queue_pos << dendl;
- assert((write_pos + bl.length() == queue_pos) ||
+ ceph_assert((write_pos + bl.length() == queue_pos) ||
(write_pos + bl.length() - header.max_size + get_top() == queue_pos));
return 0;
}
void FileJournal::queue_completions_thru(uint64_t seq)
{
- assert(finisher_lock.is_locked());
+ ceph_assert(finisher_lock.is_locked());
utime_t now = ceph_clock_now();
list<completion_item> items;
batch_pop_completions(items);
{
// make sure list segments are page aligned
if (directio && !bl.is_aligned_size_and_memory(block_size, CEPH_DIRECTIO_ALIGNMENT)) {
- assert((bl.length() & (CEPH_DIRECTIO_ALIGNMENT - 1)) == 0);
- assert((pos & (CEPH_DIRECTIO_ALIGNMENT - 1)) == 0);
- assert(0 == "bl was not aligned");
+ ceph_assert((bl.length() & (CEPH_DIRECTIO_ALIGNMENT - 1)) == 0);
+ ceph_assert((pos & (CEPH_DIRECTIO_ALIGNMENT - 1)) == 0);
+ ceph_assert(0 == "bl was not aligned");
}
}
split = header.max_size - pos;
first.substr_of(bl, 0, split);
second.substr_of(bl, split, bl.length() - split);
- assert(first.length() + second.length() == bl.length());
+ ceph_assert(first.length() + second.length() == bl.length());
dout(10) << "do_write wrapping, first bit at " << pos << " len " << first.length()
<< " second bit len " << second.length() << " (orig len " << bl.length() << ")" << dendl;
check_align(first_pos, first);
ceph_abort();
}
- assert(first_pos == get_top());
+ ceph_assert(first_pos == get_top());
} else {
// header too?
if (hbp.length()) {
write_lock.Lock();
- assert(write_pos == pos);
- assert(write_pos % header.alignment == 0);
+ ceph_assert(write_pos == pos);
+ ceph_assert(write_pos % header.alignment == 0);
{
Mutex::Locker locker(finisher_lock);
continue;
}
}
- assert(r == 0);
+ ceph_assert(r == 0);
if (logger) {
logger->inc(l_filestore_journal_wr);
split = header.max_size - pos;
first.substr_of(bl, 0, split);
second.substr_of(bl, split, bl.length() - split);
- assert(first.length() + second.length() == bl.length());
+ ceph_assert(first.length() + second.length() == bl.length());
dout(10) << "do_aio_write wrapping, first bit at " << pos << "~" << first.length() << dendl;
if (write_aio_bl(pos, first, 0)) {
<< ") failed" << dendl;
ceph_abort();
}
- assert(pos == header.max_size);
+ ceph_assert(pos == header.max_size);
if (hbp.length()) {
// be sneaky: include the header in the second fragment
bufferlist tmp;
write_pos = pos;
if (write_pos == header.max_size)
write_pos = get_top();
- assert(write_pos % header.alignment == 0);
+ ceph_assert(write_pos % header.alignment == 0);
}
/**
for (std::list<buffer::ptr>::const_iterator p = bl.buffers().begin();
n < max;
++p, ++n) {
- assert(p != bl.buffers().end());
+ ceph_assert(p != bl.buffers().end());
iov[n].iov_base = (void *)p->c_str();
iov[n].iov_len = p->length();
len += p->length();
continue;
}
check_align(pos, tbl);
- assert(0 == "io_submit got unexpected error");
+ ceph_assert(0 == "io_submit got unexpected error");
} else {
break;
}
continue;
}
derr << "io_getevents got " << cpp_strerror(r) << dendl;
- assert(0 == "got unexpected error from io_getevents");
+ ceph_assert(0 == "got unexpected error from io_getevents");
}
{
if (event[i].res != ai->len) {
derr << "aio to " << ai->off << "~" << ai->len
<< " returned: " << (int)event[i].res << dendl;
- assert(0 == "unexpected aio error");
+ ceph_assert(0 == "unexpected aio error");
}
dout(10) << __func__ << " aio " << ai->off
<< "~" << ai->len << " done" << dendl;
*/
void FileJournal::check_aio_completion()
{
- assert(aio_lock.is_locked());
+ ceph_assert(aio_lock.is_locked());
dout(20) << "check_aio_completion" << dendl;
bool completed_something = false, signal = false;
dout(5) << "submit_entry seq " << seq
<< " len " << e.length()
<< " (" << oncommit << ")" << dendl;
- assert(e.length() > 0);
- assert(e.length() < header.max_size);
+ ceph_assert(e.length() > 0);
+ ceph_assert(e.length() < header.max_size);
if (logger) {
logger->inc(l_filestore_journal_queue_bytes, orig_len);
FileJournal::write_item &FileJournal::peek_write()
{
- assert(write_lock.is_locked());
+ ceph_assert(write_lock.is_locked());
Mutex::Locker locker(writeq_lock);
return writeq.front();
}
void FileJournal::pop_write()
{
- assert(write_lock.is_locked());
+ ceph_assert(write_lock.is_locked());
Mutex::Locker locker(writeq_lock);
if (logger) {
logger->dec(l_filestore_journal_queue_bytes, writeq.front().orig_len);
void FileJournal::batch_pop_write(list<write_item> &items)
{
- assert(write_lock.is_locked());
+ ceph_assert(write_lock.is_locked());
{
Mutex::Locker locker(writeq_lock);
writeq.swap(items);
void FileJournal::batch_unpop_write(list<write_item> &items)
{
- assert(write_lock.is_locked());
+ ceph_assert(write_lock.is_locked());
for (auto &&i : items) {
if (logger) {
logger->inc(l_filestore_journal_queue_bytes, i.orig_len);
if (offset >= end)
return;
end = round_up_to(end - block_size, block_size);
- assert(end >= offset);
+ ceph_assert(end >= offset);
if (offset < end)
if (block_device_discard(fd, offset, end - offset) < 0)
dout(1) << __func__ << " ioctl(BLKDISCARD) error:" << cpp_strerror(errno) << dendl;
if (seq < last_committed_seq) {
dout(5) << "committed_thru " << seq << " < last_committed_seq " << last_committed_seq << dendl;
- assert(seq >= last_committed_seq);
+ ceph_assert(seq >= last_committed_seq);
return;
}
if (seq == last_committed_seq) {
len = olen; // rest
int64_t actual = ::lseek64(fd, pos, SEEK_SET);
- assert(actual == pos);
+ ceph_assert(actual == pos);
bufferptr bp = buffer::create(len);
int r = safe_read_exact(fd, bp.c_str(), len);
if (_h)
*_h = *h;
- assert(cur_pos % header.alignment == 0);
+ ceph_assert(cur_pos % header.alignment == 0);
return SUCCESS;
}
corrupt_at = corrupt_at + get_top() - header.max_size;
int64_t actual = ::lseek64(fd, corrupt_at, SEEK_SET);
- assert(actual == corrupt_at);
+ ceph_assert(actual == corrupt_at);
char buf[10];
int r = safe_read_exact(fd, buf, 1);
- assert(r == 0);
+ ceph_assert(r == 0);
actual = ::lseek64(wfd, corrupt_at, SEEK_SET);
- assert(actual == corrupt_at);
+ ceph_assert(actual == corrupt_at);
buf[0]++;
r = safe_write(wfd, buf, 1);
- assert(r == 0);
+ ceph_assert(r == 0);
}
void FileJournal::corrupt_payload(
}
completion_item completion_peek_front() {
Mutex::Locker l(completions_lock);
- assert(!completions.empty());
+ ceph_assert(!completions.empty());
return completions.front();
}
void completion_pop_front() {
Mutex::Locker l(completions_lock);
- assert(!completions.empty());
+ ceph_assert(!completions.empty());
completions.pop_front();
}
cct->_conf.add_observer(this);
}
~FileJournal() override {
- assert(fd == -1);
+ ceph_assert(fd == -1);
delete[] zero_buf;
cct->_conf.remove_observer(this);
}
int FileStore::get_index(const coll_t& cid, Index *index)
{
int r = index_manager.get_index(cid, basedir, index);
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
return r;
}
char path[PATH_MAX];
get_cdir(cid, path, sizeof(path));
int r = index_manager.init_index(cid, path, target_version);
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
return r;
}
if (!path)
path = &path2;
int r, exist;
- assert(index.index);
+ ceph_assert(index.index);
r = (index.index)->lookup(oid, path, &exist);
if (r < 0) {
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
return r;
}
if (!exist)
r = -errno;
if (r >= 0 && m_filestore_sloppy_crc) {
int rc = backend->_crc_update_truncate(**fd, length);
- assert(rc >= 0);
+ ceph_assert(rc >= 0);
}
lfn_close(fd);
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
return r;
}
if (r < 0)
return r;
- assert(index.index);
+ ceph_assert(index.index);
RWLock::RLocker l((index.index)->access_lock);
r = lfn_find(oid, index, &path);
FDRef *outfd,
Index *index)
{
- assert(outfd);
+ ceph_assert(outfd);
int r = 0;
bool need_lock = true;
int flags = O_RDWR;
}
int fd, exist;
- assert((*index).index);
+ ceph_assert((*index).index);
if (need_lock) {
((*index).index)->access_lock.get_write();
}
((*index).index)->access_lock.put_write();
}
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
return r;
}
return r;
}
- assert(index_old.index);
- assert(index_new.index);
+ ceph_assert(index_old.index);
+ ceph_assert(index_new.index);
if (!index_same) {
r = index_old->lookup(o, &path_old, &exist);
if (r < 0) {
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
return r;
}
if (!exist)
r = index_new->lookup(newoid, &path_new, &exist);
if (r < 0) {
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
return r;
}
if (exist)
r = index_new->created(newoid, path_new->path());
if (r < 0) {
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
return r;
}
} else {
r = index_old->lookup(o, &path_old, &exist);
if (r < 0) {
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
return r;
}
if (!exist)
r = index_new->lookup(newoid, &path_new, &exist);
if (r < 0) {
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
return r;
}
if (exist)
r = index_new->created(newoid, path_new->path());
if (r < 0) {
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
return r;
}
}
return r;
}
- assert(index.index);
+ ceph_assert(index.index);
RWLock::WLocker l((index.index)->access_lock);
{
int hardlink;
r = index->lookup(o, &path, &hardlink);
if (r < 0) {
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
return r;
}
r = object_map->clear(o, &spos);
if (r < 0 && r != -ENOENT) {
dout(25) << __FUNC__ << ": omap clear failed " << cpp_strerror(r) << dendl;
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
return r;
}
if (cct->_conf->filestore_debug_inject_read_err) {
buf0->reset();
if (::statfs(basedir.c_str(), &buf) < 0) {
int r = -errno;
- assert(!m_filestore_fail_eio || r != -EIO);
- assert(r != -ENOENT);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(r != -ENOENT);
return r;
}
{
m_fs_type = f_type;
- assert(!backend);
+ ceph_assert(!backend);
backend = FileStoreBackend::create(f_type, this);
dout(0) << "backend " << backend->get_name()
if (backend->can_checkpoint()) {
// create snap_1 too
current_fd = ::open(current_fn.c_str(), O_RDONLY);
- assert(current_fd >= 0);
+ ceph_assert(current_fd >= 0);
char s[NAME_MAX];
snprintf(s, sizeof(s), COMMIT_SNAP_ITEM, 1ull);
ret = backend->create_checkpoint(s, nullptr);
}
if (read_fsid(omap_fsid_fd, &old_omap_fsid) < 0 || old_omap_fsid.is_zero()) {
- assert(!fsid.is_zero());
+ ceph_assert(!fsid.is_zero());
fsid.print(fsid_str);
strcat(fsid_str, "\n");
ret = ::ftruncate(omap_fsid_fd, 0);
int op_fd = ::open(current_op_seq_fn.c_str(), O_CREAT|O_RDWR, 0644);
if (op_fd < 0) {
int r = -errno;
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
return r;
}
char s[40];
if (ret < 0) {
derr << __FUNC__ << ": error reading " << current_op_seq_fn << ": " << cpp_strerror(ret) << dendl;
VOID_TEMP_FAILURE_RETRY(::close(op_fd));
- assert(!m_filestore_fail_eio || ret != -EIO);
+ ceph_assert(!m_filestore_fail_eio || ret != -EIO);
return ret;
}
*seq = atoll(s);
int ret = TEMP_FAILURE_RETRY(::pwrite(fd, s, strlen(s), 0));
if (ret < 0) {
ret = -errno;
- assert(!m_filestore_fail_eio || ret != -EIO);
+ ceph_assert(!m_filestore_fail_eio || ret != -EIO);
}
return ret;
}
char clustersnap[NAME_MAX];
for (list<string>::iterator it = ls.begin(); it != ls.end(); ++it) {
if (sscanf(it->c_str(), COMMIT_SNAP_ITEM, &c) == 1) {
- assert(c > prev);
+ ceph_assert(c > prev);
prev = c;
snaps.push_back(c);
} else if (sscanf(it->c_str(), CLUSTER_SNAP_ITEM, clustersnap) == 1)
<< " ** NOTE: rolling back to cluster snapshot " << m_osd_rollback_to_cluster_snap << " **"
<< TEXT_NORMAL
<< dendl;
- assert(cluster_snaps.count(m_osd_rollback_to_cluster_snap));
+ ceph_assert(cluster_snaps.count(m_osd_rollback_to_cluster_snap));
snprintf(s, sizeof(s), CLUSTER_SNAP_ITEM, m_osd_rollback_to_cluster_snap.c_str());
} else {
{
goto close_basedir_fd;
}
- assert(current_fd >= 0);
+ ceph_assert(current_fd >= 0);
op_fd = read_op_seq(&initial_op_seq);
if (op_fd < 0) {
<< " with error: " << ret << dendl;
goto close_current_fd;
}
- assert(index.index);
+ ceph_assert(index.index);
RWLock::WLocker l((index.index)->access_lock);
index->cleanup();
VOID_TEMP_FAILURE_RETRY(::close(fsid_fd));
fsid_fd = -1;
done:
- assert(!m_filestore_fail_eio || ret != -EIO);
+ ceph_assert(!m_filestore_fail_eio || ret != -EIO);
delete backend;
backend = nullptr;
object_map.reset();
dout(10) << __FUNC__ << dendl;
vector<coll_t> ls;
int r = list_collections(ls, true);
- assert(r >= 0);
+ ceph_assert(r >= 0);
dout(20) << " ls " << ls << dendl;
} else {
dout(10) << __FUNC__ << ": creating " << temp << dendl;
r = _create_collection(temp, 0, spos);
- assert(r == 0);
+ ceph_assert(r == 0);
}
}
for (set<coll_t>::iterator p = temps.begin(); p != temps.end(); ++p) {
dout(10) << __FUNC__ << ": removing stray " << *p << dendl;
r = _collection_remove_recursive(*p, spos);
- assert(r == 0);
+ ceph_assert(r == 0);
}
}
return 0;
}
- assert(journal);
+ ceph_assert(journal);
//prepare and encode transactions data out of lock
bufferlist tbl;
int orig_len = -1;
int ret = object_map->sync();
if (ret < 0) {
derr << __FUNC__ << ": omap sync error " << cpp_strerror(ret) << dendl;
- assert(0 == "_set_global_replay_guard failed");
+ ceph_assert(0 == "_set_global_replay_guard failed");
}
ret = sync_filesystem(basedir_fd);
if (ret < 0) {
derr << __FUNC__ << ": sync_filesystem error " << cpp_strerror(ret) << dendl;
- assert(0 == "_set_global_replay_guard failed");
+ ceph_assert(0 == "_set_global_replay_guard failed");
}
char fn[PATH_MAX];
if (fd < 0) {
int err = errno;
derr << __FUNC__ << ": " << cid << " error " << cpp_strerror(err) << dendl;
- assert(0 == "_set_global_replay_guard failed");
+ ceph_assert(0 == "_set_global_replay_guard failed");
}
_inject_failure();
if (r < 0) {
derr << __FUNC__ << ": fsetxattr " << GLOBAL_REPLAY_GUARD_XATTR
<< " got " << cpp_strerror(r) << dendl;
- assert(0 == "fsetxattr failed");
+ ceph_assert(0 == "fsetxattr failed");
}
// and make sure our xattr is durable.
int r = chain_fgetxattr(fd, GLOBAL_REPLAY_GUARD_XATTR, buf, sizeof(buf));
if (r < 0) {
dout(20) << __FUNC__ << ": no xattr" << dendl;
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
VOID_TEMP_FAILURE_RETRY(::close(fd));
return 1; // no xattr
}
if (fd < 0) {
int err = errno;
derr << __FUNC__ << ": " << cid << " error " << cpp_strerror(err) << dendl;
- assert(0 == "_set_replay_guard failed");
+ ceph_assert(0 == "_set_replay_guard failed");
}
_set_replay_guard(fd, spos, 0, in_progress);
VOID_TEMP_FAILURE_RETRY(::close(fd));
fd, REPLAY_GUARD_XATTR, v.c_str(), v.length());
if (r < 0) {
derr << "fsetxattr " << REPLAY_GUARD_XATTR << " got " << cpp_strerror(r) << dendl;
- assert(0 == "fsetxattr failed");
+ ceph_assert(0 == "fsetxattr failed");
}
// and make sure our xattr is durable.
if (fd < 0) {
int err = errno;
derr << __FUNC__ << ": " << cid << " error " << cpp_strerror(err) << dendl;
- assert(0 == "_close_replay_guard failed");
+ ceph_assert(0 == "_close_replay_guard failed");
}
_close_replay_guard(fd, spos);
VOID_TEMP_FAILURE_RETRY(::close(fd));
fd, REPLAY_GUARD_XATTR, v.c_str(), v.length());
if (r < 0) {
derr << "fsetxattr " << REPLAY_GUARD_XATTR << " got " << cpp_strerror(r) << dendl;
- assert(0 == "fsetxattr failed");
+ ceph_assert(0 == "fsetxattr failed");
}
// and make sure our xattr is durable.
int r = chain_fgetxattr(fd, REPLAY_GUARD_XATTR, buf, sizeof(buf));
if (r < 0) {
dout(20) << __FUNC__ << ": no xattr" << dendl;
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
return 1; // no xattr
}
bufferlist bl;
const coll_t &ncid = i.get_cid(op->dest_cid);
const ghobject_t &oid = i.get_oid(op->oid);
- assert(oid.hobj.pool >= -1);
+ ceph_assert(oid.hobj.pool >= -1);
// always followed by OP_COLL_REMOVE
Transaction::Op *op2 = i.decode_op();
const coll_t &ocid2 = i.get_cid(op2->cid);
const ghobject_t &oid2 = i.get_oid(op2->oid);
- assert(op2->op == Transaction::OP_COLL_REMOVE);
- assert(ocid2 == ocid);
- assert(oid2 == oid);
+ ceph_assert(op2->op == Transaction::OP_COLL_REMOVE);
+ ceph_assert(ocid2 == ocid);
+ ceph_assert(oid2 == oid);
tracepoint(objectstore, coll_add_enter);
r = _collection_add(ncid, ocid, oid, spos);
case Transaction::OP_COLL_SETATTR:
case Transaction::OP_COLL_RMATTR:
- assert(0 == "collection attr methods no longer implemented");
+ ceph_assert(0 == "collection attr methods no longer implemented");
break;
case Transaction::OP_COLL_RENAME:
break;
case Transaction::OP_SPLIT_COLLECTION:
{
- assert(0 == "not legacy journal; upgrade to firefly first");
+ ceph_assert(0 == "not legacy journal; upgrade to firefly first");
}
break;
case Transaction::OP_SPLIT_COLLECTION2:
dump_open_fds(cct);
}
- assert(0 == "unexpected error");
+ ceph_assert(0 == "unexpected error");
}
}
osr->wait_for_apply(oid);
const coll_t& cid = !_need_temp_object_collection(ch->cid, oid) ? ch->cid : ch->cid.get_temp();
int r = lfn_stat(cid, oid, st);
- assert(allow_eio || !m_filestore_fail_eio || r != -EIO);
+ ceph_assert(allow_eio || !m_filestore_fail_eio || r != -EIO);
if (r < 0) {
dout(10) << __FUNC__ << ": " << ch->cid << "/" << oid
<< " = " << r << dendl;
struct stat st;
memset(&st, 0, sizeof(struct stat));
int r = ::fstat(**fd, &st);
- assert(r == 0);
+ ceph_assert(r == 0);
len = st.st_size;
}
if (errors != 0) {
dout(0) << __FUNC__ << ": " << cid << "/" << oid << " " << offset << "~"
<< got << " ... BAD CRC:\n" << ss.str() << dendl;
- assert(0 == "bad crc on read");
+ ceph_assert(0 == "bad crc on read");
}
}
done:
dout(10) << __FUNC__ << ": " << cid << "/" << oid << " " << offset << "~" << len << " = " << r << " num_extents=" << destmap.size() << " " << destmap << dendl;
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
tracepoint(objectstore, fiemap_exit, r);
return r;
}
if (r >= 0 && m_filestore_sloppy_crc) {
int rc = backend->_crc_update_write(**fd, offset, len, bl);
- assert(rc >= 0);
+ ceph_assert(rc >= 0);
}
if (replaying || m_disable_wbthrottle) {
if (ret >= 0 && m_filestore_sloppy_crc) {
int rc = backend->_crc_update_zero(**fd, offset, len);
- assert(rc >= 0);
+ ceph_assert(rc >= 0);
}
if (ret == 0)
if (r < 0) {
goto out2;
}
- assert(index.index);
+ ceph_assert(index.index);
RWLock::WLocker l((index.index)->access_lock);
r = lfn_open(cid, newoid, true, &n, &index);
lfn_close(o);
out2:
dout(10) << __FUNC__ << ": " << cid << "/" << oldoid << " -> " << cid << "/" << newoid << " = " << r << dendl;
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
return r;
}
if (r >= 0) {
if (m_filestore_sloppy_crc) {
int rc = backend->_crc_update_clone_range(from, to, srcoff, len, dstoff);
- assert(rc >= 0);
+ ceph_assert(rc >= 0);
}
struct stat st;
r = ::fstat(to, &st);
}
if (r < 0 && replaying) {
- assert(r == -ERANGE);
+ ceph_assert(r == -ERANGE);
derr << __FUNC__ << ": short source tolerated because we are replaying" << dendl;
r = len;
}
- assert(replaying || pos == end);
+ ceph_assert(replaying || pos == end);
if (r >= 0 && !skip_sloppycrc && m_filestore_sloppy_crc) {
int rc = backend->_crc_update_clone_range(from, to, srcoff, len, dstoff);
- assert(rc >= 0);
+ ceph_assert(rc >= 0);
}
dout(20) << __FUNC__ << ": " << srcoff << "~" << len << " to " << dstoff << " = " << r << dendl;
return r;
int err = write_op_seq(op_fd, cp);
if (err < 0) {
derr << "Error during write_op_seq: " << cpp_strerror(err) << dendl;
- assert(0 == "error during write_op_seq");
+ ceph_assert(0 == "error during write_op_seq");
}
char s[NAME_MAX];
if (err < 0) {
int err = errno;
derr << "snap create '" << s << "' got error " << err << dendl;
- assert(err == 0);
+ ceph_assert(err == 0);
}
snaps.push_back(cp);
err = backend->sync_checkpoint(cid);
if (err < 0) {
derr << "ioctl WAIT_SYNC got " << cpp_strerror(err) << dendl;
- assert(0 == "wait_sync got error");
+ ceph_assert(0 == "wait_sync got error");
}
dout(20) << " done waiting for checkpoint " << cid << " to complete" << dendl;
}
int err = object_map->sync();
if (err < 0) {
derr << "object_map sync got " << cpp_strerror(err) << dendl;
- assert(0 == "object_map sync returned error");
+ ceph_assert(0 == "object_map sync returned error");
}
err = backend->syncfs();
if (err < 0) {
derr << "syncfs got " << cpp_strerror(err) << dendl;
- assert(0 == "syncfs returned error");
+ ceph_assert(0 == "syncfs returned error");
}
err = write_op_seq(op_fd, cp);
if (err < 0) {
derr << "Error during write_op_seq: " << cpp_strerror(err) << dendl;
- assert(0 == "error during write_op_seq");
+ ceph_assert(0 == "error during write_op_seq");
}
err = ::fsync(op_fd);
if (err < 0) {
derr << "Error during fsync of op_seq: " << cpp_strerror(err) << dendl;
- assert(0 == "error during fsync of op_seq");
+ ceph_assert(0 == "error during fsync of op_seq");
}
}
l = chain_fgetxattr(fd, name, bp.c_str(), l);
}
}
- assert(!m_filestore_fail_eio || l != -EIO);
+ ceph_assert(!m_filestore_fail_eio || l != -EIO);
return l;
}
if (len == -ERANGE) {
len = chain_flistxattr(fd, 0, 0);
if (len < 0) {
- assert(!m_filestore_fail_eio || len != -EIO);
+ ceph_assert(!m_filestore_fail_eio || len != -EIO);
return len;
}
dout(10) << " -ERANGE, len is " << len << dendl;
len = chain_flistxattr(fd, names2, len);
dout(10) << " -ERANGE, got " << len << dendl;
if (len < 0) {
- assert(!m_filestore_fail_eio || len != -EIO);
+ ceph_assert(!m_filestore_fail_eio || len != -EIO);
delete[] names2;
return len;
}
name = names2;
} else if (len < 0) {
- assert(!m_filestore_fail_eio || len != -EIO);
+ ceph_assert(!m_filestore_fail_eio || len != -EIO);
return len;
} else {
name = names1;
}
out:
dout(10) << __FUNC__ << ": " << cid << "/" << oid << " '" << name << "' = " << r << dendl;
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
if (cct->_conf->filestore_debug_inject_read_err &&
debug_mdata_eio(oid)) {
return -EIO;
if (r == -ENOENT)
r = 0;
}
- assert(omap_attrs.size() == omap_aset.size());
+ ceph_assert(omap_attrs.size() == omap_aset.size());
for (map<string, bufferlist>::iterator i = omap_aset.begin();
i != omap_aset.end();
++i) {
}
out:
dout(10) << __FUNC__ << ": " << cid << "/" << oid << " = " << r << dendl;
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
if (cct->_conf->filestore_debug_inject_read_err &&
debug_mdata_eio(oid)) {
r = _fgetattrs(**fd, inline_set);
incomplete_inline = (r == -E2BIG);
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
dout(15) << __FUNC__ << ": " << cid << "/" << oid
<< (incomplete_inline ? " (incomplete_inline, forcing omap)" : "")
<< dendl;
r = object_map->remove_xattrs(oid, omap_remove, &spos);
if (r < 0 && r != -ENOENT) {
dout(10) << __FUNC__ << ": could not remove_xattrs r = " << r << dendl;
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
goto out_close;
} else {
r = 0; // don't confuse the debug output
r = object_map->set_xattrs(oid, omap_set, &spos);
if (r < 0) {
dout(10) << __FUNC__ << ": could not set_xattrs r = " << r << dendl;
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
goto out_close;
}
}
r = object_map->remove_xattrs(oid, to_remove, &spos);
if (r < 0 && r != -ENOENT) {
dout(10) << __FUNC__ << ": could not remove_xattrs index r = " << r << dendl;
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
goto out_close;
}
}
r = object_map->get_all_xattrs(oid, &omap_attrs);
if (r < 0 && r != -ENOENT) {
dout(10) << __FUNC__ << ": could not get omap_attrs r = " << r << dendl;
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
goto out_close;
}
r = object_map->remove_xattrs(oid, omap_attrs, &spos);
for (vector<ghobject_t>::iterator i = objects.begin();
i != objects.end();
++i) {
- assert(_check_replay_guard(cid, *i, spos));
+ ceph_assert(_check_replay_guard(cid, *i, spos));
r = _remove(cid, *i, spos);
if (r < 0)
return r;
if (!dir) {
r = -errno;
derr << "tried opening directory " << fn << ": " << cpp_strerror(-r) << dendl;
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
return r;
}
if (int n = snprintf(filename, sizeof(filename), "%s/%s", fn, de->d_name);
n >= static_cast<int>(sizeof(filename))) {
derr << __func__ << " path length overrun: " << n << dendl;
- assert(false);
+ ceph_assert(false);
}
r = ::stat(filename, &sb);
if (r < 0) {
r = -errno;
derr << "stat on " << filename << ": " << cpp_strerror(-r) << dendl;
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
break;
}
if (!S_ISDIR(sb.st_mode)) {
}
::closedir(dir);
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
tracepoint(objectstore, list_collections_exit, r);
return r;
}
if (r < 0)
r = -errno;
dout(10) << __FUNC__ << ": " << fn << " = " << r << dendl;
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
tracepoint(objectstore, collection_stat_exit, r);
return r;
}
return r;
}
- assert(index.index);
+ ceph_assert(index.index);
RWLock::RLocker l((index.index)->access_lock);
vector<ghobject_t> ls;
if (r < 0) {
derr << __FUNC__ << ": collection_list_partial returned: "
<< cpp_strerror(r) << dendl;
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
return r;
}
*empty = ls.empty();
if (r < 0)
return r;
- assert(index.index);
+ ceph_assert(index.index);
RWLock::RLocker l((index.index)->access_lock);
r = index->collection_list_partial(start, end, max, ls, next);
if (r < 0) {
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
return r;
}
dout(20) << "objects: " << *ls << dendl;
if (r < 0)
return r;
{
- assert(index.index);
+ ceph_assert(index.index);
RWLock::RLocker l((index.index)->access_lock);
r = lfn_find(hoid, index);
if (r < 0)
}
r = object_map->get(hoid, header, out);
if (r < 0 && r != -ENOENT) {
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
return r;
}
tracepoint(objectstore, omap_get_exit, 0);
if (r < 0)
return r;
{
- assert(index.index);
+ ceph_assert(index.index);
RWLock::RLocker l((index.index)->access_lock);
r = lfn_find(hoid, index);
if (r < 0)
}
r = object_map->get_header(hoid, bl);
if (r < 0 && r != -ENOENT) {
- assert(allow_eio || !m_filestore_fail_eio || r != -EIO);
+ ceph_assert(allow_eio || !m_filestore_fail_eio || r != -EIO);
return r;
}
tracepoint(objectstore, omap_get_header_exit, 0);
if (r < 0)
return r;
{
- assert(index.index);
+ ceph_assert(index.index);
RWLock::RLocker l((index.index)->access_lock);
r = lfn_find(hoid, index);
if (r < 0)
}
r = object_map->get_keys(hoid, keys);
if (r < 0 && r != -ENOENT) {
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
return r;
}
tracepoint(objectstore, omap_get_keys_exit, 0);
goto out;
}
{
- assert(index.index);
+ ceph_assert(index.index);
RWLock::RLocker l((index.index)->access_lock);
r = lfn_find(hoid, index);
if (r < 0) {
}
r = object_map->get_values(hoid, keys, out);
if (r < 0 && r != -ENOENT) {
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
where = " (get_values)";
goto out;
}
if (r < 0)
return r;
{
- assert(index.index);
+ ceph_assert(index.index);
RWLock::RLocker l((index.index)->access_lock);
r = lfn_find(hoid, index);
if (r < 0)
}
r = object_map->check_keys(hoid, keys, out);
if (r < 0 && r != -ENOENT) {
- assert(!m_filestore_fail_eio || r != -EIO);
+ ceph_assert(!m_filestore_fail_eio || r != -EIO);
return r;
}
tracepoint(objectstore, omap_check_keys_exit, 0);
return ObjectMap::ObjectMapIterator();
}
{
- assert(index.index);
+ ceph_assert(index.index);
RWLock::RLocker l((index.index)->access_lock);
r = lfn_find(hoid, index);
if (r < 0) {
r = get_index(c, &from);
if (r < 0)
goto out;
- assert(from.index);
+ ceph_assert(from.index);
RWLock::WLocker l((from.index)->access_lock);
r = from->prep_delete();
if (r < 0) {
// the source collection/object does not exist. If we are replaying, we
// should be safe, so just return 0 and move on.
- assert(replaying);
+ ceph_assert(replaying);
dout(10) << __FUNC__ << ": " << c << "/" << o << " from "
<< oldcid << "/" << o << " (dne, continue replay) " << dendl;
return 0;
<< oldcid << "/" << oldoid << " (dne, ignoring enoent)"
<< dendl;
} else {
- assert(0 == "ERROR: source must exist");
+ ceph_assert(0 == "ERROR: source must exist");
}
if (!replaying) {
if (r < 0)
return r;
{
- assert(index.index);
+ ceph_assert(index.index);
RWLock::RLocker l((index.index)->access_lock);
r = lfn_find(hoid, index);
if (r < 0)
return r;
}
{
- assert(index.index);
+ ceph_assert(index.index);
RWLock::RLocker l((index.index)->access_lock);
r = lfn_find(hoid, index);
if (r < 0) {
if (r < 0)
return r;
{
- assert(index.index);
+ ceph_assert(index.index);
RWLock::RLocker l((index.index)->access_lock);
r = lfn_find(hoid, index);
if (r < 0)
if (r < 0)
return r;
{
- assert(index.index);
+ ceph_assert(index.index);
RWLock::RLocker l((index.index)->access_lock);
r = lfn_find(hoid, index);
if (r < 0)
dout(15) << __FUNC__ << ": " << cid << " bits: " << bits << dendl;
if (!collection_exists(cid)) {
dout(2) << __FUNC__ << ": " << cid << " DNE" << dendl;
- assert(replaying);
+ ceph_assert(replaying);
return 0;
}
if (!collection_exists(dest)) {
dout(2) << __FUNC__ << ": " << dest << " DNE" << dendl;
- assert(replaying);
+ ceph_assert(replaying);
return 0;
}
r = get_index(dest, &to);
if (!r) {
- assert(from.index);
+ ceph_assert(from.index);
RWLock::WLocker l1((from.index)->access_lock);
- assert(to.index);
+ ceph_assert(to.index);
RWLock::WLocker l2((to.index)->access_lock);
r = from->split(rem, bits, to.index);
++i) {
dout(20) << __FUNC__ << ": " << *i << " still in source "
<< cid << dendl;
- assert(!i->match(bits, rem));
+ ceph_assert(!i->match(bits, rem));
}
objects.clear();
}
++i) {
dout(20) << __FUNC__ << ": " << *i << " now in dest "
<< *i << dendl;
- assert(i->match(bits, rem));
+ ceph_assert(i->match(bits, rem));
}
objects.clear();
}
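
The post-split checks above rely on matching the low bits of each object's hash against the child's remainder; a standalone restatement with made-up values (the helper name and the numbers are hypothetical, the real check is the ghobject match call used above):

    #include <cassert>
    #include <cstdint>

    // Restates the split rule: an object stays with the child whose remainder
    // equals the low `bits` bits of its hash.
    static bool match_low_bits(uint32_t hash, unsigned bits, uint32_t rem) {
      return (hash & ((1u << bits) - 1)) == rem;
    }

    int main() {
      // Hypothetical split: bits = 3, remainder = 5 (binary 101).
      assert(match_low_bits(0x12345675u, 3, 5));    // ...101 -> belongs to rem 5
      assert(!match_low_bits(0x12345672u, 3, 5));   // ...010 -> belongs elsewhere
      return 0;
    }
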
lfn_close(fd);
out:
dout(10) << __FUNC__ << ": " << cid << "/" << oid << " object_size " << expected_object_size << " write_size " << expected_write_size << " = " << ret << dendl;
- assert(!m_filestore_fail_eio || ret != -EIO);
+ ceph_assert(!m_filestore_fail_eio || ret != -EIO);
return ret;
}
void FileStore::OpSequencer::_unregister_apply(Op *o)
{
- assert(o->registered_apply);
+ ceph_assert(o->registered_apply);
for (auto& t : o->tls) {
for (auto& i : t.get_object_index()) {
uint32_t key = i.first.hobj.get_hash();
}
++p;
}
- assert(removed);
+ ceph_assert(removed);
}
}
}
bool _get_max_uncompleted(
uint64_t *seq ///< [out] max uncompleted seq
) {
- assert(qlock.is_locked());
- assert(seq);
+ ceph_assert(qlock.is_locked());
+ ceph_assert(seq);
*seq = 0;
if (q.empty() && jq.empty())
return true;
bool _get_min_uncompleted(
uint64_t *seq ///< [out] min uncompleted seq
) {
- assert(qlock.is_locked());
- assert(seq);
+ ceph_assert(qlock.is_locked());
+ ceph_assert(seq);
*seq = 0;
if (q.empty() && jq.empty())
return true;
void wait_for_apply(const ghobject_t& oid);
Op *peek_queue() {
Mutex::Locker l(qlock);
- assert(apply_lock.is_locked());
+ ceph_assert(apply_lock.is_locked());
return q.front();
}
Op *dequeue(list<Context*> *to_queue) {
- assert(to_queue);
- assert(apply_lock.is_locked());
+ ceph_assert(to_queue);
+ ceph_assert(apply_lock.is_locked());
Mutex::Locker l(qlock);
Op *o = q.front();
q.pop_front();
id(i),
osr_name(osr_name_str.c_str()) {}
~OpSequencer() override {
- assert(q.empty());
+ ceph_assert(q.empty());
}
};
typedef boost::intrusive_ptr<OpSequencer> OpSequencerRef;
store->_finish_op(osr);
}
void _clear() override {
- assert(store->op_queue.empty());
+ ceph_assert(store->op_queue.empty());
}
} op_wq;
void inject_mdata_error(const ghobject_t &oid) override;
void compact() override {
- assert(object_map);
+ ceph_assert(object_map);
object_map->compact();
}
/// int value to hex digit
char int_to_hex(int v)
{
- assert(v < 16);
+ ceph_assert(v < 16);
if (v < 10)
return '0' + v;
return 'A' + v - 10;
/// reverse bits in a nibble (0..15)
int reverse_nibble_bits(int in)
{
- assert(in < 16);
+ ceph_assert(in < 16);
return
((in & 8) >> 3) |
((in & 4) >> 1) |
/// compare hex digit (as length 1 string) bitwise
bool cmp_hexdigit_bitwise(const string& l, const string& r)
{
- assert(l.length() == 1 && r.length() == 1);
+ ceph_assert(l.length() == 1 && r.length() == 1);
int lv = hex_to_int(l[0]);
int rv = hex_to_int(r[0]);
- assert(lv < 16);
- assert(rv < 16);
+ ceph_assert(lv < 16);
+ ceph_assert(rv < 16);
return reverse_nibble_bits(lv) < reverse_nibble_bits(rv);
}
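
A quick worked example of the ordering defined above: reversing the four bits of a hex digit maps '8' (1000b) to 1 and '1' (0001b) to 8, so hash prefixes sort by their low bits first. A standalone restatement:

    #include <cassert>

    // Mirror the 4 bits of a nibble, as reverse_nibble_bits above does.
    static int reverse4(int in) {
      return ((in & 8) >> 3) | ((in & 4) >> 1) |
             ((in & 2) << 1) | ((in & 1) << 3);
    }

    int main() {
      assert(reverse4(0x8) == 0x1);    // 1000b -> 0001b
      assert(reverse4(0x1) == 0x8);    // 0001b -> 1000b
      // Hence cmp_hexdigit_bitwise("8", "1") is true: "8" sorts before "1",
      // ordering directory names by the reversed (low-bit-first) hash.
      assert(reverse4(0x8) < reverse4(0x1));
      return 0;
    }
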
uint32_t match,
uint32_t bits,
CollectionIndex* dest) {
- assert(collection_version() == dest->collection_version());
+ ceph_assert(collection_version() == dest->collection_version());
unsigned mkdirred = 0;
return col_split_level(
*this,
// the below logic is inspired by rados.h#ceph_stable_mod,
// it basically determines how many sub-folders we should
// create for splitting
- assert(pg_num_bits > 0); // otherwise BAD_SHIFT
+ ceph_assert(pg_num_bits > 0); // otherwise BAD_SHIFT
if (((1 << (pg_num_bits - 1)) | ps) >= pg_num) {
++split_bits;
}
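
For reference, the stable-mod mapping that comment points at can be restated standalone (the function lives in rados.h; the pg_num and hash values below are made-up examples):

    #include <cassert>

    // Standalone restatement of the stable-mod idea: keep pg_num_bits bits of
    // the hash when that value is still below pg_num, otherwise drop one bit.
    static int stable_mod(int x, int b, int bmask) {
      return ((x & bmask) < b) ? (x & bmask) : (x & (bmask >> 1));
    }

    int main() {
      // Hypothetical pool: pg_num = 12, so pg_num_bits = 4 and bmask = 15.
      assert(stable_mod(0x25, 12, 15) == 0x5);  // low nibble 5  < 12: keep 4 bits
      assert(stable_mod(0x2e, 12, 15) == 0x6);  // low nibble 14 >= 12: keep 3 bits
      return 0;
    }

Roughly, the check above bumps split_bits for seeds that also absorb the hashes whose top bit stable-mod strips off, which changes how many sub-folder levels are worth pre-creating.
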
leavies = leavies >> 4;
}
for (uint32_t i = 0; i < subs; ++i) {
- assert(split_bits <= 4); // otherwise BAD_SHIFT
+ ceph_assert(split_bits <= 4); // otherwise BAD_SHIFT
int v = tmp_id | (i << ((4 - split_bits) % 4));
paths.push_back(to_hex(v));
ret = create_path(paths);
return r;
auto bufiter = buf.cbegin();
info->decode(bufiter);
- assert(path.size() == (unsigned)info->hash_level);
+ ceph_assert(path.size() == (unsigned)info->hash_level);
return 0;
}
int HashIndex::set_info(const vector<string> &path, const subdir_info_s &info) {
bufferlist buf;
- assert(path.size() == (unsigned)info.hash_level);
+ ceph_assert(path.size() == (unsigned)info.hash_level);
info.encode(buf);
return add_attr_path(path, SUBDIR_ATTR, buf);
}
}
string HashIndex::get_path_str(const ghobject_t &oid) {
- assert(!oid.is_max());
+ ceph_assert(!oid.is_max());
return get_hash_str(oid.hobj.get_hash());
}
ghobject_t *next,
vector<ghobject_t> *out)
{
- assert(out);
+ ceph_assert(out);
return list_by_hash_bitwise(path, end, max_count, next, out);
}
using ceph::decode;
__u8 v;
decode(v, bl);
- assert(v == 1);
+ ceph_assert(v == 1);
decode(objs, bl);
decode(subdirs, bl);
decode(hash_level, bl);
using ceph::decode;
__u8 v;
decode(v, bl);
- assert(v == 1);
+ ceph_assert(v == 1);
decode(op, bl);
decode(path, bl);
}
/// Convert a number to hex string (upper case).
static string to_hex(int n) {
- assert(n >= 0 && n < 16);
+ ceph_assert(n >= 0 && n < 16);
char c = (n <= 9 ? ('0' + n) : ('A' + n - 10));
string str;
str.append(1, c);
dout(3) << "journal_replay: skipping old op seq " << seq << " <= " << op_seq << dendl;
continue;
}
- assert(op_seq == seq-1);
+ ceph_assert(op_seq == seq-1);
dout(3) << "journal_replay: applying op seq " << seq << dendl;
auto p = bl.cbegin();
}
dout(10) << "op_apply_start " << op << " open_ops " << open_ops << " -> "
<< (open_ops+1) << dendl;
- assert(!blocked);
- assert(op > committed_seq);
+ ceph_assert(!blocked);
+ ceph_assert(op > committed_seq);
open_ops++;
return op;
}
<< (open_ops-1) << ", max_applied_seq " << max_applied_seq << " -> "
<< std::max(op, max_applied_seq) << dendl;
--open_ops;
- assert(open_ops >= 0);
+ ceph_assert(open_ops >= 0);
// signal a blocked commit_start
if (blocked) {
if (op != op_submitted + 1) {
dout(0) << "op_submit_finish " << op << " expected " << (op_submitted + 1)
<< ", OUT OF ORDER" << dendl;
- assert(0 == "out of order op_submit_finish");
+ ceph_assert(0 == "out of order op_submit_finish");
}
op_submitted = op;
lock.Unlock();
void JournalingObjectStore::ApplyManager::add_waiter(uint64_t op, Context *c)
{
Mutex::Locker l(com_lock);
- assert(c);
+ ceph_assert(c);
commit_waiters[op].push_back(c);
}
<< " open ops to drain" << dendl;
blocked_cond.Wait(apply_lock);
}
- assert(open_ops == 0);
+ ceph_assert(open_ops == 0);
dout(10) << "commit_start blocked, all open_ops have completed" << dendl;
{
Mutex::Locker l(com_lock);
if (max_applied_seq == committed_seq) {
dout(10) << "commit_start nothing to do" << dendl;
blocked = false;
- assert(commit_waiters.empty());
+ ceph_assert(commit_waiters.empty());
goto out;
}
com_lock("JOS::ApplyManager::com_lock", false, true, false, cct),
committing_seq(0), committed_seq(0) {}
void reset() {
- assert(open_ops == 0);
- assert(blocked == false);
+ ceph_assert(open_ops == 0);
+ ceph_assert(blocked == false);
max_applied_seq = 0;
committing_seq = 0;
committed_seq = 0;
} else {
string long_name = lfn_generate_object_name(obj);
if (!lfn_must_hash(long_name)) {
- assert(long_name == short_name);
+ ceph_assert(long_name == short_name);
}
if (index_version == HASH_INDEX_TAG)
get_hobject_from_oinfo(to_list_path.c_str(), short_name.c_str(), &obj);
char *end = s + sizeof(s);
char *t = s;
- assert(oid.generation == ghobject_t::NO_GEN);
+ ceph_assert(oid.generation == ghobject_t::NO_GEN);
const char *i = oid.hobj.oid.name.c_str();
// Escape subdir prefix
if (oid.hobj.oid.name.substr(0, 4) == "DIR_") {
if (index_version == HASH_INDEX_TAG)
return lfn_generate_object_name_keyless(oid);
- assert(oid.generation == ghobject_t::NO_GEN);
+ ceph_assert(oid.generation == ghobject_t::NO_GEN);
string full_name;
string::const_iterator i = oid.hobj.oid.name.begin();
if (oid.hobj.oid.name.substr(0, 4) == "DIR_") {
*hardlink = 0;
return 0;
}
- assert(r > 0);
+ ceph_assert(r > 0);
string lfn(bp.c_str(), bp.length());
if (lfn == full_name) {
if (mangled_name)
{
char hash[FILENAME_HASH_LEN + 1];
- assert(len >= FILENAME_SHORT_LEN + 4);
+ ceph_assert(len >= FILENAME_SHORT_LEN + 4);
strncpy(filename, old_filename, FILENAME_PREFIX_LEN);
filename[FILENAME_PREFIX_LEN] = '\0';
int index = -1;
char buf[FILENAME_SHORT_LEN + 4];
- assert((end - suffix) < (int)sizeof(buf));
+ ceph_assert((end - suffix) < (int)sizeof(buf));
int r = sscanf(suffix, "_%d_%s", &index, buf);
if (r < 2)
return false;
string LFNIndex::lfn_get_short_name(const ghobject_t &oid, int i)
{
string long_name = lfn_generate_object_name(oid);
- assert(lfn_must_hash(long_name));
+ ceph_assert(lfn_must_hash(long_name));
char buf[FILENAME_SHORT_LEN + 4];
build_filename(long_name.c_str(), i, buf, sizeof(buf));
return string(buf);
try { \
if (failed) { \
r = cleanup(); \
- assert(r == 0); \
+ ceph_assert(r == 0); \
} \
{ x } \
out: \
Mutex::Locker l(lock);
set_from_conf();
}
- assert(cct);
+ ceph_assert(cct);
PerfCountersBuilder b(
cct, string("WBThrottle"),
l_wbthrottle_first, l_wbthrottle_last);
}
WBThrottle::~WBThrottle() {
- assert(cct);
+ ceph_assert(cct);
cct->get_perfcounters_collection()->remove(logger);
delete logger;
cct->_conf.remove_observer(this);
void WBThrottle::set_from_conf()
{
- assert(lock.is_locked());
+ ceph_assert(lock.is_locked());
if (fs == BTRFS) {
size_limits.first =
cct->_conf->filestore_wbthrottle_btrfs_bytes_start_flusher;
fd_limits.second =
cct->_conf->filestore_wbthrottle_xfs_inodes_hard_limit;
} else {
- assert(0 == "invalid value for fs");
+ ceph_assert(0 == "invalid value for fs");
}
cond.Signal();
}
bool WBThrottle::get_next_should_flush(
boost::tuple<ghobject_t, FDRef, PendingWB> *next)
{
- assert(lock.is_locked());
- assert(next);
+ ceph_assert(lock.is_locked());
+ ceph_assert(next);
while (!stopping && (!beyond_limit() || pending_wbs.empty()))
cond.Wait(lock);
if (stopping)
return false;
- assert(!pending_wbs.empty());
+ ceph_assert(!pending_wbs.empty());
ghobject_t obj(pop_object());
ceph::unordered_map<ghobject_t, pair<PendingWB, FDRef> >::iterator i =
#ifdef HAVE_POSIX_FADVISE
if (cct->_conf->filestore_fadvise && wb.get<2>().nocache) {
int fa_r = posix_fadvise(**wb.get<1>(), 0, 0, POSIX_FADV_DONTNEED);
- assert(fa_r == 0);
+ ceph_assert(fa_r == 0);
}
#endif
lock.Lock();
#ifdef HAVE_POSIX_FADVISE
if (cct->_conf->filestore_fadvise && i->second.first.nocache) {
int fa_r = posix_fadvise(**i->second.second, 0, 0, POSIX_FADV_DONTNEED);
- assert(fa_r == 0);
+ ceph_assert(fa_r == 0);
}
#endif
list<ghobject_t> lru;
ceph::unordered_map<ghobject_t, list<ghobject_t>::iterator> rev_lru;
void remove_object(const ghobject_t &oid) {
- assert(lock.is_locked());
+ ceph_assert(lock.is_locked());
ceph::unordered_map<ghobject_t, list<ghobject_t>::iterator>::iterator iter =
rev_lru.find(oid);
if (iter == rev_lru.end())
rev_lru.erase(iter);
}
ghobject_t pop_object() {
- assert(!lru.empty());
+ ceph_assert(!lru.empty());
ghobject_t oid(lru.front());
lru.pop_front();
rev_lru.erase(oid);
return oid;
}
void insert_object(const ghobject_t &oid) {
- assert(rev_lru.find(oid) == rev_lru.end());
+ ceph_assert(rev_lru.find(oid) == rev_lru.end());
lru.push_back(oid);
rev_lru.insert(make_pair(oid, --lru.end()));
}
if (!m_has_extsize)
return -EOPNOTSUPP;
- assert(hint < UINT_MAX);
+ ceph_assert(hint < UINT_MAX);
return set_extsize(fd, hint);
}
list<string> *ls = static_cast<list<string> *>(data);
string str = ZFS::get_name(zh);
size_t pos = str.find('@');
- assert(pos != string::npos && pos + 1 != str.length());
+ ceph_assert(pos != string::npos && pos + 1 != str.length());
ls->push_back(str.substr(pos + 1));
return 0;
}
#include <linux/fs.h>
#endif
+#include "include/assert.h"
+
/*
* chaining xattrs
*
switch (*name) {
case '@': /* escape it */
pos += 2;
- assert (pos < raw_len - 1);
+ ceph_assert(pos < raw_len - 1);
*raw_name = '@';
raw_name++;
*raw_name = '@';
break;
default:
pos++;
- assert(pos < raw_len - 1);
+ ceph_assert(pos < raw_len - 1);
*raw_name = *name;
break;
}
*raw_name = '\0';
} else {
int r = snprintf(raw_name, raw_len - pos, "@%d", i);
- assert(r < raw_len - pos);
+ ceph_assert(r < raw_len - pos);
}
}
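
To make the raw-name scheme above concrete (the attribute name and two-chunk split below are hypothetical): a value larger than one xattr chunk is stored under raw names that append @1, @2, and so on, and any literal '@' in the user-visible name is doubled so the chunk suffix stays unambiguous.

    #include <cstdio>
    #include <string>

    // Sketch of the chunk naming only; buffer handling and size limits omitted.
    static std::string raw_chunk_name(const std::string &name, int i) {
      std::string raw;
      for (char c : name) {
        if (c == '@') raw += "@@";                   // escape a literal '@'
        else          raw += c;
      }
      if (i > 0) raw += "@" + std::to_string(i);     // suffix for chunks after the first
      return raw;
    }

    int main() {
      std::printf("%s\n", raw_chunk_name("user.snap@set", 0).c_str());  // user.snap@@set
      std::printf("%s\n", raw_chunk_name("user.snap@set", 1).c_str());  // user.snap@@set@1
      return 0;
    }
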
break;
}
pos++;
- assert(pos < name_len);
+ ceph_assert(pos < name_len);
name++;
raw_name++;
}
}
}
}
- assert(0 == "unreachable");
+ ceph_assert(0 == "unreachable");
return 0;
}
pos += chunk_size;
ret = pos;
i++;
- assert(size == 0 || !ensure_single_attr);
+ ceph_assert(size == 0 || !ensure_single_attr);
} while (size);
if (ret >= 0 && !skip_chain_cleanup) {
pos += chunk_size;
ret = pos;
i++;
- assert(size == 0 || !ensure_single_attr);
+ ceph_assert(size == 0 || !ensure_single_attr);
} while (size);
if (ret >= 0 && !skip_chain_cleanup) {
int from_fd,
uint64_t from_offset, uint64_t from_len)
{
- assert(0 == "write me");
+ ceph_assert(0 == "write me");
}
int FS::zero(int fd, uint64_t offset, uint64_t length)
derr << "key " << pretty_binary_string(*key) << dendl;
derr << "oid " << oid << dendl;
derr << " t " << t << dendl;
- assert(t == oid);
+ ceph_assert(t == oid);
}
}
}
{
std::lock_guard<std::mutex> l(lock);
dout(30) << __func__ << " " << oid << " " << o << dendl;
- assert(onode_map.count(oid) == 0);
+ ceph_assert(onode_map.count(oid) == 0);
onode_map[oid] = o;
lru.push_front(*o);
}
po = onode_map.find(old_oid);
pn = onode_map.find(new_oid);
- assert(po != onode_map.end());
+ ceph_assert(po != onode_map.end());
if (pn != onode_map.end()) {
lru_list_t::iterator p = lru.iterator_to(*pn->second);
lru.erase(p);
return false;
}
ceph::unordered_map<ghobject_t,OnodeRef>::iterator p = onode_map.begin();
- assert(p != onode_map.end());
+ ceph_assert(p != onode_map.end());
next->first = p->first;
next->second = p->second;
return true;
}
ceph::unordered_map<ghobject_t,OnodeRef>::iterator p = onode_map.find(after);
- assert(p != onode_map.end()); // for now
+ ceph_assert(p != onode_map.end()); // for now
lru_list_t::iterator pi = lru.iterator_to(*p->second);
++pi;
if (pi == lru.end()) {
lru.erase(p--);
} else {
lru.erase(p);
- assert(num == 1);
+ ceph_assert(num == 1);
}
o->get(); // paranoia
onode_map.erase(o->oid);
const ghobject_t& oid,
bool create)
{
- assert(create ? lock.is_wlocked() : lock.is_locked());
+ ceph_assert(create ? lock.is_wlocked() : lock.is_locked());
spg_t pgid;
if (cid.is_pg(&pgid)) {
ldout(store->cct, 20) << " r " << r << " v.len " << v.length() << dendl;
Onode *on;
if (v.length() == 0) {
- assert(r == -ENOENT);
+ ceph_assert(r == -ENOENT);
if (!create)
return OnodeRef();
on->dirty = true;
} else {
// loaded
- assert(r >=0);
+ ceph_assert(r >=0);
on = new Onode(store->cct, oid, key);
on->exists = true;
auto p = v.cbegin();
KStore::~KStore()
{
_shutdown_logger();
- assert(!mounted);
- assert(db == NULL);
- assert(fsid_fd < 0);
+ ceph_assert(!mounted);
+ ceph_assert(db == NULL);
+ ceph_assert(fsid_fd < 0);
}
void KStore::_init_logger()
int KStore::_open_path()
{
- assert(path_fd < 0);
+ ceph_assert(path_fd < 0);
path_fd = ::open(path.c_str(), O_DIRECTORY);
if (path_fd < 0) {
int r = -errno;
int KStore::_open_fsid(bool create)
{
- assert(fsid_fd < 0);
+ ceph_assert(fsid_fd < 0);
int flags = O_RDWR;
if (create)
flags |= O_CREAT;
int KStore::_open_db(bool create)
{
int r;
- assert(!db);
+ ceph_assert(!db);
char fn[PATH_MAX];
snprintf(fn, sizeof(fn), "%s/db", path.c_str());
void KStore::_close_db()
{
- assert(db);
+ ceph_assert(db);
delete db;
db = NULL;
}
int KStore::_open_collections(int *errors)
{
- assert(coll_map.empty());
+ ceph_assert(coll_map.empty());
KeyValueDB::Iterator it = db->get_iterator(PREFIX_COLL);
for (it->upper_bound(string());
it->valid();
int KStore::umount()
{
- assert(mounted);
+ ceph_assert(mounted);
dout(1) << __func__ << dendl;
_sync();
buf0->reset();
if (::statfs(basedir.c_str(), &buf) < 0) {
int r = -errno;
- assert(r != -ENOENT);
+ ceph_assert(r != -ENOENT);
return r;
}
{
pair<ghobject_t,OnodeRef> next;
while (c->onode_map.get_next(next.first, &next)) {
- assert(!next.second->exists);
+ ceph_assert(!next.second->exists);
if (!next.second->flush_txns.empty()) {
dout(10) << __func__ << " " << c->cid << " " << next.second->oid
<< " flush_txns " << next.second->flush_txns << dendl;
get_object_key(cct, start, &k);
if (start.hobj.is_temp()) {
temp = true;
- assert(k >= temp_start_key && k < temp_end_key);
+ ceph_assert(k >= temp_start_key && k < temp_end_key);
} else {
temp = false;
- assert(k >= start_key && k < end_key);
+ ceph_assert(k >= start_key && k < end_key);
}
dout(20) << " start from " << pretty_binary_string(k)
<< " temp=" << (int)temp << dendl;
dout(20) << __func__ << " key " << pretty_binary_string(it->key()) << dendl;
ghobject_t oid;
int r = get_key_object(it->key(), &oid);
- assert(r == 0);
+ ceph_assert(r == 0);
if (ls->size() >= (unsigned)max) {
dout(20) << __func__ << " reached max " << max << dendl;
*pnext = oid;
string KStore::OmapIteratorImpl::key()
{
RWLock::RLocker l(c->lock);
- assert(it->valid());
+ ceph_assert(it->valid());
string db_key = it->raw_key().second;
string user_key;
decode_omap_key(db_key, &user_key);
bufferlist KStore::OmapIteratorImpl::value()
{
RWLock::RLocker l(c->lock);
- assert(it->valid());
+ ceph_assert(it->valid());
return it->value();
}
decode_omap_key(it->key(), &user_key);
dout(30) << __func__ << " got " << pretty_binary_string(it->key())
<< " -> " << user_key << dendl;
- assert(it->key() < tail);
+ ceph_assert(it->key() < tail);
(*out)[user_key] = it->value();
}
it->next();
decode_omap_key(it->key(), &user_key);
dout(30) << __func__ << " got " << pretty_binary_string(it->key())
<< " -> " << user_key << dendl;
- assert(it->key() < tail);
+ ceph_assert(it->key() < tail);
keys->insert(user_key);
it->next();
}
std::lock_guard<std::mutex> l(kv_lock);
if (cct->_conf->kstore_sync_submit_transaction) {
int r = db->submit_transaction(txc->t);
- assert(r == 0);
+ ceph_assert(r == 0);
}
kv_queue.push_back(txc);
kv_cond.notify_one();
}
{
int r = db->submit_transaction_sync(txc->t);
- assert(r == 0);
+ ceph_assert(r == 0);
}
break;
default:
derr << __func__ << " unexpected txc " << txc
<< " state " << txc->get_state_name() << dendl;
- assert(0 == "unexpected txc state");
+ ceph_assert(0 == "unexpected txc state");
return;
}
}
void KStore::_txc_finish(TransContext *txc)
{
dout(20) << __func__ << " " << txc << " onodes " << txc->onodes << dendl;
- assert(txc->state == TransContext::STATE_FINISHING);
+ ceph_assert(txc->state == TransContext::STATE_FINISHING);
for (set<OnodeRef>::iterator p = txc->onodes.begin();
p != txc->onodes.end();
std::lock_guard<std::mutex> l((*p)->flush_lock);
dout(20) << __func__ << " onode " << *p << " had " << (*p)->flush_txns
<< dendl;
- assert((*p)->flush_txns.count(txc));
+ ceph_assert((*p)->flush_txns.count(txc));
(*p)->flush_txns.erase(txc);
if ((*p)->flush_txns.empty()) {
(*p)->flush_cond.notify_all();
dout(10) << __func__ << " start" << dendl;
std::unique_lock<std::mutex> l(kv_lock);
while (true) {
- assert(kv_committing.empty());
+ ceph_assert(kv_committing.empty());
if (kv_queue.empty()) {
if (kv_stop)
break;
it != kv_committing.end();
++it) {
int r = db->submit_transaction((*it)->t);
- assert(r == 0);
+ ceph_assert(r == 0);
}
}
int r = db->submit_transaction_sync(t);
- assert(r == 0);
+ ceph_assert(r == 0);
utime_t finish = ceph_clock_now();
utime_t dur = finish - start;
dout(20) << __func__ << " committed " << kv_committing.size()
case Transaction::OP_MKCOLL:
{
- assert(!c);
+ ceph_assert(!c);
coll_t cid = i.get_cid(op->cid);
r = _create_collection(txc, cid, op->split_bits, &c);
if (!r)
break;
case Transaction::OP_SPLIT_COLLECTION:
- assert(0 == "deprecated");
+ ceph_assert(0 == "deprecated");
break;
case Transaction::OP_SPLIT_COLLECTION2:
break;
case Transaction::OP_COLL_RENAME:
- assert(0 == "not implemented");
+ ceph_assert(0 == "not implemented");
break;
}
if (r < 0) {
f.close_section();
f.flush(*_dout);
*_dout << dendl;
- assert(0 == "unexpected error");
+ ceph_assert(0 == "unexpected error");
}
// object operations
break;
case Transaction::OP_CLONERANGE:
- assert(0 == "deprecated");
+ ceph_assert(0 == "deprecated");
break;
case Transaction::OP_CLONERANGE2:
break;
case Transaction::OP_COLL_ADD:
- assert(0 == "not implemented");
+ ceph_assert(0 == "not implemented");
break;
case Transaction::OP_COLL_REMOVE:
- assert(0 == "not implemented");
+ ceph_assert(0 == "not implemented");
break;
case Transaction::OP_COLL_MOVE:
- assert(0 == "deprecated");
+ ceph_assert(0 == "deprecated");
break;
case Transaction::OP_COLL_MOVE_RENAME:
{
- assert(op->cid == op->dest_cid);
+ ceph_assert(op->cid == op->dest_cid);
const ghobject_t& noid = i.get_oid(op->dest_oid);
OnodeRef no = c->get_onode(noid, true);
r = _rename(txc, c, o, no, noid);
f.close_section();
f.flush(*_dout);
*_dout << dendl;
- assert(0 == "unexpected error");
+ ceph_assert(0 == "unexpected error");
}
}
}
} else {
dout(30) << __func__ << " got header/data "
<< pretty_binary_string(it->key()) << dendl;
- assert(it->key() < tail);
+ ceph_assert(it->key() < tail);
rewrite_omap_key(newo->onode.omap_head, it->key(), &key);
txc->t->set(PREFIX_OMAP, key, it->value());
}
goto out;
}
auto p = new_coll_map.find(cid);
- assert(p != new_coll_map.end());
+ ceph_assert(p != new_coll_map.end());
*c = p->second;
- assert((*c)->cid == cid);
+ ceph_assert((*c)->cid == cid);
(*c)->cnode.bits = bits;
coll_map[cid] = *c;
new_coll_map.erase(p);
c->onode_map.clear();
d->onode_map.clear();
c->cnode.bits = bits;
- assert(d->cnode.bits == bits);
+ ceph_assert(d->cnode.bits == bits);
r = 0;
bufferlist bl;
q_list_t q; ///< transactions
~OpSequencer() {
- assert(q.empty());
+ ceph_assert(q.empty());
}
void queue_new(TransContext *txc) {
if (txc->state >= TransContext::STATE_KV_DONE) {
return true;
}
- assert(txc->state < TransContext::STATE_KV_DONE);
+ ceph_assert(txc->state < TransContext::STATE_KV_DONE);
txc->oncommits.push_back(c);
return false;
}
ThreadPool::TPHandle *handle = NULL) override;
void compact () override {
- assert(db);
+ ceph_assert(db);
db->compact();
}
dout(20) << __func__ << " coll " << p->first << " " << p->second << dendl;
collections.insert(p->first);
bufferlist bl;
- assert(p->second);
+ ceph_assert(p->second);
p->second->encode(bl);
string fn = path + "/" + stringify(p->first);
int r = bl.write_file(fn.c_str());
void MemStore::set_fsid(uuid_d u)
{
int r = write_meta("fsid", stringify(u));
- assert(r >= 0);
+ ceph_assert(r >= 0);
}
uuid_d MemStore::get_fsid()
{
string fsid_str;
int r = read_meta("fsid", &fsid_str);
- assert(r >= 0);
+ ceph_assert(r >= 0);
uuid_d uuid;
bool b = uuid.parse(fsid_str.c_str());
- assert(b);
+ ceph_assert(b);
return uuid;
}
break;
case Transaction::OP_COLL_MOVE:
- assert(0 == "deprecated");
+ ceph_assert(0 == "deprecated");
break;
case Transaction::OP_COLL_MOVE_RENAME:
case Transaction::OP_COLL_SETATTR:
{
- assert(0 == "not implemented");
+ ceph_assert(0 == "not implemented");
}
break;
case Transaction::OP_COLL_RMATTR:
{
- assert(0 == "not implemented");
+ ceph_assert(0 == "not implemented");
}
break;
case Transaction::OP_COLL_RENAME:
{
- assert(0 == "not implemented");
+ ceph_assert(0 == "not implemented");
}
break;
}
break;
case Transaction::OP_SPLIT_COLLECTION:
- assert(0 == "deprecated");
+ ceph_assert(0 == "deprecated");
break;
case Transaction::OP_SPLIT_COLLECTION2:
{
f.close_section();
f.flush(*_dout);
*_dout << dendl;
- assert(0 == "unexpected error");
+ ceph_assert(0 == "unexpected error");
}
}
{
dout(10) << __func__ << " " << cid << " " << oid << " "
<< offset << "~" << len << dendl;
- assert(len == bl.length());
+ ceph_assert(len == bl.length());
CollectionRef c = get_collection(cid);
if (!c)
if (!result.second)
return -EEXIST;
auto p = new_coll_map.find(cid);
- assert(p != new_coll_map.end());
+ ceph_assert(p != new_coll_map.end());
result.first->second = p->second;
result.first->second->bits = bits;
new_coll_map.erase(p);
return -ENOENT;
// note: c and oc may be the same
- assert(&(*c) == &(*oc));
+ ceph_assert(&(*c) == &(*oc));
c->lock.get_write();
int r = -EEXIST;
}
sc->bits = bits;
- assert(dc->bits == (int)bits);
+ ceph_assert(dc->bits == (int)bits);
return 0;
}
length -= c;
}
// make sure we sized the vector correctly
- assert(out == range.rend());
+ ceph_assert(out == range.rend());
}
// return all allocated pages that intersect the range [offset,length)
}
void decode(bufferlist::const_iterator &p) {
using ceph::decode;
- assert(empty());
+ ceph_assert(empty());
decode(page_size, p);
unsigned count;
decode(count, p);