From c3321f7633670ab84899d85da289109f3b56e886 Mon Sep 17 00:00:00 2001
From: "Adam C. Emerson"
Date: Thu, 23 Aug 2018 11:17:42 -0400
Subject: [PATCH] os: Use ceph_assert for asserts

Signed-off-by: Adam C. Emerson
---
 src/os/ObjectStore.h                        |  60 +--
 src/os/bluestore/BitmapFreelistManager.cc   |  20 +-
 src/os/bluestore/BlockDevice.cc             |   2 +-
 src/os/bluestore/BlockDevice.h              |   2 +-
 src/os/bluestore/BlueFS.cc                  | 182 +++---
 src/os/bluestore/BlueFS.h                   |   8 +-
 src/os/bluestore/BlueRocksEnv.cc            |   6 +-
 src/os/bluestore/BlueStore.cc               | 554 ++++++++++----------
 src/os/bluestore/BlueStore.h                |  58 +-
 src/os/bluestore/FreelistManager.cc         |   2 +-
 src/os/bluestore/KernelDevice.cc            |  48 +-
 src/os/bluestore/NVMEDevice.cc              |  56 +-
 src/os/bluestore/PMEMDevice.cc              |  12 +-
 src/os/bluestore/StupidAllocator.cc         |  10 +-
 src/os/bluestore/aio.cc                     |   4 +-
 src/os/bluestore/aio.h                      |   6 +-
 src/os/bluestore/bluestore_types.cc         |  64 +--
 src/os/bluestore/bluestore_types.h          |  62 +--
 src/os/bluestore/fastbmap_allocator_impl.cc |  44 +-
 src/os/bluestore/fastbmap_allocator_impl.h  |  58 +-
 src/os/filestore/DBObjectMap.cc             |  52 +-
 src/os/filestore/DBObjectMap.h              |  10 +-
 src/os/filestore/FDCache.h                  |   4 +-
 src/os/filestore/FileJournal.cc             |  80 +--
 src/os/filestore/FileJournal.h              |   6 +-
 src/os/filestore/FileStore.cc               | 246 ++++-----
 src/os/filestore/FileStore.h                |  20 +-
 src/os/filestore/HashIndex.cc               |  24 +-
 src/os/filestore/HashIndex.h                |   6 +-
 src/os/filestore/JournalingObjectStore.cc   |  16 +-
 src/os/filestore/JournalingObjectStore.h    |   4 +-
 src/os/filestore/LFNIndex.cc                |  14 +-
 src/os/filestore/LFNIndex.h                 |   2 +-
 src/os/filestore/WBThrottle.cc              |  18 +-
 src/os/filestore/WBThrottle.h               |   6 +-
 src/os/filestore/XfsFileStoreBackend.cc     |   2 +-
 src/os/filestore/ZFSFileStoreBackend.cc     |   2 +-
 src/os/filestore/chain_xattr.cc             |  12 +-
 src/os/filestore/chain_xattr.h              |   4 +-
 src/os/fs/FS.cc                             |   2 +-
 src/os/kstore/KStore.cc                     |  98 ++--
 src/os/kstore/KStore.h                      |   6 +-
 src/os/memstore/MemStore.cc                 |  28 +-
 src/os/memstore/PageSet.h                   |   4 +-
 44 files changed, 963 insertions(+), 961 deletions(-)

diff --git a/src/os/ObjectStore.h b/src/os/ObjectStore.h
index 4f36164d74ccb..85b4ef329d1c3 100644
--- a/src/os/ObjectStore.h
+++ b/src/os/ObjectStore.h
@@ -534,9 +534,9 @@ public:
     Context **out_on_applied,
     Context **out_on_commit,
     Context **out_on_applied_sync) {
-    assert(out_on_applied);
-    assert(out_on_commit);
-    assert(out_on_applied_sync);
+    ceph_assert(out_on_applied);
+    ceph_assert(out_on_commit);
+    ceph_assert(out_on_applied_sync);
     list on_applied, on_commit, on_applied_sync;
     for (auto& i : t) {
       on_applied.splice(on_applied.end(), i.on_applied);
@@ -552,9 +552,9 @@ public:
     list *out_on_applied,
     list *out_on_commit,
     list *out_on_applied_sync) {
-    assert(out_on_applied);
-    assert(out_on_commit);
-    assert(out_on_applied_sync);
+    ceph_assert(out_on_applied);
+    ceph_assert(out_on_commit);
+    ceph_assert(out_on_applied_sync);
     for (auto& i : t) {
       out_on_applied->splice(out_on_applied->end(), i.on_applied);
       out_on_commit->splice(out_on_commit->end(), i.on_commit);
@@ -619,17 +619,17 @@ public:
     case OP_ZERO:
     case OP_TRUNCATE:
     case OP_SETALLOCHINT:
-      assert(op->cid < cm.size());
-      assert(op->oid < om.size());
+      ceph_assert(op->cid < cm.size());
+      ceph_assert(op->oid < om.size());
       op->cid = cm[op->cid];
       op->oid = om[op->oid];
       break;

    case OP_CLONERANGE2:
    case OP_CLONE:
-      assert(op->cid < cm.size());
-      assert(op->oid < om.size());
-      assert(op->dest_oid < om.size());
+      ceph_assert(op->cid < cm.size());
+      ceph_assert(op->oid < om.size());
+      ceph_assert(op->dest_oid < om.size());
       op->cid = cm[op->cid];
       op->oid = 
om[op->oid]; op->dest_oid = om[op->dest_oid]; @@ -642,24 +642,24 @@ public: case OP_COLL_SETATTRS: case OP_COLL_HINT: case OP_COLL_SET_BITS: - assert(op->cid < cm.size()); + ceph_assert(op->cid < cm.size()); op->cid = cm[op->cid]; break; case OP_COLL_ADD: - assert(op->cid < cm.size()); - assert(op->oid < om.size()); - assert(op->dest_cid < om.size()); + ceph_assert(op->cid < cm.size()); + ceph_assert(op->oid < om.size()); + ceph_assert(op->dest_cid < om.size()); op->cid = cm[op->cid]; op->dest_cid = cm[op->dest_cid]; op->oid = om[op->oid]; break; case OP_COLL_MOVE_RENAME: - assert(op->cid < cm.size()); - assert(op->oid < om.size()); - assert(op->dest_cid < cm.size()); - assert(op->dest_oid < om.size()); + ceph_assert(op->cid < cm.size()); + ceph_assert(op->oid < om.size()); + ceph_assert(op->dest_cid < cm.size()); + ceph_assert(op->dest_oid < om.size()); op->cid = cm[op->cid]; op->oid = om[op->oid]; op->dest_cid = cm[op->dest_cid]; @@ -667,23 +667,23 @@ public: break; case OP_TRY_RENAME: - assert(op->cid < cm.size()); - assert(op->oid < om.size()); - assert(op->dest_oid < om.size()); + ceph_assert(op->cid < cm.size()); + ceph_assert(op->oid < om.size()); + ceph_assert(op->dest_oid < om.size()); op->cid = cm[op->cid]; op->oid = om[op->oid]; op->dest_oid = om[op->dest_oid]; break; case OP_SPLIT_COLLECTION2: - assert(op->cid < cm.size()); - assert(op->dest_cid < cm.size()); + ceph_assert(op->cid < cm.size()); + ceph_assert(op->dest_cid < cm.size()); op->cid = cm[op->cid]; op->dest_cid = cm[op->dest_cid]; break; default: - assert(0 == "Unknown OP"); + ceph_assert(0 == "Unknown OP"); } } void _update_op_bl( @@ -695,7 +695,7 @@ public: std::list::iterator p; for(p = list.begin(); p != list.end(); ++p) { - assert(p->length() % sizeof(Op) == 0); + ceph_assert(p->length() % sizeof(Op) == 0); char* raw_p = p->c_str(); char* raw_end = raw_p + p->length(); @@ -884,7 +884,7 @@ public: return ops > 0; } Op* decode_op() { - assert(ops > 0); + ceph_assert(ops > 0); Op* op = reinterpret_cast(op_buffer_p); op_buffer_p += sizeof(Op); @@ -926,11 +926,11 @@ public: } const ghobject_t &get_oid(__le32 oid_id) { - assert(oid_id < objects.size()); + ceph_assert(oid_id < objects.size()); return objects[oid_id]; } const coll_t &get_cid(__le32 cid_id) { - assert(cid_id < colls.size()); + ceph_assert(cid_id < colls.size()); return colls[cid_id]; } uint32_t get_fadvise_flags() const { @@ -1029,7 +1029,7 @@ public: _op->len = len; encode(write_data, data_bl); - assert(len == write_data.length()); + ceph_assert(len == write_data.length()); data.fadvise_flags = data.fadvise_flags | flags; if (write_data.length() > data.largest_data_len) { data.largest_data_len = write_data.length(); diff --git a/src/os/bluestore/BitmapFreelistManager.cc b/src/os/bluestore/BitmapFreelistManager.cc index 27af4c75c4aae..2435a0a19444d 100644 --- a/src/os/bluestore/BitmapFreelistManager.cc +++ b/src/os/bluestore/BitmapFreelistManager.cc @@ -27,7 +27,7 @@ struct XorMergeOperator : public KeyValueDB::MergeOperator { const char *ldata, size_t llen, const char *rdata, size_t rlen, std::string *new_value) override { - assert(llen == rlen); + ceph_assert(llen == rlen); *new_value = std::string(ldata, llen); for (size_t i = 0; i < rlen; ++i) { (*new_value)[i] ^= rdata[i]; @@ -62,7 +62,7 @@ int BitmapFreelistManager::create(uint64_t new_size, uint64_t granularity, KeyValueDB::Transaction txn) { bytes_per_block = granularity; - assert(isp2(bytes_per_block)); + ceph_assert(isp2(bytes_per_block)); size = p2align(new_size, bytes_per_block); blocks_per_key 
= cct->_conf->bluestore_freelist_blocks_per_key; @@ -230,13 +230,13 @@ bool BitmapFreelistManager::enumerate_next(uint64_t *offset, uint64_t *length) enumerate_p->lower_bound(string()); // we assert that the first block is always allocated; it's true, // and it simplifies our lives a bit. - assert(enumerate_p->valid()); + ceph_assert(enumerate_p->valid()); string k = enumerate_p->key(); const char *p = k.c_str(); _key_decode_u64(p, &enumerate_offset); enumerate_bl = enumerate_p->value(); - assert(enumerate_offset == 0); - assert(get_next_set_bit(enumerate_bl, 0) == 0); + ceph_assert(enumerate_offset == 0); + ceph_assert(get_next_set_bit(enumerate_bl, 0) == 0); } if (enumerate_offset >= size) { @@ -423,7 +423,7 @@ void BitmapFreelistManager::_verify_range(uint64_t offset, uint64_t length, first_key += bytes_per_key; } } - assert(first_key == last_key); + ceph_assert(first_key == last_key); { string k; make_offset_key(first_key, &k); @@ -452,7 +452,7 @@ void BitmapFreelistManager::_verify_range(uint64_t offset, uint64_t length, } if (errors) { derr << __func__ << " saw " << errors << " errors" << dendl; - assert(0 == "bitmap freelist errors"); + ceph_assert(0 == "bitmap freelist errors"); } } @@ -479,8 +479,8 @@ void BitmapFreelistManager::_xor( KeyValueDB::Transaction txn) { // must be block aligned - assert((offset & block_mask) == offset); - assert((length & block_mask) == length); + ceph_assert((offset & block_mask) == offset); + ceph_assert((length & block_mask) == length); uint64_t first_key = offset & key_mask; uint64_t last_key = (offset + length - 1) & key_mask; @@ -534,7 +534,7 @@ void BitmapFreelistManager::_xor( txn->merge(bitmap_prefix, k, all_set_bl); first_key += bytes_per_key; } - assert(first_key == last_key); + ceph_assert(first_key == last_key); { bufferptr p(blocks_per_key >> 3); p.zero(); diff --git a/src/os/bluestore/BlockDevice.cc b/src/os/bluestore/BlockDevice.cc index dbca074c1bfab..ca19e54a867d1 100644 --- a/src/os/bluestore/BlockDevice.cc +++ b/src/os/bluestore/BlockDevice.cc @@ -76,7 +76,7 @@ uint64_t IOContext::get_num_ios() const void IOContext::release_running_aios() { - assert(!num_running); + ceph_assert(!num_running); #ifdef HAVE_LIBAIO // release aio contexts (including pinned buffers). 
running_aios.clear(); diff --git a/src/os/bluestore/BlockDevice.h b/src/os/bluestore/BlockDevice.h index ef4e27d0ed8b6..262619919c482 100644 --- a/src/os/bluestore/BlockDevice.h +++ b/src/os/bluestore/BlockDevice.h @@ -86,7 +86,7 @@ public: std::lock_guard l(lock); cond.notify_all(); --num_running; - assert(num_running >= 0); + ceph_assert(num_running >= 0); } else { --num_running; } diff --git a/src/os/bluestore/BlueFS.cc b/src/os/bluestore/BlueFS.cc index c58cda4cbd799..bf81bfa73578e 100644 --- a/src/os/bluestore/BlueFS.cc +++ b/src/os/bluestore/BlueFS.cc @@ -154,8 +154,8 @@ void BlueFS::_update_logger_stats() int BlueFS::add_block_device(unsigned id, const string& path, bool trim) { dout(10) << __func__ << " bdev " << id << " path " << path << dendl; - assert(id < bdev.size()); - assert(bdev[id] == NULL); + ceph_assert(id < bdev.size()); + ceph_assert(bdev[id] == NULL); BlockDevice *b = BlockDevice::create(cct, path, NULL, NULL, discard_cb[id], static_cast(this)); int r = b->open(path); if (r < 0) { @@ -175,8 +175,8 @@ int BlueFS::add_block_device(unsigned id, const string& path, bool trim) bool BlueFS::bdev_support_label(unsigned id) { - assert(id < bdev.size()); - assert(bdev[id]); + ceph_assert(id < bdev.size()); + ceph_assert(bdev[id]); return bdev[id]->supported_bdev_label(); } @@ -193,15 +193,15 @@ void BlueFS::add_block_extent(unsigned id, uint64_t offset, uint64_t length) dout(1) << __func__ << " bdev " << id << " 0x" << std::hex << offset << "~" << length << std::dec << dendl; - assert(id < bdev.size()); - assert(bdev[id]); - assert(bdev[id]->get_size() >= offset + length); + ceph_assert(id < bdev.size()); + ceph_assert(bdev[id]); + ceph_assert(bdev[id]->get_size() >= offset + length); block_all[id].insert(offset, length); if (id < alloc.size() && alloc[id]) { log_t.op_alloc_add(id, offset, length); int r = _flush_and_sync_log(l); - assert(r == 0); + ceph_assert(r == 0); alloc[id]->init_add_free(offset, length); } @@ -216,12 +216,12 @@ int BlueFS::reclaim_blocks(unsigned id, uint64_t want, std::unique_lock l(lock); dout(1) << __func__ << " bdev " << id << " want 0x" << std::hex << want << std::dec << dendl; - assert(id < alloc.size()); - assert(alloc[id]); + ceph_assert(id < alloc.size()); + ceph_assert(alloc[id]); int64_t got = alloc[id]->allocate(want, cct->_conf->bluefs_alloc_size, 0, extents); - assert(got != 0); + ceph_assert(got != 0); if (got < 0) { derr << __func__ << " failed to allocate space to return to bluestore" << dendl; @@ -236,7 +236,7 @@ int BlueFS::reclaim_blocks(unsigned id, uint64_t want, flush_bdev(); int r = _flush_and_sync_log(l); - assert(r == 0); + ceph_assert(r == 0); logger->inc(l_bluefs_reclaim_bytes, got); dout(1) << __func__ << " bdev " << id << " want 0x" << std::hex << want @@ -247,7 +247,7 @@ int BlueFS::reclaim_blocks(unsigned id, uint64_t want, void BlueFS::handle_discard(unsigned id, interval_set& to_release) { dout(10) << __func__ << " bdev " << id << dendl; - assert(alloc[id]); + ceph_assert(alloc[id]); alloc[id]->release(to_release); } @@ -266,14 +266,14 @@ uint64_t BlueFS::get_used() uint64_t BlueFS::get_total(unsigned id) { std::lock_guard l(lock); - assert(id < block_all.size()); + ceph_assert(id < block_all.size()); return block_all[id].size(); } uint64_t BlueFS::get_free(unsigned id) { std::lock_guard l(lock); - assert(id < alloc.size()); + ceph_assert(id < alloc.size()); return alloc[id]->get_free(); } @@ -357,7 +357,7 @@ int BlueFS::mkfs(uuid_d osd_uuid) log_file->fnode.prefer_bdev, cct->_conf->bluefs_max_log_runway, 
&log_file->fnode); - assert(r == 0); + ceph_assert(r == 0); log_writer = _create_writer(log_file); // initial txn @@ -401,7 +401,7 @@ void BlueFS::_init_alloc() if (!bdev[id]) { continue; } - assert(bdev[id]->get_size()); + ceph_assert(bdev[id]->get_size()); alloc[id] = Allocator::create(cct, cct->_conf->bluefs_allocator, bdev[id]->get_size(), cct->_conf->bluefs_alloc_size); @@ -460,7 +460,7 @@ int BlueFS::mount() // set up the log for future writes log_writer = _create_writer(_get_file(1)); - assert(log_writer->file->fnode.ino == 1); + ceph_assert(log_writer->file->fnode.ino == 1); log_writer->pos = log_writer->file->fnode.size; dout(10) << __func__ << " log write pos set to 0x" << std::hex << log_writer->pos << std::dec @@ -526,7 +526,7 @@ int BlueFS::_write_super() dout(10) << __func__ << " super block length(encoded): " << bl.length() << dendl; dout(10) << __func__ << " superblock " << super.version << dendl; dout(10) << __func__ << " log_fnode " << super.log_fnode << dendl; - assert(bl.length() <= get_super_length()); + ceph_assert(bl.length() <= get_super_length()); bl.append_zero(get_super_length() - bl.length()); bdev[BDEV_DB]->write(get_super_offset(), bl, false); @@ -583,8 +583,8 @@ int BlueFS::_replay(bool noop, bool to_stdout) } else { // do not use fnode from superblock in 'noop' mode - log_file's one should // be fine and up-to-date - assert(log_file->fnode.ino == 1); - assert(log_file->fnode.extents.size() != 0); + ceph_assert(log_file->fnode.ino == 1); + ceph_assert(log_file->fnode.extents.size() != 0); } dout(10) << __func__ << " log_fnode " << super.log_fnode << dendl; if (unlikely(to_stdout)) { @@ -596,14 +596,14 @@ int BlueFS::_replay(bool noop, bool to_stdout) false, // !random true); // ignore eof while (true) { - assert((log_reader->buf.pos & ~super.block_mask()) == 0); + ceph_assert((log_reader->buf.pos & ~super.block_mask()) == 0); uint64_t pos = log_reader->buf.pos; uint64_t read_pos = pos; bufferlist bl; { int r = _read(log_reader, &log_reader->buf, read_pos, super.block_size, &bl, NULL); - assert(r == (int)super.block_size); + ceph_assert(r == (int)super.block_size); read_pos += r; } uint64_t more = 0; @@ -645,7 +645,7 @@ int BlueFS::_replay(bool noop, bool to_stdout) << ", which is past eof" << dendl; break; } - assert(r == (int)more); + ceph_assert(r == (int)more); bl.claim_append(t); read_pos += r; } @@ -661,7 +661,7 @@ int BlueFS::_replay(bool noop, bool to_stdout) delete log_reader; return -EIO; } - assert(seq == t.seq); + ceph_assert(seq == t.seq); dout(10) << __func__ << " 0x" << std::hex << pos << std::dec << ": " << t << dendl; if (unlikely(to_stdout)) { @@ -683,7 +683,7 @@ int BlueFS::_replay(bool noop, bool to_stdout) << ": op_init" << std::endl; } - assert(t.seq == 1); + ceph_assert(t.seq == 1); break; case bluefs_transaction_t::OP_JUMP: @@ -702,7 +702,7 @@ int BlueFS::_replay(bool noop, bool to_stdout) << std::endl; } - assert(next_seq >= log_seq); + ceph_assert(next_seq >= log_seq); log_seq = next_seq - 1; // we will increment it below uint64_t skip = offset - read_pos; if (skip) { @@ -713,7 +713,7 @@ int BlueFS::_replay(bool noop, bool to_stdout) dout(10) << __func__ << " 0x" << std::hex << read_pos << ": stop: failed to skip to " << offset << std::dec << dendl; - assert(0 == "problem with op_jump"); + ceph_assert(0 == "problem with op_jump"); } } } @@ -730,7 +730,7 @@ int BlueFS::_replay(bool noop, bool to_stdout) << ": op_jump_seq " << next_seq << std::endl; } - assert(next_seq >= log_seq); + ceph_assert(next_seq >= log_seq); log_seq = next_seq 
- 1; // we will increment it below } break; @@ -805,11 +805,11 @@ int BlueFS::_replay(bool noop, bool to_stdout) if (!noop) { FileRef file = _get_file(ino); - assert(file->fnode.ino); + ceph_assert(file->fnode.ino); map::iterator q = dir_map.find(dirname); - assert(q != dir_map.end()); + ceph_assert(q != dir_map.end()); map::iterator r = q->second->file_map.find(filename); - assert(r == q->second->file_map.end()); + ceph_assert(r == q->second->file_map.end()); q->second->file_map[filename] = file; ++file->refs; } @@ -832,10 +832,10 @@ int BlueFS::_replay(bool noop, bool to_stdout) if (!noop) { map::iterator q = dir_map.find(dirname); - assert(q != dir_map.end()); + ceph_assert(q != dir_map.end()); map::iterator r = q->second->file_map.find(filename); - assert(r != q->second->file_map.end()); - assert(r->second->refs > 0); + ceph_assert(r != q->second->file_map.end()); + ceph_assert(r->second->refs > 0); --r->second->refs; q->second->file_map.erase(r); } @@ -855,7 +855,7 @@ int BlueFS::_replay(bool noop, bool to_stdout) if (!noop) { map::iterator q = dir_map.find(dirname); - assert(q == dir_map.end()); + ceph_assert(q == dir_map.end()); dir_map[dirname] = new Dir; } } @@ -874,8 +874,8 @@ int BlueFS::_replay(bool noop, bool to_stdout) if (!noop) { map::iterator q = dir_map.find(dirname); - assert(q != dir_map.end()); - assert(q->second->file_map.empty()); + ceph_assert(q != dir_map.end()); + ceph_assert(q->second->file_map.empty()); dir_map.erase(q); } } @@ -915,7 +915,7 @@ int BlueFS::_replay(bool noop, bool to_stdout) if (!noop) { auto p = file_map.find(ino); - assert(p != file_map.end()); + ceph_assert(p != file_map.end()); file_map.erase(p); } } @@ -928,7 +928,7 @@ int BlueFS::_replay(bool noop, bool to_stdout) return -EIO; } } - assert(p.end()); + ceph_assert(p.end()); // we successfully replayed the transaction; bump the seq and log size ++log_seq; @@ -991,11 +991,11 @@ void BlueFS::_drop_link(FileRef file) { dout(20) << __func__ << " had refs " << file->refs << " on " << file->fnode << dendl; - assert(file->refs > 0); + ceph_assert(file->refs > 0); --file->refs; if (file->refs == 0) { dout(20) << __func__ << " destroying " << file->fnode << dendl; - assert(file->num_reading.load() == 0); + ceph_assert(file->num_reading.load() == 0); log_t.op_file_remove(file->fnode.ino); for (auto& r : file->fnode.extents) { pending_release[r.bdev].insert(r.offset, r.length); @@ -1004,8 +1004,8 @@ void BlueFS::_drop_link(FileRef file) file->deleted = true; if (file->dirty_seq) { - assert(file->dirty_seq > log_seq_stable); - assert(dirty_files.count(file->dirty_seq)); + ceph_assert(file->dirty_seq > log_seq_stable); + ceph_assert(dirty_files.count(file->dirty_seq)); auto it = dirty_files[file->dirty_seq].iterator_to(*file); dirty_files[file->dirty_seq].erase(it); file->dirty_seq = 0; @@ -1045,7 +1045,7 @@ int BlueFS::_read_random( << " of " << *p << dendl; int r = bdev[p->bdev]->read_random(p->offset + x_off, l, out, cct->_conf->bluefs_buffered_io); - assert(r == 0); + ceph_assert(r == 0); off += l; len -= l; ret += l; @@ -1105,7 +1105,7 @@ int BlueFS::_read( << " of " << *p << dendl; int r = bdev[p->bdev]->read(p->offset + x_off, l, &buf->bl, ioc[p->bdev], cct->_conf->bluefs_buffered_io); - assert(r == 0); + ceph_assert(r == 0); } left = buf->get_buf_remaining(off); dout(20) << __func__ << " left 0x" << std::hex << left @@ -1137,7 +1137,7 @@ int BlueFS::_read( } dout(20) << __func__ << " got " << ret << dendl; - assert(!outbl || (int)outbl->length() == ret); + ceph_assert(!outbl || 
(int)outbl->length() == ret); --h->file->num_reading; return ret; } @@ -1224,7 +1224,7 @@ void BlueFS::_compact_log_dump_metadata(bluefs_transaction_t *t) if (p.first == 1) continue; dout(20) << __func__ << " op_file_update " << p.second->fnode << dendl; - assert(p.first > 1); + ceph_assert(p.first > 1); t->op_file_update(p.second->fnode); } for (auto& p : dir_map) { @@ -1263,7 +1263,7 @@ void BlueFS::_compact_log_sync() uint64_t old_allocated = 0; log_file->fnode.swap_extents(old_extents, old_allocated); int r = _allocate(log_file->fnode.prefer_bdev, need, &log_file->fnode); - assert(r == 0); + ceph_assert(r == 0); _close_writer(log_writer); @@ -1271,7 +1271,7 @@ void BlueFS::_compact_log_sync() log_writer = _create_writer(log_file); log_writer->append(bl); r = _flush(log_writer, true); - assert(r == 0); + ceph_assert(r == 0); #ifdef HAVE_LIBAIO if (!cct->_conf->bluefs_sync_write) { list completed_ios; @@ -1322,8 +1322,8 @@ void BlueFS::_compact_log_async(std::unique_lock& l) { dout(10) << __func__ << dendl; File *log_file = log_writer->file.get(); - assert(!new_log); - assert(!new_log_writer); + ceph_assert(!new_log); + ceph_assert(!new_log_writer); // create a new log [writer] so that we know compaction is in progress // (see _should_compact_log) @@ -1344,7 +1344,7 @@ void BlueFS::_compact_log_async(std::unique_lock& l) << " need 0x" << (old_log_jump_to + cct->_conf->bluefs_max_log_runway) << std::dec << dendl; int r = _allocate(log_file->fnode.prefer_bdev, cct->_conf->bluefs_max_log_runway, &log_file->fnode); - assert(r == 0); + ceph_assert(r == 0); dout(10) << __func__ << " log extents " << log_file->fnode.extents << dendl; // update the log file change and log a jump to the offset where we want to @@ -1377,13 +1377,13 @@ void BlueFS::_compact_log_async(std::unique_lock& l) // allocate r = _allocate(BlueFS::BDEV_DB, new_log_jump_to, &new_log->fnode); - assert(r == 0); + ceph_assert(r == 0); new_log_writer = _create_writer(new_log); new_log_writer->append(bl); // 3. flush r = _flush(new_log_writer, true); - assert(r == 0); + ceph_assert(r == 0); // 4. 
wait _flush_bdev_safely(new_log_writer); @@ -1395,7 +1395,7 @@ void BlueFS::_compact_log_async(std::unique_lock& l) uint64_t discarded = 0; mempool::bluefs::vector old_extents; while (discarded < old_log_jump_to) { - assert(!log_file->fnode.extents.empty()); + ceph_assert(!log_file->fnode.extents.empty()); bluefs_extent_t& e = log_file->fnode.extents.front(); bluefs_extent_t temp = e; if (discarded + e.length <= old_log_jump_to) { @@ -1447,7 +1447,7 @@ void BlueFS::_compact_log_async(std::unique_lock& l) // delete the new log, remove from the dirty files list _close_writer(new_log_writer); if (new_log->dirty_seq) { - assert(dirty_files.count(new_log->dirty_seq)); + ceph_assert(dirty_files.count(new_log->dirty_seq)); auto it = dirty_files[new_log->dirty_seq].iterator_to(*new_log); dirty_files[new_log->dirty_seq].erase(it); } @@ -1483,19 +1483,19 @@ int BlueFS::_flush_and_sync_log(std::unique_lock& l, while (log_flushing) { dout(10) << __func__ << " want_seq " << want_seq << " log is currently flushing, waiting" << dendl; - assert(!jump_to); + ceph_assert(!jump_to); log_cond.wait(l); } if (want_seq && want_seq <= log_seq_stable) { dout(10) << __func__ << " want_seq " << want_seq << " <= log_seq_stable " << log_seq_stable << ", done" << dendl; - assert(!jump_to); + ceph_assert(!jump_to); return 0; } if (log_t.empty() && dirty_files.empty()) { dout(10) << __func__ << " want_seq " << want_seq << " " << log_t << " not dirty, dirty_files empty, no-op" << dendl; - assert(!jump_to); + ceph_assert(!jump_to); return 0; } @@ -1503,7 +1503,7 @@ int BlueFS::_flush_and_sync_log(std::unique_lock& l, to_release.swap(pending_release); uint64_t seq = log_t.seq = ++log_seq; - assert(want_seq == 0 || want_seq <= seq); + ceph_assert(want_seq == 0 || want_seq <= seq); log_t.uuid = super.uuid; // log dirty files @@ -1517,7 +1517,7 @@ int BlueFS::_flush_and_sync_log(std::unique_lock& l, } dout(10) << __func__ << " " << log_t << dendl; - assert(!log_t.empty()); + ceph_assert(!log_t.empty()); // allocate some more space (before we run out)? 
int64_t runway = log_writer->file->fnode.get_allocated() - @@ -1532,7 +1532,7 @@ int BlueFS::_flush_and_sync_log(std::unique_lock& l, int r = _allocate(log_writer->file->fnode.prefer_bdev, cct->_conf->bluefs_max_log_runway, &log_writer->file->fnode); - assert(r == 0); + ceph_assert(r == 0); log_t.op_file_update(log_writer->file->fnode); } @@ -1553,7 +1553,7 @@ int BlueFS::_flush_and_sync_log(std::unique_lock& l, log_flushing = true; int r = _flush(log_writer, true); - assert(r == 0); + ceph_assert(r == 0); if (jump_to) { dout(10) << __func__ << " jumping log offset from 0x" << std::hex @@ -1582,14 +1582,14 @@ int BlueFS::_flush_and_sync_log(std::unique_lock& l, auto l = p->second.begin(); while (l != p->second.end()) { File *file = &*l; - assert(file->dirty_seq > 0); - assert(file->dirty_seq <= log_seq_stable); + ceph_assert(file->dirty_seq > 0); + ceph_assert(file->dirty_seq <= log_seq_stable); dout(20) << __func__ << " cleaned file " << file->fnode << dendl; file->dirty_seq = 0; p->second.erase(l++); } - assert(p->second.empty()); + ceph_assert(p->second.empty()); dirty_files.erase(p++); } } else { @@ -1625,8 +1625,8 @@ int BlueFS::_flush_range(FileWriter *h, uint64_t offset, uint64_t length) dout(10) << __func__ << " " << h << " pos 0x" << std::hex << h->pos << " 0x" << offset << "~" << length << std::dec << " to " << h->file->fnode << dendl; - assert(!h->file->deleted); - assert(h->file->num_readers.load() == 0); + ceph_assert(!h->file->deleted); + ceph_assert(h->file->num_readers.load() == 0); h->buffer_appender.flush(); @@ -1645,7 +1645,7 @@ int BlueFS::_flush_range(FileWriter *h, uint64_t offset, uint64_t length) << std::hex << offset << "~" << length << std::dec << dendl; } - assert(offset <= h->file->fnode.size); + ceph_assert(offset <= h->file->fnode.size); uint64_t allocated = h->file->fnode.get_allocated(); @@ -1655,7 +1655,7 @@ int BlueFS::_flush_range(FileWriter *h, uint64_t offset, uint64_t length) if (allocated < offset + length) { // we should never run out of log space here; see the min runway check // in _flush_and_sync_log. 
- assert(h->file->fnode.ino != 1); + ceph_assert(h->file->fnode.ino != 1); int r = _allocate(h->file->fnode.prefer_bdev, offset + length - allocated, &h->file->fnode); @@ -1663,7 +1663,7 @@ int BlueFS::_flush_range(FileWriter *h, uint64_t offset, uint64_t length) derr << __func__ << " allocated: 0x" << std::hex << allocated << " offset: 0x" << offset << " length: 0x" << length << std::dec << dendl; - assert(0 == "bluefs enospc"); + ceph_assert(0 == "bluefs enospc"); return r; } if (cct->_conf->bluefs_preextend_wal_files && @@ -1690,7 +1690,7 @@ int BlueFS::_flush_range(FileWriter *h, uint64_t offset, uint64_t length) } if (must_dirty) { h->file->fnode.mtime = ceph_clock_now(); - assert(h->file->fnode.ino >= 1); + ceph_assert(h->file->fnode.ino >= 1); if (h->file->dirty_seq == 0) { h->file->dirty_seq = log_seq + 1; dirty_files[h->file->dirty_seq].push_back(*h->file); @@ -1699,7 +1699,7 @@ int BlueFS::_flush_range(FileWriter *h, uint64_t offset, uint64_t length) } else { if (h->file->dirty_seq != log_seq + 1) { // need re-dirty, erase from list first - assert(dirty_files.count(h->file->dirty_seq)); + ceph_assert(dirty_files.count(h->file->dirty_seq)); auto it = dirty_files[h->file->dirty_seq].iterator_to(*h->file); dirty_files[h->file->dirty_seq].erase(it); h->file->dirty_seq = log_seq + 1; @@ -1716,7 +1716,7 @@ int BlueFS::_flush_range(FileWriter *h, uint64_t offset, uint64_t length) uint64_t x_off = 0; auto p = h->file->fnode.seek(offset, &x_off); - assert(p != h->file->fnode.extents.end()); + ceph_assert(p != h->file->fnode.extents.end()); dout(20) << __func__ << " in " << *p << " x_off 0x" << std::hex << x_off << std::dec << dendl; @@ -1725,7 +1725,7 @@ int BlueFS::_flush_range(FileWriter *h, uint64_t offset, uint64_t length) if (partial) { dout(20) << __func__ << " using partial tail 0x" << std::hex << partial << std::dec << dendl; - assert(h->tail_block.length() == partial); + ceph_assert(h->tail_block.length() == partial); bl.claim_append_piecewise(h->tail_block); x_off -= partial; offset -= partial; @@ -1748,7 +1748,7 @@ int BlueFS::_flush_range(FileWriter *h, uint64_t offset, uint64_t length) dout(20) << " leaving 0x" << std::hex << h->buffer.length() << std::dec << " unflushed" << dendl; } - assert(bl.length() == length); + ceph_assert(bl.length() == length); switch (h->writer_type) { case WRITER_WAL: @@ -1786,7 +1786,7 @@ int BlueFS::_flush_range(FileWriter *h, uint64_t offset, uint64_t length) const bufferptr &last = t.back(); if (last.unused_tail_length() < zlen) { derr << " wtf, last is " << last << " from " << t << dendl; - assert(last.unused_tail_length() >= zlen); + ceph_assert(last.unused_tail_length() >= zlen); } bufferptr z = last; z.set_offset(last.offset() + last.length()); @@ -1815,7 +1815,7 @@ int BlueFS::_flush_range(FileWriter *h, uint64_t offset, uint64_t length) logger->inc(l_bluefs_bytes_written_slow, bytes_written_slow); for (unsigned i = 0; i < MAX_BDEV; ++i) { if (bdev[i]) { - assert(h->iocv[i]); + ceph_assert(h->iocv[i]); if (h->iocv[i]->has_pending_aios()) { bdev[i]->aio_submit(h->iocv[i]); } @@ -1874,7 +1874,7 @@ int BlueFS::_flush(FileWriter *h, bool force) dout(10) << __func__ << " " << h << " 0x" << std::hex << offset << "~" << length << std::dec << " to " << h->file->fnode << dendl; - assert(h->pos <= h->file->fnode.size); + ceph_assert(h->pos <= h->file->fnode.size); return _flush_range(h, offset, length); } @@ -1888,7 +1888,7 @@ int BlueFS::_truncate(FileWriter *h, uint64_t offset) } // we never truncate internal log files - assert(h->file->fnode.ino 
> 1); + ceph_assert(h->file->fnode.ino > 1); h->buffer_appender.flush(); @@ -1900,7 +1900,7 @@ int BlueFS::_truncate(FileWriter *h, uint64_t offset) << " unflushed bytes" << dendl; t.substr_of(h->buffer, 0, offset - h->pos); h->buffer.swap(t); - assert(0 == "actually this shouldn't happen"); + ceph_assert(0 == "actually this shouldn't happen"); } if (h->buffer.length()) { int r = _flush(h, true); @@ -1911,9 +1911,9 @@ int BlueFS::_truncate(FileWriter *h, uint64_t offset) return 0; // no-op! } if (offset > h->file->fnode.size) { - assert(0 == "truncate up not supported"); + ceph_assert(0 == "truncate up not supported"); } - assert(h->file->fnode.size >= offset); + ceph_assert(h->file->fnode.size >= offset); h->file->fnode.size = offset; log_t.op_file_update(h->file->fnode); return 0; @@ -1934,7 +1934,7 @@ int BlueFS::_fsync(FileWriter *h, std::unique_lock& l) dout(20) << __func__ << " file metadata was dirty (" << old_dirty_seq << ") on " << h->file->fnode << ", flushing log" << dendl; _flush_and_sync_log(l, old_dirty_seq); - assert(h->file->dirty_seq == 0 || // cleaned + ceph_assert(h->file->dirty_seq == 0 || // cleaned h->file->dirty_seq > s); // or redirtied by someone else } return 0; @@ -1987,7 +1987,7 @@ int BlueFS::_allocate(uint8_t id, uint64_t len, { dout(10) << __func__ << " len 0x" << std::hex << len << std::dec << " from " << (int)id << dendl; - assert(id < alloc.size()); + ceph_assert(id < alloc.size()); uint64_t min_alloc_size = cct->_conf->bluefs_alloc_size; uint64_t left = round_up_to(len, min_alloc_size); @@ -2043,7 +2043,7 @@ int BlueFS::_preallocate(FileRef f, uint64_t off, uint64_t len) dout(10) << __func__ << " deleted, no-op" << dendl; return 0; } - assert(f->fnode.ino > 1); + ceph_assert(f->fnode.ino > 1); uint64_t allocated = f->fnode.get_allocated(); if (off + len > allocated) { uint64_t want = off + len - allocated; @@ -2131,7 +2131,7 @@ int BlueFS::open_for_write( file->fnode.clear_extents(); } } - assert(file->fnode.ino > 1); + ceph_assert(file->fnode.ino > 1); file->fnode.mtime = ceph_clock_now(); file->fnode.prefer_bdev = BlueFS::BDEV_DB; @@ -2187,7 +2187,7 @@ void BlueFS::_close_writer(FileWriter *h) dout(10) << __func__ << " " << h << " type " << h->writer_type << dendl; for (unsigned i=0; iiocv[i]); + ceph_assert(h->iocv[i]); h->iocv[i]->aio_wait(); bdev[i]->queue_reap_ioc(h->iocv[i]); } @@ -2259,7 +2259,7 @@ int BlueFS::rename( dout(20) << __func__ << " dir " << new_dirname << " (" << old_dir << ") file " << new_filename << " already exists, unlinking" << dendl; - assert(q->second != file); + ceph_assert(q->second != file); log_t.op_dir_unlink(new_dirname, new_filename); _drop_link(q->second); } @@ -2388,7 +2388,7 @@ int BlueFS::unlock_file(FileLock *fl) { std::lock_guard l(lock); dout(10) << __func__ << " " << fl << " on " << fl->file->fnode << dendl; - assert(fl->file->locked); + ceph_assert(fl->file->locked); fl->file->locked = false; delete fl; return 0; diff --git a/src/os/bluestore/BlueFS.h b/src/os/bluestore/BlueFS.h index aedffa10c3f27..333c16b54bbd3 100644 --- a/src/os/bluestore/BlueFS.h +++ b/src/os/bluestore/BlueFS.h @@ -77,10 +77,10 @@ public: num_reading(0) {} ~File() override { - assert(num_readers.load() == 0); - assert(num_writers.load() == 0); - assert(num_reading.load() == 0); - assert(!locked); + ceph_assert(num_readers.load() == 0); + ceph_assert(num_writers.load() == 0); + ceph_assert(num_reading.load() == 0); + ceph_assert(!locked); } friend void intrusive_ptr_add_ref(File *f) { diff --git a/src/os/bluestore/BlueRocksEnv.cc 
b/src/os/bluestore/BlueRocksEnv.cc index 2813b8c23f49b..b33005e149b41 100644 --- a/src/os/bluestore/BlueRocksEnv.cc +++ b/src/os/bluestore/BlueRocksEnv.cc @@ -23,7 +23,7 @@ rocksdb::Status err_to_status(int r) return rocksdb::Status::IOError(strerror(r)); default: // FIXME :( - assert(0 == "unrecognized error code"); + ceph_assert(0 == "unrecognized error code"); return rocksdb::Status::NotSupported(rocksdb::Status::kNone); } } @@ -48,7 +48,7 @@ class BlueRocksSequentialFile : public rocksdb::SequentialFile { // REQUIRES: External synchronization rocksdb::Status Read(size_t n, rocksdb::Slice* result, char* scratch) override { int r = fs->read(h, &h->buf, h->buf.pos, n, NULL, scratch); - assert(r >= 0); + ceph_assert(r >= 0); *result = rocksdb::Slice(scratch, r); return rocksdb::Status::OK(); } @@ -96,7 +96,7 @@ class BlueRocksRandomAccessFile : public rocksdb::RandomAccessFile { rocksdb::Status Read(uint64_t offset, size_t n, rocksdb::Slice* result, char* scratch) const override { int r = fs->read_random(h, offset, n, scratch); - assert(r >= 0); + ceph_assert(r >= 0); *result = rocksdb::Slice(scratch, r); return rocksdb::Status::OK(); } diff --git a/src/os/bluestore/BlueStore.cc b/src/os/bluestore/BlueStore.cc index 9edf08386a1a9..5f9935d350d94 100644 --- a/src/os/bluestore/BlueStore.cc +++ b/src/os/bluestore/BlueStore.cc @@ -440,7 +440,7 @@ static void get_object_key(CephContext *cct, const ghobject_t& oid, S *key) derr << "key " << pretty_binary_string(*key) << dendl; derr << "oid " << oid << dendl; derr << " t " << t << dendl; - assert(r == 0 && t == oid); + ceph_assert(r == 0 && t == oid); } } } @@ -462,8 +462,8 @@ static void get_extent_shard_key(const S& onode_key, uint32_t offset, static void rewrite_extent_shard_key(uint32_t offset, string *key) { - assert(key->size() > sizeof(uint32_t) + 1); - assert(*key->rbegin() == EXTENT_SHARD_KEY_SUFFIX); + ceph_assert(key->size() > sizeof(uint32_t) + 1); + ceph_assert(*key->rbegin() == EXTENT_SHARD_KEY_SUFFIX); _key_encode_u32(offset, key->size() - sizeof(uint32_t) - 1, key); } @@ -475,7 +475,7 @@ static void generate_extent_shard_key_and_apply( std::function apply) { if (key->empty()) { // make full key - assert(!onode_key.empty()); + ceph_assert(!onode_key.empty()); get_extent_shard_key(onode_key, offset, key); } else { rewrite_extent_shard_key(offset, key); @@ -485,8 +485,8 @@ static void generate_extent_shard_key_and_apply( int get_key_extent_shard(const string& key, string *onode_key, uint32_t *offset) { - assert(key.size() > sizeof(uint32_t) + 1); - assert(*key.rbegin() == EXTENT_SHARD_KEY_SUFFIX); + ceph_assert(key.size() > sizeof(uint32_t) + 1); + ceph_assert(*key.rbegin() == EXTENT_SHARD_KEY_SUFFIX); int okey_len = key.size() - sizeof(uint32_t) - 1; *onode_key = key.substr(0, okey_len); const char *p = key.data() + okey_len; @@ -549,8 +549,8 @@ struct Int64ArrayMergeOperator : public KeyValueDB::MergeOperator { const char *ldata, size_t llen, const char *rdata, size_t rlen, std::string *new_value) override { - assert(llen == rlen); - assert((rlen % 8) == 0); + ceph_assert(llen == rlen); + ceph_assert((rlen % 8) == 0); new_value->resize(rlen); const __le64* lv = (const __le64*)ldata; const __le64* rv = (const __le64*)rdata; @@ -589,7 +589,7 @@ void BlueStore::GarbageCollector::process_protrusive_extents( uint64_t end_touch_offset, uint64_t min_alloc_size) { - assert(start_offset <= start_touch_offset && end_offset>= end_touch_offset); + ceph_assert(start_offset <= start_touch_offset && end_offset>= end_touch_offset); uint64_t 
lookup_start_offset = p2align(start_offset, min_alloc_size); uint64_t lookup_end_offset = round_up_to(end_offset, min_alloc_size); @@ -648,7 +648,7 @@ void BlueStore::GarbageCollector::process_protrusive_extents( blob_info_counted = &bi; used_alloc_unit = alloc_unit_end; - assert(it->length <= bi.referenced_bytes); + ceph_assert(it->length <= bi.referenced_bytes); bi.referenced_bytes -= it->length; dout(30) << __func__ << " affected_blob:" << *b << " unref 0x" << std::hex << it->length @@ -777,7 +777,7 @@ BlueStore::Cache *BlueStore::Cache::create(CephContext* cct, string type, else if (type == "2q") c = new TwoQCache(cct); else - assert(0 == "unrecognized cache type"); + ceph_assert(0 == "unrecognized cache type"); c->logger = logger; return c; @@ -823,7 +823,7 @@ void BlueStore::LRUCache::_trim(uint64_t onode_max, uint64_t buffer_max) } Buffer *b = &*i; - assert(b->is_clean()); + ceph_assert(b->is_clean()); dout(20) << __func__ << " rm " << *b << dendl; b->space->_rm_buffer(this, b); } @@ -835,7 +835,7 @@ void BlueStore::LRUCache::_trim(uint64_t onode_max, uint64_t buffer_max) uint64_t num = onode_lru.size() - onode_max; auto p = onode_lru.end(); - assert(p != onode_lru.begin()); + ceph_assert(p != onode_lru.begin()); --p; int skipped = 0; int max_skipped = g_conf()->bluestore_cache_trim_max_skip_pinned; @@ -864,7 +864,7 @@ void BlueStore::LRUCache::_trim(uint64_t onode_max, uint64_t buffer_max) onode_lru.erase(p--); } else { onode_lru.erase(p); - assert(num == 1); + ceph_assert(num == 1); } o->get(); // paranoia o->c->onode_map.remove(o->oid); @@ -887,7 +887,7 @@ void BlueStore::LRUCache::_audit(const char *when) for (auto i = buffer_lru.begin(); i != buffer_lru.end(); ++i) { derr << __func__ << " " << *i << dendl; } - assert(s == buffer_size); + ceph_assert(s == buffer_size); } dout(20) << __func__ << " " << when << " buffer_size " << buffer_size << " ok" << dendl; @@ -918,14 +918,14 @@ void BlueStore::TwoQCache::_add_buffer(Buffer *b, int level, Buffer *near) buffer_warm_in.insert(buffer_warm_in.iterator_to(*near), *b); break; case BUFFER_WARM_OUT: - assert(b->is_empty()); + ceph_assert(b->is_empty()); buffer_warm_out.insert(buffer_warm_out.iterator_to(*near), *b); break; case BUFFER_HOT: buffer_hot.insert(buffer_hot.iterator_to(*near), *b); break; default: - assert(0 == "bad cache_private"); + ceph_assert(0 == "bad cache_private"); } } else if (b->cache_private == BUFFER_NEW) { b->cache_private = BUFFER_WARM_IN; @@ -952,7 +952,7 @@ void BlueStore::TwoQCache::_add_buffer(Buffer *b, int level, Buffer *near) buffer_hot.push_front(*b); break; default: - assert(0 == "bad cache_private"); + ceph_assert(0 == "bad cache_private"); } } if (!b->is_empty()) { @@ -965,9 +965,9 @@ void BlueStore::TwoQCache::_rm_buffer(Buffer *b) { dout(20) << __func__ << " " << *b << dendl; if (!b->is_empty()) { - assert(buffer_bytes >= b->length); + ceph_assert(buffer_bytes >= b->length); buffer_bytes -= b->length; - assert(buffer_list_bytes[b->cache_private] >= b->length); + ceph_assert(buffer_list_bytes[b->cache_private] >= b->length); buffer_list_bytes[b->cache_private] -= b->length; } switch (b->cache_private) { @@ -981,7 +981,7 @@ void BlueStore::TwoQCache::_rm_buffer(Buffer *b) buffer_hot.erase(buffer_hot.iterator_to(*b)); break; default: - assert(0 == "bad cache_private"); + ceph_assert(0 == "bad cache_private"); } } @@ -993,19 +993,19 @@ void BlueStore::TwoQCache::_move_buffer(Cache *srcc, Buffer *b) // preserve which list we're on (even if we can't preserve the order!) 
switch (b->cache_private) { case BUFFER_WARM_IN: - assert(!b->is_empty()); + ceph_assert(!b->is_empty()); buffer_warm_in.push_back(*b); break; case BUFFER_WARM_OUT: - assert(b->is_empty()); + ceph_assert(b->is_empty()); buffer_warm_out.push_back(*b); break; case BUFFER_HOT: - assert(!b->is_empty()); + ceph_assert(!b->is_empty()); buffer_hot.push_back(*b); break; default: - assert(0 == "bad cache_private"); + ceph_assert(0 == "bad cache_private"); } if (!b->is_empty()) { buffer_bytes += b->length; @@ -1017,9 +1017,9 @@ void BlueStore::TwoQCache::_adjust_buffer_size(Buffer *b, int64_t delta) { dout(20) << __func__ << " delta " << delta << " on " << *b << dendl; if (!b->is_empty()) { - assert((int64_t)buffer_bytes + delta >= 0); + ceph_assert((int64_t)buffer_bytes + delta >= 0); buffer_bytes += delta; - assert((int64_t)buffer_list_bytes[b->cache_private] + delta >= 0); + ceph_assert((int64_t)buffer_list_bytes[b->cache_private] + delta >= 0); buffer_list_bytes[b->cache_private] += delta; } } @@ -1043,7 +1043,7 @@ void BlueStore::TwoQCache::_trim(uint64_t onode_max, uint64_t buffer_max) uint64_t buffer_num = buffer_hot.size() + buffer_warm_in.size(); if (buffer_num) { uint64_t buffer_avg_size = buffer_bytes / buffer_num; - assert(buffer_avg_size); + ceph_assert(buffer_avg_size); uint64_t calculated_buffer_num = buffer_max / buffer_avg_size; kout = calculated_buffer_num * cct->_conf->bluestore_2q_cache_kout_ratio; } @@ -1068,11 +1068,11 @@ void BlueStore::TwoQCache::_trim(uint64_t onode_max, uint64_t buffer_max) } Buffer *b = &*p; - assert(b->is_clean()); + ceph_assert(b->is_clean()); dout(20) << __func__ << " buffer_warm_in -> out " << *b << dendl; - assert(buffer_bytes >= b->length); + ceph_assert(buffer_bytes >= b->length); buffer_bytes -= b->length; - assert(buffer_list_bytes[BUFFER_WARM_IN] >= b->length); + ceph_assert(buffer_list_bytes[BUFFER_WARM_IN] >= b->length); buffer_list_bytes[BUFFER_WARM_IN] -= b->length; to_evict_bytes -= b->length; evicted += b->length; @@ -1102,7 +1102,7 @@ void BlueStore::TwoQCache::_trim(uint64_t onode_max, uint64_t buffer_max) Buffer *b = &*p; dout(20) << __func__ << " buffer_hot rm " << *b << dendl; - assert(b->is_clean()); + ceph_assert(b->is_clean()); // adjust evict size before buffer goes invalid to_evict_bytes -= b->length; evicted += b->length; @@ -1119,7 +1119,7 @@ void BlueStore::TwoQCache::_trim(uint64_t onode_max, uint64_t buffer_max) int64_t num = buffer_warm_out.size() - kout; while (num-- > 0) { Buffer *b = &*buffer_warm_out.rbegin(); - assert(b->is_empty()); + ceph_assert(b->is_empty()); dout(20) << __func__ << " buffer_warm_out rm " << *b << dendl; b->space->_rm_buffer(this, b); } @@ -1132,7 +1132,7 @@ void BlueStore::TwoQCache::_trim(uint64_t onode_max, uint64_t buffer_max) uint64_t num = onode_lru.size() - onode_max; auto p = onode_lru.end(); - assert(p != onode_lru.begin()); + ceph_assert(p != onode_lru.begin()); --p; int skipped = 0; int max_skipped = g_conf()->bluestore_cache_trim_max_skip_pinned; @@ -1162,7 +1162,7 @@ void BlueStore::TwoQCache::_trim(uint64_t onode_max, uint64_t buffer_max) onode_lru.erase(p--); } else { onode_lru.erase(p); - assert(num == 1); + ceph_assert(num == 1); } o->get(); // paranoia o->c->onode_map.remove(o->oid); @@ -1186,7 +1186,7 @@ void BlueStore::TwoQCache::_audit(const char *when) << buffer_list_bytes[BUFFER_HOT] << " != actual " << hot_bytes << dendl; - assert(hot_bytes == buffer_list_bytes[BUFFER_HOT]); + ceph_assert(hot_bytes == buffer_list_bytes[BUFFER_HOT]); } for (auto i = buffer_warm_in.begin(); i 
!= buffer_warm_in.end(); ++i) { @@ -1199,13 +1199,13 @@ void BlueStore::TwoQCache::_audit(const char *when) << buffer_list_bytes[BUFFER_WARM_IN] << " != actual " << warm_in_bytes << dendl; - assert(warm_in_bytes == buffer_list_bytes[BUFFER_WARM_IN]); + ceph_assert(warm_in_bytes == buffer_list_bytes[BUFFER_WARM_IN]); } if (s != buffer_bytes) { derr << __func__ << " buffer_bytes " << buffer_bytes << " actual " << s << dendl; - assert(s == buffer_bytes); + ceph_assert(s == buffer_bytes); } dout(20) << __func__ << " " << when << " buffer_bytes " << buffer_bytes @@ -1319,7 +1319,7 @@ void BlueStore::BufferSpace::read( i != buffer_map.end() && offset < end && i->first < end; ++i) { Buffer *b = i->second.get(); - assert(b->end() > offset); + ceph_assert(b->end() > offset); if (b->is_writing() || b->is_clean()) { if (b->offset < offset) { uint32_t skip = offset - b->offset; @@ -1361,7 +1361,7 @@ void BlueStore::BufferSpace::read( } uint64_t hit_bytes = res_intervals.size(); - assert(hit_bytes <= want_bytes); + ceph_assert(hit_bytes <= want_bytes); uint64_t miss_bytes = want_bytes - hit_bytes; cache->logger->inc(l_bluestore_buffer_hit_bytes, hit_bytes); cache->logger->inc(l_bluestore_buffer_miss_bytes, miss_bytes); @@ -1380,7 +1380,7 @@ void BlueStore::BufferSpace::_finish_write(Cache* cache, uint64_t seq) } Buffer *b = &*i; - assert(b->is_writing()); + ceph_assert(b->is_writing()); if (b->flags & Buffer::FLAG_NOCACHE) { writing.erase(i++); @@ -1428,7 +1428,7 @@ void BlueStore::BufferSpace::split(Cache* cache, size_t pos, BlueStore::BufferSp break; } - assert(p->second->end() > pos); + ceph_assert(p->second->end() > pos); ldout(cache->cct, 30) << __func__ << " move " << *p->second << dendl; if (p->second->data.length()) { r._add_buffer(cache, new Buffer(&r, p->second->state, p->second->seq, @@ -1446,7 +1446,7 @@ void BlueStore::BufferSpace::split(Cache* cache, size_t pos, BlueStore::BufferSp _rm_buffer(cache, p--); } } - assert(writing.empty()); + ceph_assert(writing.empty()); } // OnodeSpace @@ -1526,9 +1526,9 @@ void BlueStore::OnodeSpace::rename( ceph::unordered_map::iterator po, pn; po = onode_map.find(old_oid); pn = onode_map.find(new_oid); - assert(po != pn); + ceph_assert(po != pn); - assert(po != onode_map.end()); + ceph_assert(po != onode_map.end()); if (pn != onode_map.end()) { ldout(cache->cct, 30) << __func__ << " removing target " << pn->second << dendl; @@ -1589,7 +1589,7 @@ ostream& operator<<(ostream& out, const BlueStore::SharedBlob& sb) BlueStore::SharedBlob::SharedBlob(uint64_t i, Collection *_coll) : coll(_coll), sbid_unloaded(i) { - assert(sbid_unloaded > 0); + ceph_assert(sbid_unloaded > 0); if (get_cache()) { get_cache()->add_blob(); } @@ -1626,7 +1626,7 @@ void BlueStore::SharedBlob::put() void BlueStore::SharedBlob::get_ref(uint64_t offset, uint32_t length) { - assert(persistent); + ceph_assert(persistent); persistent->ref_map.get(offset, length); } @@ -1634,7 +1634,7 @@ void BlueStore::SharedBlob::put_ref(uint64_t offset, uint32_t length, PExtentVector *r, bool *unshare) { - assert(persistent); + ceph_assert(persistent); persistent->ref_map.put(offset, length, r, unshare && !*unshare ? unshare : nullptr); } @@ -1706,7 +1706,7 @@ void BlueStore::Blob::discard_unallocated(Collection *coll) all_invalid = false; } } - assert(discard == all_invalid); // in case of compressed blob all + ceph_assert(discard == all_invalid); // in case of compressed blob all // or none pextents are invalid. 
if (discard) { shared_blob->bc.discard(shared_blob->get_cache(), 0, @@ -1741,7 +1741,7 @@ void BlueStore::Blob::get_ref( // references. Otherwise one is neither unable to determine required // amount of counters in case of per-au tracking nor obtain min_release_size // for single counter mode. - assert(get_blob().get_logical_length() != 0); + ceph_assert(get_blob().get_logical_length() != 0); auto cct = coll->store->cct; dout(20) << __func__ << " 0x" << std::hex << offset << "~" << length << std::dec << " " << *this << dendl; @@ -1789,8 +1789,8 @@ bool BlueStore::Blob::can_reuse_blob(uint32_t min_alloc_size, uint32_t target_blob_size, uint32_t b_offset, uint32_t *length0) { - assert(min_alloc_size); - assert(target_blob_size); + ceph_assert(min_alloc_size); + ceph_assert(target_blob_size); if (!get_blob().is_mutable()) { return false; } @@ -1864,8 +1864,8 @@ void BlueStore::Blob::split(Collection *coll, uint32_t blob_offset, Blob *r) auto cct = coll->store->cct; //used by dout dout(10) << __func__ << " 0x" << std::hex << blob_offset << std::dec << " start " << *this << dendl; - assert(blob.can_split()); - assert(used_in_blob.can_split()); + ceph_assert(blob.can_split()); + ceph_assert(used_in_blob.can_split()); bluestore_blob_t &lb = dirty_blob(); bluestore_blob_t &rb = r->dirty_blob(); @@ -1987,7 +1987,7 @@ void BlueStore::ExtentMap::dup(BlueStore* b, TransContext* txc, dirty_range_begin == 0 && dirty_range_end == 0) { dirty_range_begin = e.logical_offset; } - assert(e.logical_end() > 0); + ceph_assert(e.logical_end() > 0); // -1 to exclude next potential shard dirty_range_end = e.logical_end() - 1; } else { @@ -2060,7 +2060,7 @@ void BlueStore::ExtentMap::update(KeyValueDB::Transaction t, // we need to encode inline_bl to measure encoded length bool never_happen = encode_some(0, OBJECT_MAX_SIZE, inline_bl, &n); inline_bl.reassign_to_mempool(mempool::mempool_bluestore_cache_other); - assert(!never_happen); + ceph_assert(!never_happen); size_t len = inline_bl.length(); dout(20) << __func__ << " inline shard " << len << " bytes from " << n << " extents" << dendl; @@ -2085,7 +2085,7 @@ void BlueStore::ExtentMap::update(KeyValueDB::Transaction t, auto p = shards.begin(); auto prev_p = p; while (p != shards.end()) { - assert(p->shard_info->offset >= prev_p->shard_info->offset); + ceph_assert(p->shard_info->offset >= prev_p->shard_info->offset); auto n = p; ++n; if (p->dirty) { @@ -2101,7 +2101,7 @@ void BlueStore::ExtentMap::update(KeyValueDB::Transaction t, bl, &p->extents)) { if (force) { derr << __func__ << " encode_some needs reshard" << dendl; - assert(!force); + ceph_assert(!force); } } size_t len = bl.length(); @@ -2119,7 +2119,7 @@ void BlueStore::ExtentMap::update(KeyValueDB::Transaction t, // avoid resharding the trailing shard, even if it is small else if (n != shards.end() && len < g_conf()->bluestore_extent_map_shard_min_size) { - assert(endoff != OBJECT_MAX_SIZE); + ceph_assert(endoff != OBJECT_MAX_SIZE); if (p == shards.begin()) { // we are the first shard, combine with next shard request_reshard(p->shard_info->offset, endoff + 1); @@ -2178,7 +2178,7 @@ bid_t BlueStore::ExtentMap::allocate_spanning_blob_id() if (bid < 0) bid = 0; } } while (bid != begin_bid); - assert(0 == "no available blob id"); + ceph_assert(0 == "no available blob id"); } void BlueStore::ExtentMap::reshard( @@ -2328,7 +2328,7 @@ void BlueStore::ExtentMap::reshard( shards.insert(shards.begin() + si_begin, new_shard_info.size(), Shard()); si_end = si_begin + new_shard_info.size(); - assert(sv.size() == 
shards.size()); + ceph_assert(sv.size() == shards.size()); // note that we need to update every shard_info of shards here, // as sv might have been totally re-allocated above @@ -2383,7 +2383,7 @@ void BlueStore::ExtentMap::reshard( dout(30) << " extent " << *e << dendl; while (e->logical_offset >= shard_end) { shard_start = shard_end; - assert(sp != esp); + ceph_assert(sp != esp); ++sp; if (sp == esp) { shard_end = OBJECT_MAX_SIZE; @@ -2468,7 +2468,7 @@ bool BlueStore::ExtentMap::encode_some( for (auto p = start; p != extent_map.end() && p->logical_offset < end; ++p, ++n) { - assert(p->logical_offset >= offset); + ceph_assert(p->logical_offset >= offset); p->blob->last_encoded_id = -1; if (!p->blob->is_spanning() && p->blob_escapes_range(offset, length)) { dout(30) << __func__ << " 0x" << std::hex << offset << "~" << length @@ -2566,14 +2566,14 @@ unsigned BlueStore::ExtentMap::decode_some(bufferlist& bl) *_dout << dendl; */ - assert(bl.get_num_buffers() <= 1); + ceph_assert(bl.get_num_buffers() <= 1); auto p = bl.front().begin_deep(); __u8 struct_v; denc(struct_v, p); // Version 2 differs from v1 in blob's ref_map // serialization only. Hence there is no specific // handling at ExtentMap level below. - assert(struct_v == 1 || struct_v == 2); + ceph_assert(struct_v == 1 || struct_v == 2); uint32_t num; denc_varint(num, p); @@ -2610,7 +2610,7 @@ unsigned BlueStore::ExtentMap::decode_some(bufferlist& bl) blobid >>= BLOBID_SHIFT_BITS; if (blobid) { le->assign_blob(blobs[blobid - 1]); - assert(le->blob); + ceph_assert(le->blob); } else { Blob *b = new Blob(); uint64_t sbid = 0; @@ -2630,7 +2630,7 @@ unsigned BlueStore::ExtentMap::decode_some(bufferlist& bl) extent_map.insert(*le); } - assert(n == num); + ceph_assert(n == num); return num; } @@ -2675,7 +2675,7 @@ void BlueStore::ExtentMap::decode_spanning_blobs( // Version 2 differs from v1 in blob's ref_map // serialization only. Hence there is no specific // handling at ExtentMap level. 
- assert(struct_v == 1 || struct_v == 2); + ceph_assert(struct_v == 1 || struct_v == 2); unsigned n; denc_varint(n, p); @@ -2715,10 +2715,10 @@ void BlueStore::ExtentMap::fault_range( if (start < 0) return; - assert(last >= start); + ceph_assert(last >= start); string key; while (start <= last) { - assert((size_t)start < shards.size()); + ceph_assert((size_t)start < shards.size()); auto p = &shards[start]; if (!p->loaded) { dout(30) << __func__ << " opening shard 0x" << std::hex @@ -2732,7 +2732,7 @@ void BlueStore::ExtentMap::fault_range( derr << __func__ << " missing shard 0x" << std::hex << p->shard_info->offset << std::dec << " for " << onode->oid << dendl; - assert(r >= 0); + ceph_assert(r >= 0); } } ); @@ -2741,8 +2741,8 @@ void BlueStore::ExtentMap::fault_range( dout(20) << __func__ << " open shard 0x" << std::hex << p->shard_info->offset << std::dec << " (" << v.length() << " bytes)" << dendl; - assert(p->dirty == false); - assert(v.length() == p->shard_info->bytes); + ceph_assert(p->dirty == false); + ceph_assert(v.length() == p->shard_info->bytes); onode->c->store->logger->inc(l_bluestore_onode_shard_misses); } else { onode->c->store->logger->inc(l_bluestore_onode_shard_hits); @@ -2771,15 +2771,15 @@ void BlueStore::ExtentMap::dirty_range( if (start < 0) return; - assert(last >= start); + ceph_assert(last >= start); while (start <= last) { - assert((size_t)start < shards.size()); + ceph_assert((size_t)start < shards.size()); auto p = &shards[start]; if (!p->loaded) { derr << __func__ << "on write 0x" << std::hex << offset << "~" << length << " shard 0x" << p->shard_info->offset << std::dec << " is not loaded, can't mark dirty" << dendl; - assert(0 == "can't mark unloaded shard dirty"); + ceph_assert(0 == "can't mark unloaded shard dirty"); } if (!p->dirty) { dout(20) << __func__ << " mark shard 0x" << std::hex @@ -2847,7 +2847,7 @@ int BlueStore::ExtentMap::compress_extent_map( --p; // start to the left of offset } // the caller should have just written to this region - assert(p != extent_map.end()); + ceph_assert(p != extent_map.end()); // identify the *next* shard auto pshard = shards.begin(); @@ -2883,7 +2883,7 @@ int BlueStore::ExtentMap::compress_extent_map( break; } if (n->logical_offset >= shard_end) { - assert(pshard != shards.end()); + ceph_assert(pshard != shards.end()); ++pshard; if (pshard != shards.end()) { shard_end = pshard->shard_info->offset; @@ -2925,7 +2925,7 @@ void BlueStore::ExtentMap::punch_hole( break; } else { // deref tail - assert(p->logical_end() > offset); // else seek_lextent bug + ceph_assert(p->logical_end() > offset); // else seek_lextent bug uint64_t keep = offset - p->logical_offset; OldExtent* oe = OldExtent::create(c, offset, p->blob_offset + keep, p->length - keep, p->blob); @@ -2962,7 +2962,7 @@ BlueStore::Extent *BlueStore::ExtentMap::set_lextent( old_extent_map_t *old_extents) { // We need to have completely initialized Blob to increment its ref counters. 
- assert(b->get_blob().get_logical_length() != 0); + ceph_assert(b->get_blob().get_logical_length() != 0); // Do get_ref prior to punch_hole to prevent from putting reused blob into // old_extents list if we overwre the blob totally @@ -3011,7 +3011,7 @@ BlueStore::BlobRef BlueStore::ExtentMap::split_blob( dout(30) << __func__ << " to " << *ne << dendl; } else { // switch blob - assert(ep->blob_offset >= blob_offset); + ceph_assert(ep->blob_offset >= blob_offset); ep->blob = rb; ep->blob_offset -= blob_offset; @@ -3048,8 +3048,8 @@ bool BlueStore::WriteContext::has_conflict( uint64_t loffs_end, uint64_t min_alloc_size) { - assert((loffs % min_alloc_size) == 0); - assert((loffs_end % min_alloc_size) == 0); + ceph_assert((loffs % min_alloc_size) == 0); + ceph_assert((loffs_end % min_alloc_size) == 0); for (auto w : writes) { if (b == w.b) { auto loffs2 = p2align(w.logical_offset, min_alloc_size); @@ -3076,7 +3076,7 @@ void BlueStore::DeferredBatch::prepare_write( { _discard(cct, offset, length); auto i = iomap.insert(make_pair(offset, deferred_io())); - assert(i.second); // this should be a new insertion + ceph_assert(i.second); // this should be a new insertion i.first->second.seq = seq; blp.copy(length, i.first->second.bl); i.first->second.bl.reassign_to_mempool( @@ -3107,7 +3107,7 @@ void BlueStore::DeferredBatch::_discard( << " 0x" << std::hex << p->first << "~" << p->second.bl.length() << " -> 0x" << head.length() << std::dec << dendl; auto i = seq_bytes.find(p->second.seq); - assert(i != seq_bytes.end()); + ceph_assert(i != seq_bytes.end()); if (end > offset + length) { bufferlist tail; tail.substr_of(p->second.bl, offset + length - p->first, @@ -3122,7 +3122,7 @@ void BlueStore::DeferredBatch::_discard( } else { i->second -= end - offset; } - assert(i->second >= 0); + ceph_assert(i->second >= 0); p->second.bl.swap(head); } ++p; @@ -3132,7 +3132,7 @@ void BlueStore::DeferredBatch::_discard( break; } auto i = seq_bytes.find(p->second.seq); - assert(i != seq_bytes.end()); + ceph_assert(i != seq_bytes.end()); auto end = p->first + p->second.bl.length(); if (end > offset + length) { unsigned drop_front = offset + length - p->first; @@ -3152,7 +3152,7 @@ void BlueStore::DeferredBatch::_discard( << std::dec << dendl; i->second -= p->second.bl.length(); } - assert(i->second >= 0); + ceph_assert(i->second >= 0); p = iomap.erase(p); } } @@ -3165,11 +3165,11 @@ void BlueStore::DeferredBatch::_audit(CephContext *cct) } uint64_t pos = 0; for (auto& p : iomap) { - assert(p.first >= pos); + ceph_assert(p.first >= pos); sb[p.second.seq] += p.second.bl.length(); pos = p.first + p.second.bl.length(); } - assert(sb == seq_bytes); + ceph_assert(sb == seq_bytes); } @@ -3197,7 +3197,7 @@ BlueStore::Collection::Collection(BlueStore *store_, Cache *c, coll_t cid) store->zombie_osr_set.erase(p); ldout(store->cct, 10) << "resurrecting zombie osr " << osr << dendl; osr->zombie = false; - assert(osr->shard == cid.hash_to_shard(store->m_finisher_num)); + ceph_assert(osr->shard == cid.hash_to_shard(store->m_finisher_num)); } } } @@ -3219,7 +3219,7 @@ void BlueStore::Collection::flush_all_but_last() void BlueStore::Collection::open_shared_blob(uint64_t sbid, BlobRef b) { - assert(!b->shared_blob); + ceph_assert(!b->shared_blob); const bluestore_blob_t& blob = b->get_blob(); if (!blob.is_shared()) { b->shared_blob = new SharedBlob(this); @@ -3252,7 +3252,7 @@ void BlueStore::Collection::load_shared_blob(SharedBlobRef sb) lderr(store->cct) << __func__ << " sbid 0x" << std::hex << sbid << std::dec << " not found at 
key " << pretty_binary_string(key) << dendl; - assert(0 == "uh oh, missing shared_blob"); + ceph_assert(0 == "uh oh, missing shared_blob"); } sb->loaded = true; @@ -3267,7 +3267,7 @@ void BlueStore::Collection::load_shared_blob(SharedBlobRef sb) void BlueStore::Collection::make_blob_shared(uint64_t sbid, BlobRef b) { ldout(store->cct, 10) << __func__ << " " << *b << dendl; - assert(!b->shared_blob->is_loaded()); + ceph_assert(!b->shared_blob->is_loaded()); // update blob bluestore_blob_t& blob = b->dirty_blob(); @@ -3290,7 +3290,7 @@ void BlueStore::Collection::make_blob_shared(uint64_t sbid, BlobRef b) uint64_t BlueStore::Collection::make_blob_unshared(SharedBlob *sb) { ldout(store->cct, 10) << __func__ << " " << *sb << dendl; - assert(sb->is_loaded()); + ceph_assert(sb->is_loaded()); uint64_t sbid = sb->get_sbid(); shared_blob_set.remove(sb); @@ -3305,7 +3305,7 @@ BlueStore::OnodeRef BlueStore::Collection::get_onode( const ghobject_t& oid, bool create) { - assert(create ? lock.is_wlocked() : lock.is_locked()); + ceph_assert(create ? lock.is_wlocked() : lock.is_locked()); spg_t pgid; if (cid.is_pg(&pgid)) { @@ -3331,7 +3331,7 @@ BlueStore::OnodeRef BlueStore::Collection::get_onode( ldout(store->cct, 20) << " r " << r << " v.len " << v.length() << dendl; Onode *on; if (v.length() == 0) { - assert(r == -ENOENT); + ceph_assert(r == -ENOENT); if (!store->cct->_conf->bluestore_debug_misc && !create) return OnodeRef(); @@ -3340,7 +3340,7 @@ BlueStore::OnodeRef BlueStore::Collection::get_onode( on = new Onode(this, oid, key); } else { // loaded - assert(r >= 0); + ceph_assert(r >= 0); on = new Onode(this, oid, key); on->exists = true; auto p = v.front().begin_deep(); @@ -3377,7 +3377,7 @@ void BlueStore::Collection::split_cache( int destbits = dest->cnode.bits; spg_t destpg; bool is_pg = dest->cid.is_pg(&destpg); - assert(is_pg); + ceph_assert(is_pg); auto p = onode_map.onode_map.begin(); while (p != onode_map.onode_map.end()) { @@ -3632,7 +3632,7 @@ void BlueStore::MempoolThread::_balance_cache( } } // assert if we assigned more memory than is available. 
- assert(mem_avail >= 0); + ceph_assert(mem_avail >= 0); // Finally commit the new cache sizes for (auto it = caches.begin(); it != caches.end(); it++) { @@ -3796,7 +3796,7 @@ int BlueStore::OmapIteratorImpl::next(bool validate) string BlueStore::OmapIteratorImpl::key() { RWLock::RLocker l(c->lock); - assert(it->valid()); + ceph_assert(it->valid()); string db_key = it->raw_key().second; string user_key; decode_omap_key(db_key, &user_key); @@ -3806,7 +3806,7 @@ string BlueStore::OmapIteratorImpl::key() bufferlist BlueStore::OmapIteratorImpl::value() { RWLock::RLocker l(c->lock); - assert(it->valid()); + ceph_assert(it->valid()); return it->value(); } @@ -3834,7 +3834,7 @@ static void discard_cb(void *priv, void *priv2) void BlueStore::handle_discard(interval_set& to_release) { dout(10) << __func__ << dendl; - assert(alloc); + ceph_assert(alloc); alloc->release(to_release); } @@ -3885,11 +3885,11 @@ BlueStore::~BlueStore() cct->_conf.remove_observer(this); _shutdown_logger(); - assert(!mounted); - assert(db == NULL); - assert(bluefs == NULL); - assert(fsid_fd < 0); - assert(path_fd < 0); + ceph_assert(!mounted); + ceph_assert(db == NULL); + ceph_assert(bluefs == NULL); + ceph_assert(fsid_fd < 0); + ceph_assert(path_fd < 0); for (auto i : cache_shards) { delete i; } @@ -4005,7 +4005,7 @@ void BlueStore::_set_compression() if (cct->_conf->bluestore_compression_min_blob_size) { comp_min_blob_size = cct->_conf->bluestore_compression_min_blob_size; } else { - assert(bdev); + ceph_assert(bdev); if (bdev->is_rotational()) { comp_min_blob_size = cct->_conf->bluestore_compression_min_blob_size_hdd; } else { @@ -4016,7 +4016,7 @@ void BlueStore::_set_compression() if (cct->_conf->bluestore_compression_max_blob_size) { comp_max_blob_size = cct->_conf->bluestore_compression_max_blob_size; } else { - assert(bdev); + ceph_assert(bdev); if (bdev->is_rotational()) { comp_max_blob_size = cct->_conf->bluestore_compression_max_blob_size_hdd; } else { @@ -4057,7 +4057,7 @@ void BlueStore::_set_throttle_params() if (cct->_conf->bluestore_throttle_cost_per_io) { throttle_cost_per_io = cct->_conf->bluestore_throttle_cost_per_io; } else { - assert(bdev); + ceph_assert(bdev); if (bdev->is_rotational()) { throttle_cost_per_io = cct->_conf->bluestore_throttle_cost_per_io_hdd; } else { @@ -4073,7 +4073,7 @@ void BlueStore::_set_blob_size() if (cct->_conf->bluestore_max_blob_size) { max_blob_size = cct->_conf->bluestore_max_blob_size; } else { - assert(bdev); + ceph_assert(bdev); if (bdev->is_rotational()) { max_blob_size = cct->_conf->bluestore_max_blob_size_hdd; } else { @@ -4090,7 +4090,7 @@ void BlueStore::_set_finisher_num() if (cct->_conf->osd_op_num_shards) { m_finisher_num = cct->_conf->osd_op_num_shards; } else { - assert(bdev); + ceph_assert(bdev); if (bdev->is_rotational()) { m_finisher_num = cct->_conf->osd_op_num_shards_hdd; } else { @@ -4098,12 +4098,12 @@ void BlueStore::_set_finisher_num() } } } - assert(m_finisher_num != 0); + ceph_assert(m_finisher_num != 0); } int BlueStore::_set_cache_sizes() { - assert(bdev); + ceph_assert(bdev); cache_autotune = cct->_conf.get_val("bluestore_cache_autotune"); cache_autotune_chunk_size = cct->_conf.get_val("bluestore_cache_autotune_chunk_size"); @@ -4175,7 +4175,7 @@ int BlueStore::write_meta(const std::string& key, const std::string& value) } label.meta[key] = value; r = _write_bdev_label(cct, p, label); - assert(r == 0); + ceph_assert(r == 0); return ObjectStore::write_meta(key, value); } @@ -4385,7 +4385,7 @@ int BlueStore::_open_path() << "; BlueStore has hard 
limit of 0x" << OBJECT_MAX_SIZE << "." << std::dec << dendl; return -EINVAL; } - assert(path_fd < 0); + ceph_assert(path_fd < 0); path_fd = TEMP_FAILURE_RETRY(::open(path.c_str(), O_DIRECTORY)); if (path_fd < 0) { int r = -errno; @@ -4410,7 +4410,7 @@ int BlueStore::_write_bdev_label(CephContext *cct, encode(label, bl); uint32_t crc = bl.crc32c(-1); encode(crc, bl); - assert(bl.length() <= BDEV_LABEL_BLOCK_SIZE); + ceph_assert(bl.length() <= BDEV_LABEL_BLOCK_SIZE); bufferptr z(BDEV_LABEL_BLOCK_SIZE - bl.length()); z.zero(); bl.append(std::move(z)); @@ -4518,7 +4518,7 @@ void BlueStore::_set_alloc_sizes(void) if (cct->_conf->bluestore_prefer_deferred_size) { prefer_deferred_size = cct->_conf->bluestore_prefer_deferred_size; } else { - assert(bdev); + ceph_assert(bdev); if (bdev->is_rotational()) { prefer_deferred_size = cct->_conf->bluestore_prefer_deferred_size_hdd; } else { @@ -4529,7 +4529,7 @@ void BlueStore::_set_alloc_sizes(void) if (cct->_conf->bluestore_deferred_batch_ops) { deferred_batch_ops = cct->_conf->bluestore_deferred_batch_ops; } else { - assert(bdev); + ceph_assert(bdev); if (bdev->is_rotational()) { deferred_batch_ops = cct->_conf->bluestore_deferred_batch_ops_hdd; } else { @@ -4548,7 +4548,7 @@ void BlueStore::_set_alloc_sizes(void) int BlueStore::_open_bdev(bool create) { - assert(bdev == NULL); + ceph_assert(bdev == NULL); string p = path + "/block"; bdev = BlockDevice::create(cct, p, aio_cb, static_cast(this), discard_cb, static_cast(this)); int r = bdev->open(p); @@ -4569,7 +4569,7 @@ int BlueStore::_open_bdev(bool create) block_size = bdev->get_block_size(); block_mask = ~(block_size - 1); block_size_order = ctz(block_size); - assert(block_size == 1u << block_size_order); + ceph_assert(block_size == 1u << block_size_order); // and set cache_size based on device type r = _set_cache_sizes(); if (r < 0) { @@ -4587,24 +4587,24 @@ int BlueStore::_open_bdev(bool create) void BlueStore::_validate_bdev() { - assert(bdev); - assert(min_alloc_size); // _get_odisk_reserved depends on that + ceph_assert(bdev); + ceph_assert(min_alloc_size); // _get_odisk_reserved depends on that uint64_t dev_size = bdev->get_size(); if (dev_size < _get_ondisk_reserved() + cct->_conf->bluestore_bluefs_min) { dout(1) << __func__ << " main device size " << byte_u_t(dev_size) << " is too small, disable bluestore_bluefs_min for now" << dendl; - assert(dev_size >= _get_ondisk_reserved()); + ceph_assert(dev_size >= _get_ondisk_reserved()); int r = cct->_conf.set_val("bluestore_bluefs_min", "0"); - assert(r == 0); + ceph_assert(r == 0); } } void BlueStore::_close_bdev() { - assert(bdev); + ceph_assert(bdev); bdev->close(); delete bdev; bdev = NULL; @@ -4612,7 +4612,7 @@ void BlueStore::_close_bdev() int BlueStore::_open_fm(bool create) { - assert(fm == NULL); + ceph_assert(fm == NULL); fm = FreelistManager::create(cct, freelist_type, db, PREFIX_ALLOC); if (create) { @@ -4626,7 +4626,7 @@ int BlueStore::_open_fm(bool create) } // being able to allocate in units less than bdev block size // seems to be a bad idea. - assert( cct->_conf->bdev_block_size <= (int64_t)min_alloc_size); + ceph_assert( cct->_conf->bdev_block_size <= (int64_t)min_alloc_size); fm->create(bdev->get_size(), (int64_t)min_alloc_size, t); // allocate superblock reserved space. 
note that we do not mark @@ -4636,7 +4636,7 @@ int BlueStore::_open_fm(bool create) fm->allocate(0, reserved, t); if (cct->_conf->bluestore_bluefs) { - assert(bluefs_extents.num_intervals() == 1); + ceph_assert(bluefs_extents.num_intervals() == 1); interval_set::iterator p = bluefs_extents.begin(); reserved = round_up_to(p.get_start() + p.get_len(), min_alloc_size); dout(20) << __func__ << " reserved 0x" << std::hex << reserved << std::dec @@ -4665,7 +4665,7 @@ int BlueStore::_open_fm(bool create) l = end - start; l = p2align(l, min_alloc_size); } - assert(start + l <= end); + ceph_assert(start + l <= end); uint64_t u = 1 + (uint64_t)(r * (double)l); u = p2roundup(u, min_alloc_size); @@ -4675,7 +4675,7 @@ int BlueStore::_open_fm(bool create) u = p2align(u, min_alloc_size); stop = true; } - assert(start + l + u <= end); + ceph_assert(start + l + u <= end); dout(20) << __func__ << " free 0x" << std::hex << start << "~" << l << " use 0x" << u << std::dec << dendl; @@ -4705,7 +4705,7 @@ int BlueStore::_open_fm(bool create) void BlueStore::_close_fm() { dout(10) << __func__ << dendl; - assert(fm); + ceph_assert(fm); fm->shutdown(); delete fm; fm = NULL; @@ -4713,8 +4713,8 @@ void BlueStore::_close_fm() int BlueStore::_open_alloc() { - assert(alloc == NULL); - assert(bdev->get_size()); + ceph_assert(alloc == NULL); + ceph_assert(bdev->get_size()); alloc = Allocator::create(cct, cct->_conf->bluestore_allocator, bdev->get_size(), min_alloc_size); @@ -4753,10 +4753,10 @@ int BlueStore::_open_alloc() void BlueStore::_close_alloc() { - assert(bdev); + ceph_assert(bdev); bdev->discard_drain(); - assert(alloc); + ceph_assert(alloc); alloc->shutdown(); delete alloc; alloc = NULL; @@ -4764,7 +4764,7 @@ void BlueStore::_close_alloc() int BlueStore::_open_fsid(bool create) { - assert(fsid_fd < 0); + ceph_assert(fsid_fd < 0); int flags = O_RDWR; if (create) flags |= O_CREAT; @@ -4910,7 +4910,7 @@ bool BlueStore::test_mount_in_use() int BlueStore::_open_db(bool create, bool to_repair_db) { int r; - assert(!db); + ceph_assert(!db); string fn = path + "/db"; string options; stringstream err; @@ -5035,7 +5035,7 @@ int BlueStore::_open_db(bool create, bool to_repair_db) uint64_t start = p2align((bdev->get_size() - initial) / 2, cct->_conf->bluefs_alloc_size); //avoiding superblock overwrite - assert(cct->_conf->bluefs_alloc_size > _get_ondisk_reserved()); + ceph_assert(cct->_conf->bluefs_alloc_size > _get_ondisk_reserved()); start = std::max(cct->_conf->bluefs_alloc_size, start); bluefs->add_block_extent(bluefs_shared_bdev, start, initial); @@ -5218,7 +5218,7 @@ int BlueStore::_open_db(bool create, bool to_repair_db) return 0; free_bluefs: - assert(bluefs); + ceph_assert(bluefs); delete bluefs; bluefs = NULL; return r; @@ -5226,7 +5226,7 @@ free_bluefs: void BlueStore::_close_db() { - assert(db); + ceph_assert(db); delete db; db = NULL; if (bluefs) { @@ -5241,7 +5241,7 @@ int BlueStore::_reconcile_bluefs_freespace() dout(10) << __func__ << dendl; interval_set bset; int r = bluefs->get_block_extents(bluefs_shared_bdev, &bset); - assert(r == 0); + ceph_assert(r == 0); if (bset == bluefs_extents) { dout(10) << __func__ << " we agree bluefs has 0x" << std::hex << bset << std::dec << dendl; @@ -5294,11 +5294,11 @@ void BlueStore::_dump_alloc_on_rebalance_failure() int BlueStore::_balance_bluefs_freespace(PExtentVector *extents) { int ret = 0; - assert(bluefs); + ceph_assert(bluefs); vector> bluefs_usage; // ... 
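[Aside for reviewers: the substance of every hunk here is the same — the libc assert becomes ceph_assert. As I understand include/ceph_assert.h at this point in the tree, ceph_assert is not gated on NDEBUG, so the checks survive release builds, and on failure it routes through __ceph_assert_fail(), which logs the expression, file, line, and function plus a backtrace before aborting. A minimal sketch of that behavior; example_ceph_assert and example_assert_fail are illustrative names, not the upstream implementation:

    #include <cstdio>
    #include <cstdlib>

    // Failure handler: upstream also dumps a backtrace and recent log
    // entries; this sketch only prints the context and aborts.
    [[noreturn]] inline void example_assert_fail(const char *expr,
                                                 const char *file, int line,
                                                 const char *func) {
      std::fprintf(stderr, "%s: %d: FAILED ceph_assert(%s) in %s\n",
                   file, line, expr, func);
      std::abort();
    }

    // Unlike <cassert>, nothing here depends on NDEBUG, so the check
    // stays active in optimized builds.
    #define example_ceph_assert(expr)                                      \
      ((expr) ? (void)0                                                    \
              : example_assert_fail(#expr, __FILE__, __LINE__, __func__))
]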
bluefs->get_usage(&bluefs_usage); - assert(bluefs_usage.size() > bluefs_shared_bdev); + ceph_assert(bluefs_usage.size() > bluefs_shared_bdev); // fixme: look at primary bdev only for now uint64_t bluefs_free = bluefs_usage[bluefs_shared_bdev].first; @@ -5436,7 +5436,7 @@ void BlueStore::_commit_bluefs_freespace( int BlueStore::_open_collections(int *errors) { dout(10) << __func__ << dendl; - assert(coll_map.empty()); + ceph_assert(coll_map.empty()); KeyValueDB::Iterator it = db->get_iterator(PREFIX_COLL); for (it->upper_bound(string()); it->valid(); @@ -5517,7 +5517,7 @@ int BlueStore::_setup_block_symlink_or_file( } string serial_number = epath.substr(strlen(SPDK_PREFIX)); r = ::write(fd, serial_number.c_str(), serial_number.size()); - assert(r == (int)serial_number.size()); + ceph_assert(r == (int)serial_number.size()); dout(1) << __func__ << " created " << name << " symlink to " << epath << dendl; VOID_TEMP_FAILURE_RETRY(::close(fd)); @@ -5667,7 +5667,7 @@ int BlueStore::mkfs() if (cct->_conf->bluestore_min_alloc_size) { min_alloc_size = cct->_conf->bluestore_min_alloc_size; } else { - assert(bdev); + ceph_assert(bdev); if (bdev->is_rotational()) { min_alloc_size = cct->_conf->bluestore_min_alloc_size_hdd; } else { @@ -5770,7 +5770,7 @@ void BlueStore::set_cache_shards(unsigned num) { dout(10) << __func__ << " " << num << dendl; size_t old = cache_shards.size(); - assert(num >= old); + ceph_assert(num >= old); cache_shards.resize(num); for (unsigned i = old; i < num; ++i) { cache_shards[i] = Cache::create(cct, cct->_conf->bluestore_cache_type, @@ -5893,7 +5893,7 @@ int BlueStore::_mount(bool kv_only, bool open_db) int BlueStore::umount() { - assert(_kv_only || mounted); + ceph_assert(_kv_only || mounted); dout(1) << __func__ << dendl; _osr_drain_all(); @@ -5963,7 +5963,7 @@ int BlueStore::_fsck_check_extents( apply( e.offset, e.length, granularity, used_blocks, [&](uint64_t pos, mempool_dynamic_bitset &bs) { - assert(pos < bs.size()); + ceph_assert(pos < bs.size()); if (bs.test(pos)) { if (repairer) { repairer->note_misreference( @@ -6122,7 +6122,7 @@ int BlueStore::_fsck(bool deep, bool repair) apply( 0, std::max(min_alloc_size, SUPER_RESERVED), fm->get_alloc_size(), used_blocks, [&](uint64_t pos, mempool_dynamic_bitset &bs) { - assert(pos < bs.size()); + ceph_assert(pos < bs.size()); bs.set(pos); } ); @@ -6135,7 +6135,7 @@ int BlueStore::_fsck(bool deep, bool repair) if (bluefs) { interval_set bset; r = bluefs->get_block_extents(bluefs_shared_bdev, &bset); - assert(r == 0); + ceph_assert(r == 0); if (!(bset == bluefs_extents)) { dout(10) << __func__ << " bluefs says 0x" << std::hex << bset << std::dec << dendl; @@ -6157,7 +6157,7 @@ int BlueStore::_fsck(bool deep, bool repair) apply( e.get_start(), e.get_len(), fm->get_alloc_size(), used_blocks, [&](uint64_t pos, mempool_dynamic_bitset &bs) { - assert(pos < bs.size()); + ceph_assert(pos < bs.size()); bs.set(pos); } ); @@ -6332,7 +6332,7 @@ int BlueStore::_fsck(bool deep, bool repair) } pos = l.logical_offset + l.length; expected_statfs.data_stored += l.length; - assert(l.blob); + ceph_assert(l.blob); const bluestore_blob_t& blob = l.blob->get_blob(); auto& ref = ref_map[l.blob]; @@ -6354,8 +6354,8 @@ int BlueStore::_fsck(bool deep, bool repair) pu = &p->second; } uint64_t blob_len = blob.get_logical_length(); - assert((blob_len % (sizeof(*pu)*8)) == 0); - assert(l.blob_offset + l.length <= blob_len); + ceph_assert((blob_len % (sizeof(*pu)*8)) == 0); + ceph_assert(l.blob_offset + l.length <= blob_len); uint64_t chunk_size = blob_len / 
(sizeof(*pu)*8); uint64_t start = l.blob_offset / chunk_size; uint64_t end = @@ -6432,7 +6432,7 @@ int BlueStore::_fsck(bool deep, bool repair) ++errors; } sb_info_t& sbi = sb_info[i.first->shared_blob->get_sbid()]; - assert(sbi.cid == coll_t() || sbi.cid == c->cid); + ceph_assert(sbi.cid == coll_t() || sbi.cid == c->cid); sbi.cid = c->cid; sbi.sb = i.first->shared_blob; sbi.oids.push_back(oid); @@ -6660,7 +6660,7 @@ int BlueStore::_fsck(bool deep, bool repair) if (r < 0) { derr << __func__ << " failed to read from 0x" << std::hex << e->offset <<"~" << e->length << std::dec << dendl; - assert(0 == "read failed, wtf"); + ceph_assert(0 == "read failed, wtf"); } pext_to_release.push_back(*e); e = pextents.erase(e); @@ -6669,7 +6669,7 @@ int BlueStore::_fsck(bool deep, bool repair) b_off, bl, [&](uint64_t offset, bufferlist& t) { int r = bdev->write(offset, t, false); - assert(r == 0); + ceph_assert(r == 0); }); e += exts.size() - 1; for (auto& p : exts) { @@ -6681,7 +6681,7 @@ int BlueStore::_fsck(bool deep, bool repair) b->dirty_blob().clear_flag(bluestore_blob_t::FLAG_SHARED); auto sb_it = sb_info.find(b->shared_blob->get_sbid()); - assert(sb_it != sb_info.end()); + ceph_assert(sb_it != sb_info.end()); sb_info_t& sbi = sb_it->second; for (auto& r : sbi.ref_map.ref_map) { @@ -6738,7 +6738,7 @@ int BlueStore::_fsck(bool deep, bool repair) if (repair && (!sbi.passed || sbi.updated)) { auto sbid = p.first; if (sbi.ref_map.empty()) { - assert(sbi.passed); + ceph_assert(sbi.passed); dout(20) << __func__ << " " << *sbi.sb << " is empty, removing" << dendl; repairer.fix_shared_blob(db, sbid, nullptr); @@ -6817,7 +6817,7 @@ int BlueStore::_fsck(bool deep, bool repair) apply( e.get_start(), e.get_len(), fm->get_alloc_size(), used_blocks, [&](uint64_t pos, mempool_dynamic_bitset &bs) { - assert(pos < bs.size()); + ceph_assert(pos < bs.size()); bs.set(pos); } ); @@ -6833,7 +6833,7 @@ int BlueStore::_fsck(bool deep, bool repair) apply( e.get_start(), e.get_len(), fm->get_alloc_size(), used_blocks, [&](uint64_t pos, mempool_dynamic_bitset &bs) { - assert(pos < bs.size()); + ceph_assert(pos < bs.size()); bs.reset(pos); } ); @@ -6845,7 +6845,7 @@ int BlueStore::_fsck(bool deep, bool repair) apply( offset, length, fm->get_alloc_size(), used_blocks, [&](uint64_t pos, mempool_dynamic_bitset &bs) { - assert(pos < bs.size()); + ceph_assert(pos < bs.size()); if (bs.test(pos)) { if (offset == SUPER_RESERVED && length == min_alloc_size - SUPER_RESERVED) { @@ -6880,7 +6880,7 @@ int BlueStore::_fsck(bool deep, bool repair) fm->enumerate_reset(); size_t count = used_blocks.count(); if (used_blocks.size() != count) { - assert(used_blocks.size() > count); + ceph_assert(used_blocks.size() > count); used_blocks.flip(); size_t start = used_blocks.find_first(); while (start != decltype(used_blocks)::npos) { @@ -6968,7 +6968,7 @@ void BlueStore::inject_leaked(uint64_t len) PExtentVector exts; int64_t alloc_len = alloc->allocate(len, min_alloc_size, min_alloc_size * 256, 0, &exts); - assert(alloc_len >= (int64_t)len); + ceph_assert(alloc_len >= (int64_t)len); for (auto& p : exts) { fm->allocate(p.offset, p.length, txn); } @@ -6980,11 +6980,11 @@ void BlueStore::inject_false_free(coll_t cid, ghobject_t oid) KeyValueDB::Transaction txn; OnodeRef o; CollectionRef c = _get_collection(cid); - assert(c); + ceph_assert(c); { RWLock::WLocker l(c->lock); // just to avoid internal asserts o = c->get_onode(oid, false); - assert(o); + ceph_assert(o); o->extent_map.fault_range(db, 0, OBJECT_MAX_SIZE); } @@ -7015,7 +7015,7 @@ void 
BlueStore::inject_false_free(coll_t cid, ghobject_t oid) } } } - assert(injected); + ceph_assert(injected); db->submit_transaction_sync(txn); } @@ -7032,20 +7032,20 @@ void BlueStore::inject_misreference(coll_t cid1, ghobject_t oid1, { OnodeRef o1; CollectionRef c1 = _get_collection(cid1); - assert(c1); + ceph_assert(c1); { RWLock::WLocker l(c1->lock); // just to avoid internal asserts o1 = c1->get_onode(oid1, false); - assert(o1); + ceph_assert(o1); o1->extent_map.fault_range(db, offset, OBJECT_MAX_SIZE); } OnodeRef o2; CollectionRef c2 = _get_collection(cid2); - assert(c2); + ceph_assert(c2); { RWLock::WLocker l(c2->lock); // just to avoid internal asserts o2 = c2->get_onode(oid2, false); - assert(o2); + ceph_assert(o2); o2->extent_map.fault_range(db, offset, OBJECT_MAX_SIZE); } Extent& e1 = *(o1->extent_map.seek_lextent(offset)); @@ -7053,13 +7053,13 @@ void BlueStore::inject_misreference(coll_t cid1, ghobject_t oid1, // require onode/extent layout to be the same (and simple) // to make things easier - assert(o1->onode.extent_map_shards.empty()); - assert(o2->onode.extent_map_shards.empty()); - assert(o1->extent_map.spanning_blob_map.size() == 0); - assert(o2->extent_map.spanning_blob_map.size() == 0); - assert(e1.logical_offset == e2.logical_offset); - assert(e1.length == e2.length); - assert(e1.blob_offset == e2.blob_offset); + ceph_assert(o1->onode.extent_map_shards.empty()); + ceph_assert(o2->onode.extent_map_shards.empty()); + ceph_assert(o1->extent_map.spanning_blob_map.size() == 0); + ceph_assert(o2->extent_map.spanning_blob_map.size() == 0); + ceph_assert(e1.logical_offset == e2.logical_offset); + ceph_assert(e1.length == e2.length); + ceph_assert(e1.blob_offset == e2.blob_offset); KeyValueDB::Transaction txn; txn = db->get_transaction(); @@ -7191,7 +7191,7 @@ void BlueStore::_reap_collections() CollectionRef c = *p; dout(10) << __func__ << " " << c << " " << c->cid << dendl; if (c->onode_map.map_any([&](OnodeRef o) { - assert(!o->exists); + ceph_assert(!o->exists); if (o->flushing_count.load()) { dout(10) << __func__ << " " << c << " " << c->cid << " " << o->oid << " flush_txns " << o->flushing_count << dendl; @@ -7486,7 +7486,7 @@ int BlueStore::_do_read( } else { l = b_len; if (pc != cache_res.end()) { - assert(pc->first > b_off); + ceph_assert(pc->first > b_off); l = pc->first - b_off; } dout(30) << __func__ << " will read 0x" << std::hex << pos << ": 0x" @@ -7540,7 +7540,7 @@ int BlueStore::_do_read( // propagate EIO to caller return r; } - assert(r == 0); + ceph_assert(r == 0); } } else { // read the pieces @@ -7586,9 +7586,9 @@ int BlueStore::_do_read( // propagate EIO to caller return r; } - assert(r == 0); + ceph_assert(r == 0); } - assert(reg.bl.length() == r_len); + ceph_assert(reg.bl.length() == r_len); } } } @@ -7598,7 +7598,7 @@ int BlueStore::_do_read( ioc.aio_wait(); r = ioc.get_return_value(); if (r < 0) { - assert(r == -EIO); // no other errors allowed + ceph_assert(r == -EIO); // no other errors allowed return -EIO; } } @@ -7612,7 +7612,7 @@ int BlueStore::_do_read( dout(20) << __func__ << " blob " << *bptr << std::hex << " need 0x" << b2r_it->second << std::dec << dendl; if (bptr->get_blob().is_compressed()) { - assert(p != compressed_blob_bls.end()); + ceph_assert(p != compressed_blob_bls.end()); bufferlist& compressed_bl = *p++; if (_verify_csum(o, &bptr->get_blob(), 0, compressed_bl, b2r_it->second.front().logical_offset) < 0) { @@ -7664,7 +7664,7 @@ int BlueStore::_do_read( } else { uint64_t l = length - pos; if (pr != pr_end) { - assert(pr->first > pos + 
offset); + ceph_assert(pr->first > pos + offset); l = pr->first - (pos + offset); } dout(30) << __func__ << " assemble 0x" << std::hex << pos @@ -7674,9 +7674,9 @@ int BlueStore::_do_read( pos += l; } } - assert(bl.length() == length); - assert(pos == length); - assert(pr == pr_end); + ceph_assert(bl.length() == length); + ceph_assert(pos == length); + ceph_assert(pr == pr_end); r = bl.length(); return r; } @@ -8030,10 +8030,10 @@ int BlueStore::_collection_list( get_object_key(cct, start, &k); if (start.hobj.is_temp()) { temp = true; - assert(k >= temp_start_key && k < temp_end_key); + ceph_assert(k >= temp_start_key && k < temp_end_key); } else { temp = false; - assert(k >= start_key && k < end_key); + ceph_assert(k >= start_key && k < end_key); } dout(20) << __func__ << " start from " << pretty_binary_string(k) << " temp=" << (int)temp << dendl; @@ -8080,7 +8080,7 @@ int BlueStore::_collection_list( } ghobject_t oid; int r = get_key_object(it->key(), &oid); - assert(r == 0); + ceph_assert(r == 0); dout(20) << __func__ << " oid " << oid << " end " << end << dendl; if (ls->size() >= (unsigned)max) { dout(20) << __func__ << " reached max " << max << dendl; @@ -8362,7 +8362,7 @@ void BlueStore::_prepare_ondisk_format_super(KeyValueDB::Transaction& t) dout(10) << __func__ << " ondisk_format " << ondisk_format << " min_compat_ondisk_format " << min_compat_ondisk_format << dendl; - assert(ondisk_format == latest_ondisk_format); + ceph_assert(ondisk_format == latest_ondisk_format); { bufferlist bl; encode(ondisk_format, bl); @@ -8421,7 +8421,7 @@ int BlueStore::_open_super_meta() freelist_type = std::string(bl.c_str(), bl.length()); dout(10) << __func__ << " freelist_type " << freelist_type << dendl; } else { - assert("Not Support extent freelist manager" == 0); + ceph_assert("Not Support extent freelist manager" == 0); } } @@ -8464,7 +8464,7 @@ int BlueStore::_open_super_meta() bl.clear(); { r = db->get(PREFIX_SUPER, "min_compat_ondisk_format", &bl); - assert(!r); + ceph_assert(!r); auto p = bl.cbegin(); try { decode(compat_ondisk_format, p); @@ -8501,7 +8501,7 @@ int BlueStore::_open_super_meta() decode(val, p); min_alloc_size = val; min_alloc_size_order = ctz(val); - assert(min_alloc_size == 1u << min_alloc_size_order); + ceph_assert(min_alloc_size == 1u << min_alloc_size_order); } catch (buffer::error& e) { derr << __func__ << " unable to read min_alloc_size" << dendl; return -EIO; @@ -8527,8 +8527,8 @@ int BlueStore::_upgrade_super() { dout(1) << __func__ << " from " << ondisk_format << ", latest " << latest_ondisk_format << dendl; - assert(ondisk_format > 0); - assert(ondisk_format < latest_ondisk_format); + ceph_assert(ondisk_format > 0); + ceph_assert(ondisk_format < latest_ondisk_format); if (ondisk_format == 1) { // changes: @@ -8556,7 +8556,7 @@ int BlueStore::_upgrade_super() ondisk_format = 2; _prepare_ondisk_format_super(t); int r = db->submit_transaction_sync(t); - assert(r == 0); + ceph_assert(r == 0); } // done @@ -8567,7 +8567,7 @@ int BlueStore::_upgrade_super() void BlueStore::_assign_nid(TransContext *txc, OnodeRef o) { if (o->onode.nid) { - assert(o->exists); + ceph_assert(o->exists); return; } uint64_t nid = ++nid_last; @@ -8688,7 +8688,7 @@ void BlueStore::_txc_state_proc(TransContext *txc) } else { txc->state = TransContext::STATE_KV_SUBMITTED; int r = cct->_conf->bluestore_debug_omit_kv_commit ? 
0 : db->submit_transaction(txc->t); - assert(r == 0); + ceph_assert(r == 0); _txc_applied_kv(txc); } } @@ -8732,7 +8732,7 @@ void BlueStore::_txc_state_proc(TransContext *txc) default: derr << __func__ << " unexpected txc " << txc << " state " << txc->get_state_name() << dendl; - assert(0 == "unexpected txc state"); + ceph_assert(0 == "unexpected txc state"); return; } } @@ -8908,7 +8908,7 @@ void BlueStore::_txc_committed_kv(TransContext *txc) void BlueStore::_txc_finish(TransContext *txc) { dout(20) << __func__ << " " << txc << " onodes " << txc->onodes << dendl; - assert(txc->state == TransContext::STATE_FINISHING); + ceph_assert(txc->state == TransContext::STATE_FINISHING); for (auto& sb : txc->shared_blobs_written) { sb->finish_write(txc->seq); @@ -9018,7 +9018,7 @@ void BlueStore::_osr_register_zombie(OpSequencer *osr) dout(10) << __func__ << " " << osr << " " << osr->cid << dendl; osr->zombie = true; auto i = zombie_osr_set.emplace(osr->cid, osr); - assert(i.second); // this should be a new insertion + ceph_assert(i.second); // this should be a new insertion } void BlueStore::_osr_drain_preceding(TransContext *txc) @@ -9091,11 +9091,11 @@ void BlueStore::_osr_drain_all() for (auto& osr : zombies) { if (zombie_osr_set.erase(osr->cid)) { dout(10) << __func__ << " reaping empty zombie osr " << osr << dendl; - assert(osr->q.empty()); + ceph_assert(osr->q.empty()); } else if (osr->zombie) { dout(10) << __func__ << " empty zombie osr " << osr << " already reaped" << dendl; - assert(osr->q.empty()); + ceph_assert(osr->q.empty()); } else { dout(10) << __func__ << " empty zombie osr " << osr << " resurrected" << dendl; @@ -9147,7 +9147,7 @@ void BlueStore::_kv_stop() } kv_sync_thread.join(); kv_finalize_thread.join(); - assert(removed_collections.empty()); + ceph_assert(removed_collections.empty()); { std::lock_guard l(kv_lock); kv_stop = false; @@ -9171,11 +9171,11 @@ void BlueStore::_kv_sync_thread() dout(10) << __func__ << " start" << dendl; deque deferred_stable_queue; ///< deferred ios done + stable std::unique_lock l(kv_lock); - assert(!kv_sync_started); + ceph_assert(!kv_sync_started); kv_sync_started = true; kv_cond.notify_all(); while (true) { - assert(kv_committing.empty()); + ceph_assert(kv_committing.empty()); if (kv_queue.empty() && ((deferred_done_queue.empty() && deferred_stable_queue.empty()) || !deferred_aggressive)) { @@ -9277,7 +9277,7 @@ void BlueStore::_kv_sync_thread() if (txc->state == TransContext::STATE_KV_QUEUED) { txc->log_state_latency(logger, l_bluestore_state_kv_queued_lat); int r = cct->_conf->bluestore_debug_omit_kv_commit ? 
0 : db->submit_transaction(txc->t); - assert(r == 0); + ceph_assert(r == 0); _txc_applied_kv(txc); --txc->osr->kv_committing_serially; txc->state = TransContext::STATE_KV_SUBMITTED; @@ -9287,7 +9287,7 @@ void BlueStore::_kv_sync_thread() } } else { - assert(txc->state == TransContext::STATE_KV_SUBMITTED); + ceph_assert(txc->state == TransContext::STATE_KV_SUBMITTED); txc->log_state_latency(logger, l_bluestore_state_kv_queued_lat); } if (txc->had_ios) { @@ -9309,7 +9309,7 @@ void BlueStore::_kv_sync_thread() ceph::make_timespan(cct->_conf->bluestore_bluefs_balance_interval)) { bluefs_last_balance = after_flush; int r = _balance_bluefs_freespace(&bluefs_gift_extents); - assert(r >= 0); + ceph_assert(r >= 0); if (r > 0) { for (auto& p : bluefs_gift_extents) { bluefs_extents.insert(p.offset, p.length); @@ -9326,7 +9326,7 @@ void BlueStore::_kv_sync_thread() for (auto b : deferred_stable) { for (auto& txc : b->txcs) { bluestore_deferred_transaction_t& wt = *txc.deferred_txn; - assert(wt.released.empty()); // only kraken did this + ceph_assert(wt.released.empty()); // only kraken did this string key; get_deferred_key(wt.seq, &key); synct->rm_single_key(PREFIX_DEFERRED, key); @@ -9335,7 +9335,7 @@ void BlueStore::_kv_sync_thread() // submit synct synchronously (block and wait for it to commit) int r = cct->_conf->bluestore_debug_omit_kv_commit ? 0 : db->submit_transaction_sync(synct); - assert(r == 0); + ceph_assert(r == 0); { std::unique_lock m(kv_finalize_lock); @@ -9412,12 +9412,12 @@ void BlueStore::_kv_finalize_thread() deque deferred_stable; dout(10) << __func__ << " start" << dendl; std::unique_lock l(kv_finalize_lock); - assert(!kv_finalize_started); + ceph_assert(!kv_finalize_started); kv_finalize_started = true; kv_finalize_cond.notify_all(); while (true) { - assert(kv_committed.empty()); - assert(deferred_stable.empty()); + ceph_assert(kv_committed.empty()); + ceph_assert(deferred_stable.empty()); if (kv_committing_to_finalize.empty() && deferred_stable_to_finalize.empty()) { if (kv_finalize_stop) @@ -9436,7 +9436,7 @@ void BlueStore::_kv_finalize_thread() while (!kv_committed.empty()) { TransContext *txc = kv_committed.front(); - assert(txc->state == TransContext::STATE_KV_SUBMITTED); + ceph_assert(txc->state == TransContext::STATE_KV_SUBMITTED); _txc_state_proc(txc); kv_committed.pop_front(); } @@ -9500,7 +9500,7 @@ void BlueStore::_deferred_queue(TransContext *txc) bluestore_deferred_transaction_t& wt = *txc->deferred_txn; for (auto opi = wt.ops.begin(); opi != wt.ops.end(); ++opi) { const auto& op = *opi; - assert(op.op == bluestore_deferred_op_t::OP_WRITE); + ceph_assert(op.op == bluestore_deferred_op_t::OP_WRITE); bufferlist::const_iterator p = op.data.begin(); for (auto e : op.extents) { txc->osr->deferred_pending->prepare_write( @@ -9545,12 +9545,12 @@ void BlueStore::_deferred_submit_unlock(OpSequencer *osr) dout(10) << __func__ << " osr " << osr << " " << osr->deferred_pending->iomap.size() << " ios pending " << dendl; - assert(osr->deferred_pending); - assert(!osr->deferred_running); + ceph_assert(osr->deferred_pending); + ceph_assert(!osr->deferred_running); auto b = osr->deferred_pending; deferred_queue_size -= b->seq_bytes.size(); - assert(deferred_queue_size >= 0); + ceph_assert(deferred_queue_size >= 0); osr->deferred_running = osr->deferred_pending; osr->deferred_pending = nullptr; @@ -9573,7 +9573,7 @@ void BlueStore::_deferred_submit_unlock(OpSequencer *osr) logger->inc(l_bluestore_deferred_write_ops); logger->inc(l_bluestore_deferred_write_bytes, bl.length()); int r 
= bdev->aio_write(start, bl, &b->ioc, false); - assert(r == 0); + ceph_assert(r == 0); } } if (i == b->iomap.end()) { @@ -9608,12 +9608,12 @@ struct C_DeferredTrySubmit : public Context { void BlueStore::_deferred_aio_finish(OpSequencer *osr) { dout(10) << __func__ << " osr " << osr << dendl; - assert(osr->deferred_running); + ceph_assert(osr->deferred_running); DeferredBatch *b = osr->deferred_running; { std::lock_guard l(deferred_lock); - assert(osr->deferred_running == b); + ceph_assert(osr->deferred_running == b); osr->deferred_running = nullptr; if (!osr->deferred_pending) { dout(20) << __func__ << " dequeueing" << dendl; @@ -9836,7 +9836,7 @@ void BlueStore::_txc_add_transaction(TransContext *txc, Transaction *t) case Transaction::OP_MKCOLL: { - assert(!c); + ceph_assert(!c); const coll_t &cid = i.get_cid(op->cid); r = _create_collection(txc, cid, op->split_bits, &c); if (!r) @@ -9845,7 +9845,7 @@ void BlueStore::_txc_add_transaction(TransContext *txc, Transaction *t) break; case Transaction::OP_SPLIT_COLLECTION: - assert(0 == "deprecated"); + ceph_assert(0 == "deprecated"); break; case Transaction::OP_SPLIT_COLLECTION2: @@ -9889,7 +9889,7 @@ void BlueStore::_txc_add_transaction(TransContext *txc, Transaction *t) break; case Transaction::OP_COLL_RENAME: - assert(0 == "not implemented"); + ceph_assert(0 == "not implemented"); break; } if (r < 0) { @@ -9897,7 +9897,7 @@ void BlueStore::_txc_add_transaction(TransContext *txc, Transaction *t) << " not handled on operation " << op->op << " (op " << pos << ", counting from 0)" << dendl; _dump_transaction<0>(t); - assert(0 == "unexpected error"); + ceph_assert(0 == "unexpected error"); } // these operations implicity create the object @@ -10007,7 +10007,7 @@ void BlueStore::_txc_add_transaction(TransContext *txc, Transaction *t) break; case Transaction::OP_CLONERANGE: - assert(0 == "deprecated"); + ceph_assert(0 == "deprecated"); break; case Transaction::OP_CLONERANGE2: @@ -10025,21 +10025,21 @@ void BlueStore::_txc_add_transaction(TransContext *txc, Transaction *t) break; case Transaction::OP_COLL_ADD: - assert(0 == "not implemented"); + ceph_assert(0 == "not implemented"); break; case Transaction::OP_COLL_REMOVE: - assert(0 == "not implemented"); + ceph_assert(0 == "not implemented"); break; case Transaction::OP_COLL_MOVE: - assert(0 == "deprecated"); + ceph_assert(0 == "deprecated"); break; case Transaction::OP_COLL_MOVE_RENAME: case Transaction::OP_TRY_RENAME: { - assert(op->cid == op->dest_cid); + ceph_assert(op->cid == op->dest_cid); const ghobject_t& noid = i.get_oid(op->dest_oid); OnodeRef& no = ovec[op->dest_oid]; if (!no) { @@ -10141,7 +10141,7 @@ void BlueStore::_txc_add_transaction(TransContext *txc, Transaction *t) << dendl; derr << msg << dendl; _dump_transaction<0>(t); - assert(0 == "unexpected error"); + ceph_assert(0 == "unexpected error"); } } } @@ -10200,7 +10200,7 @@ void BlueStore::_dump_extent_map(ExtentMap &em) } for (auto& e : em.extent_map) { dout(LogLevelV) << __func__ << " " << e << dendl; - assert(e.logical_offset >= pos); + ceph_assert(e.logical_offset >= pos); pos = e.logical_offset + e.length; const bluestore_blob_t& blob = e.blob->get_blob(); if (blob.has_csum()) { @@ -10270,9 +10270,9 @@ void BlueStore::_pad_zeros( uint64_t end = *offset + length; unsigned back_copy = end % chunk_size; if (back_copy) { - assert(back_pad == 0); + ceph_assert(back_pad == 0); back_pad = chunk_size - back_copy; - assert(back_copy <= length); + ceph_assert(back_copy <= length); bufferptr tail(chunk_size); bl->copy(length - 
back_copy, back_copy, tail.c_str()); tail.zero(back_copy, back_pad, false); @@ -10291,7 +10291,7 @@ void BlueStore::_pad_zeros( *_dout << dendl; if (pad_count) logger->inc(l_bluestore_write_pad_bytes, pad_count); - assert(bl->length() == length); + ceph_assert(bl->length() == length); } void BlueStore::_do_write_small( @@ -10304,7 +10304,7 @@ void BlueStore::_do_write_small( { dout(10) << __func__ << " 0x" << std::hex << offset << "~" << length << std::dec << dendl; - assert(length < min_alloc_size); + ceph_assert(length < min_alloc_size); uint64_t end_offs = offset + length; logger->inc(l_bluestore_write_small); @@ -10452,7 +10452,7 @@ void BlueStore::_do_write_small( bufferlist head_bl; int r = _do_read(c.get(), o, offset - head_pad - head_read, head_read, head_bl, 0); - assert(r >= 0 && r <= (int)head_read); + ceph_assert(r >= 0 && r <= (int)head_read); size_t zlen = head_read - r; if (zlen) { head_bl.append_zero(zlen); @@ -10466,7 +10466,7 @@ void BlueStore::_do_write_small( bufferlist tail_bl; int r = _do_read(c.get(), o, offset + length + tail_pad, tail_read, tail_bl, 0); - assert(r >= 0 && r <= (int)tail_read); + ceph_assert(r >= 0 && r <= (int)tail_read); size_t zlen = tail_read - r; if (zlen) { tail_bl.append_zero(zlen); @@ -10488,7 +10488,7 @@ void BlueStore::_do_write_small( op->extents.emplace_back(bluestore_pextent_t(offset, length)); return 0; }); - assert(r == 0); + ceph_assert(r == 0); if (b->get_blob().csum_type) { b->dirty_blob().calc_csum(b_off, bl); } @@ -10509,7 +10509,7 @@ void BlueStore::_do_write_small( max_bsize, offset0 - bstart, &alloc_len)) { - assert(alloc_len == min_alloc_size); // expecting data always + ceph_assert(alloc_len == min_alloc_size); // expecting data always // fit into reused blob // Need to check for pending writes desiring to // reuse the same pextent. The rationale is that during GC two chunks @@ -10555,7 +10555,7 @@ void BlueStore::_do_write_small( max_bsize, offset0 - bstart, &alloc_len)) { - assert(alloc_len == min_alloc_size); // expecting data always + ceph_assert(alloc_len == min_alloc_size); // expecting data always // fit into reused blob // Need to check for pending writes desiring to // reuse the same pextent. The rationale is that during GC two chunks @@ -10763,13 +10763,13 @@ int BlueStore::_do_alloc_write( auto start = mono_clock::now(); // compress - assert(wi.b_off == 0); - assert(wi.blob_length == wi.bl.length()); + ceph_assert(wi.b_off == 0); + ceph_assert(wi.blob_length == wi.bl.length()); // FIXME: memory alignment here is bad bufferlist t; int r = c->compress(wi.bl, t); - assert(r == 0); + ceph_assert(r == 0); bluestore_compression_header_t chdr; chdr.type = c->get_type(); @@ -10824,7 +10824,7 @@ int BlueStore::_do_alloc_write( << dendl; return -ENOSPC; } - assert(prealloc_left == (int64_t)need); + ceph_assert(prealloc_left == (int64_t)need); dout(20) << __func__ << " prealloc " << prealloc << dendl; auto prealloc_pos = prealloc.begin(); @@ -10843,7 +10843,7 @@ int BlueStore::_do_alloc_write( dblob.set_compressed(wi.blob_length, wi.compressed_len); } else if (wi.new_blob) { // initialize newly created blob only - assert(dblob.is_mutable()); + ceph_assert(dblob.is_mutable()); unsigned csum_order; if (l->length() != wi.blob_length) { // hrm, maybe we could do better here, but let's not bother. 
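[Aside for reviewers: a pattern worth noting in these hunks is that calls with side effects — _do_read(), the compressor invocation, the aio submissions — are made first, their return code captured in r, and only then asserted on. That discipline is what makes the assert/ceph_assert swap safe: even under a build where a plain assert would be compiled away, the I/O itself still executes. A minimal sketch; fake_aio_write() is a hypothetical stand-in for a device call, not a Ceph API:

    #include <cassert>

    // Hypothetical stand-in for something like bdev->aio_write().
    inline int fake_aio_write() { return 0; }

    int submit_one() {
      // Good: the side effect runs unconditionally; only the check
      // depends on the assert macro being active.
      int r = fake_aio_write();
      assert(r == 0);

      // Bad: with <cassert> and -DNDEBUG the call would disappear:
      //   assert(fake_aio_write() == 0);
      return r;
    }
]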
@@ -10862,7 +10862,7 @@ int BlueStore::_do_alloc_write( suggested_boff > b_off) { dout(20) << __func__ << " forcing blob_offset to 0x" << std::hex << suggested_boff << std::dec << dendl; - assert(suggested_boff >= b_off); + ceph_assert(suggested_boff >= b_off); csum_length += suggested_boff - b_off; b_off = suggested_boff; } @@ -10879,7 +10879,7 @@ int BlueStore::_do_alloc_write( PExtentVector extents; int64_t left = final_length; while (left > 0) { - assert(prealloc_left > 0); + ceph_assert(prealloc_left > 0); if (prealloc_pos->length <= left) { prealloc_left -= prealloc_pos->length; left -= prealloc_pos->length; @@ -10940,7 +10940,7 @@ int BlueStore::_do_alloc_write( op->extents.emplace_back(bluestore_pextent_t(offset, length)); return 0; }); - assert(r == 0); + ceph_assert(r == 0); op->data = *l; } else { b->get_blob().map_bl( @@ -10951,8 +10951,8 @@ int BlueStore::_do_alloc_write( } } } - assert(prealloc_pos == prealloc.end()); - assert(prealloc_left == 0); + ceph_assert(prealloc_pos == prealloc.end()); + ceph_assert(prealloc_left == 0); return 0; } @@ -10992,7 +10992,7 @@ void BlueStore::_wctx_finish( unshare_ptr); } if (unshare) { - assert(maybe_unshared_blobs); + ceph_assert(maybe_unshared_blobs); maybe_unshared_blobs->insert(b->shared_blob.get()); } dout(20) << __func__ << " shared_blob release " << final @@ -11193,7 +11193,7 @@ int BlueStore::_do_gc( ++it) { bufferlist bl; int r = _do_read(c.get(), o, it->offset, it->length, bl, 0); - assert(r == (int)it->length); + ceph_assert(r == (int)it->length); _do_write_data(txc, c, o, it->offset, it->length, bl, &wctx_gc); logger->inc(l_bluestore_gc_merged, it->length); @@ -12007,7 +12007,7 @@ int BlueStore::_rename(TransContext *txc, r = -EEXIST; goto out; } - assert(txc->onodes.count(newo) == 0); + ceph_assert(txc->onodes.count(newo) == 0); } txc->t->rmkey(PREFIX_OBJ, oldo->key.c_str(), oldo->key.size()); @@ -12060,7 +12060,7 @@ int BlueStore::_create_collection( goto out; } auto p = new_coll_map.find(cid); - assert(p != new_coll_map.end()); + ceph_assert(p != new_coll_map.end()); *c = p->second; (*c)->cnode.bits = bits; coll_map[cid] = *c; @@ -12089,7 +12089,7 @@ int BlueStore::_remove_collection(TransContext *txc, const coll_t &cid, goto out; } size_t nonexistent_count = 0; - assert((*c)->exists); + ceph_assert((*c)->exists); if ((*c)->onode_map.map_any([&](OnodeRef o) { if (o->exists) { dout(10) << __func__ << " " << o->oid << " " << o @@ -12172,21 +12172,21 @@ int BlueStore::_split_collection(TransContext *txc, spg_t pgid, dest_pgid; bool is_pg = c->cid.is_pg(&pgid); - assert(is_pg); + ceph_assert(is_pg); is_pg = d->cid.is_pg(&dest_pgid); - assert(is_pg); + ceph_assert(is_pg); // the destination should initially be empty. - assert(d->onode_map.empty()); - assert(d->shared_blob_set.empty()); - assert(d->cnode.bits == bits); + ceph_assert(d->onode_map.empty()); + ceph_assert(d->shared_blob_set.empty()); + ceph_assert(d->cnode.bits == bits); c->split_cache(d.get()); // adjust bits. note that this will be redundant for all but the first // split call for this parent (first child). 
c->cnode.bits = bits; - assert(d->cnode.bits == bits); + ceph_assert(d->cnode.bits == bits); r = 0; bufferlist bl; @@ -12378,7 +12378,7 @@ void BlueStore::_flush_cache() dout(10) << __func__ << dendl; for (auto i : cache_shards) { i->trim_all(); - assert(i->empty()); + ceph_assert(i->empty()); } for (auto& p : coll_map) { if (!p.second->onode_map.empty()) { @@ -12389,8 +12389,8 @@ void BlueStore::_flush_cache() derr << __func__ << " stray shared blobs on " << p.first << dendl; p.second->shared_blob_set.dump<0>(cct); } - assert(p.second->onode_map.empty()); - assert(p.second->shared_blob_set.empty()); + ceph_assert(p.second->onode_map.empty()); + ceph_assert(p.second->shared_blob_set.empty()); } coll_map.clear(); } @@ -12478,10 +12478,10 @@ void BlueStore::_record_onode(OnodeRef &o, KeyValueDB::Transaction &txn) size_t BlueStoreRepairer::StoreSpaceTracker::filter_out( const interval_set& extents) { - assert(granularity); // initialized + ceph_assert(granularity); // initialized // can't call for the second time - assert(!was_filtered_out); - assert(collections_bfs.size() == objects_bfs.size()); + ceph_assert(!was_filtered_out); + ceph_assert(collections_bfs.size() == objects_bfs.size()); uint64_t prev_pos = 0; uint64_t npos = collections_bfs.size(); @@ -12496,7 +12496,7 @@ size_t BlueStoreRepairer::StoreSpaceTracker::filter_out( uint64_t pos = max(e.first / granularity, prev_pos); uint64_t end_pos = 1 + (e.first + e.second - 1) / granularity; while (pos != npos && pos < end_pos) { - assert( collections_bfs[pos].element_count() == + ceph_assert( collections_bfs[pos].element_count() == objects_bfs[pos].element_count()); if (collections_bfs[pos].element_count()) { collections_reduced.push_back(std::move(collections_bfs[pos])); @@ -12593,7 +12593,7 @@ bool BlueStoreRepairer::preprocess_misreference(KeyValueDB *db) { if (misreferenced_extents.size()) { size_t n = space_usage_tracker.filter_out(misreferenced_extents); - assert(n > 0); + ceph_assert(n > 0); if (!fix_misreferences_txn) { fix_misreferences_txn = db->get_transaction(); } diff --git a/src/os/bluestore/BlueStore.h b/src/os/bluestore/BlueStore.h index 90651f95a261d..ae776a099e080 100644 --- a/src/os/bluestore/BlueStore.h +++ b/src/os/bluestore/BlueStore.h @@ -218,7 +218,7 @@ public: } void truncate(uint32_t newlen) { - assert(newlen < length); + ceph_assert(newlen < length); if (data.length()) { bufferlist t; t.substr_of(data, 0, newlen); @@ -263,8 +263,8 @@ public: state_list_t writing; ///< writing buffers, sorted by seq, ascending ~BufferSpace() { - assert(buffer_map.empty()); - assert(writing.empty()); + ceph_assert(buffer_map.empty()); + ceph_assert(writing.empty()); } void _add_buffer(Cache* cache, Buffer *b, int level, Buffer *near) { @@ -280,7 +280,7 @@ public: ++it; } - assert(it->seq >= b->seq); + ceph_assert(it->seq >= b->seq); // note that this will insert b before it // hence the order is maintained writing.insert(it, *b); @@ -296,7 +296,7 @@ public: } void _rm_buffer(Cache* cache, map>::iterator p) { - assert(p != buffer_map.end()); + ceph_assert(p != buffer_map.end()); cache->_audit("_rm_buffer start"); if (p->second->is_writing()) { writing.erase(writing.iterator_to(*p->second)); @@ -359,7 +359,7 @@ public: f->open_array_section("buffers"); for (auto& i : buffer_map) { f->open_object_section("buffer"); - assert(i.first == i.second->offset); + ceph_assert(i.first == i.second->offset); i.second->dump(f); f->close_section(); } @@ -456,7 +456,7 @@ public: void remove(SharedBlob *sb) { std::lock_guard l(lock); - 
assert(sb->get_parent() == this); + ceph_assert(sb->get_parent() == this); // only remove if it still points to us auto p = sb_map.find(sb->get_sbid()); if (p != sb_map.end() && @@ -576,7 +576,7 @@ public: if (blob_bl.length() == 0 ) { encode(blob, blob_bl); } else { - assert(blob_bl.length()); + ceph_assert(blob_bl.length()); } } void bound_encode( @@ -676,7 +676,7 @@ public: } void assign_blob(const BlobRef& b) { - assert(!blob); + ceph_assert(!blob); blob = b; blob->shared_blob->get_cache()->add_extent(); } @@ -803,7 +803,7 @@ public: BlobRef get_spanning_blob(int id) { auto p = spanning_blob_map.find(id); - assert(p != spanning_blob_map.end()); + ceph_assert(p != spanning_blob_map.end()); return p->second; } @@ -846,7 +846,7 @@ public: return false; } int s = seek_shard(offset); - assert(s >= 0); + ceph_assert(s >= 0); if (s == (int)shards.size() - 1) { return false; // last shard } @@ -1175,7 +1175,7 @@ public: buffer_size += b->length; } void _rm_buffer(Buffer *b) override { - assert(buffer_size >= b->length); + ceph_assert(buffer_size >= b->length); buffer_size -= b->length; auto q = buffer_lru.iterator_to(*b); buffer_lru.erase(q); @@ -1185,7 +1185,7 @@ public: _add_buffer(b, 0, nullptr); } void _adjust_buffer_size(Buffer *b, int64_t delta) override { - assert((int64_t)buffer_size + delta >= 0); + ceph_assert((int64_t)buffer_size + delta >= 0); buffer_size += delta; } void _touch_buffer(Buffer *b) override { @@ -1279,7 +1279,7 @@ public: break; case BUFFER_WARM_OUT: // move from warm_out to hot LRU - assert(0 == "this happens via discard hint"); + ceph_assert(0 == "this happens via discard hint"); break; case BUFFER_HOT: // move to front of hot LRU @@ -1703,7 +1703,7 @@ public: store(store), cid(c) { } ~OpSequencer() { - assert(q.empty()); + ceph_assert(q.empty()); } void queue_new(TransContext *txc) { @@ -1726,7 +1726,7 @@ public: bool _is_all_kv_submitted() { // caller must hold qlock & q.empty() must not empty - assert(!q.empty()); + ceph_assert(!q.empty()); TransContext *txc = &q.back(); if (txc->state >= TransContext::STATE_KV_SUBMITTED) { return true; @@ -2075,7 +2075,7 @@ private: void *entry() override; void init() { - assert(stop == false); + ceph_assert(stop == false); create("bstore_mempool"); } void shutdown() { @@ -2515,7 +2515,7 @@ public: uint64_t offset); void compact() override { - assert(db); + ceph_assert(db); db->compact(); } bool has_builtin_csum() const override { @@ -2855,9 +2855,9 @@ public: void init(uint64_t total, uint64_t min_alloc_size, uint64_t mem_cap = DEF_MEM_CAP) { - assert(!granularity); // not initialized yet - assert(min_alloc_size && isp2(min_alloc_size)); - assert(mem_cap); + ceph_assert(!granularity); // not initialized yet + ceph_assert(min_alloc_size && isp2(min_alloc_size)); + ceph_assert(mem_cap); total = round_up_to(total, min_alloc_size); granularity = total * BLOOM_FILTER_TABLE_SIZE * 2 / mem_cap; @@ -2885,10 +2885,10 @@ public: } inline void set_used(uint64_t offset, uint64_t len, const coll_t& cid, const ghobject_t& oid) { - assert(granularity); // initialized + ceph_assert(granularity); // initialized // can't call this func after filter_out has been apllied - assert(!was_filtered_out); + ceph_assert(!was_filtered_out); if (!len) { return; } @@ -2906,7 +2906,7 @@ public: // determines if collection's present after filtering-out inline bool is_used(const coll_t& cid) const { - assert(was_filtered_out); + ceph_assert(was_filtered_out); for(auto& bf : collections_bfs) { if (bf.contains(get_hash(cid))) { return true; @@ -2916,7 +2916,7 
@@ public: } // determines if object's present after filtering-out inline bool is_used(const ghobject_t& oid) const { - assert(was_filtered_out); + ceph_assert(was_filtered_out); for(auto& bf : objects_bfs) { if (bf.contains(oid.hobj.get_hash())) { return true; @@ -2926,8 +2926,8 @@ public: } // determines if collection's present before filtering-out inline bool is_used(const coll_t& cid, uint64_t offs) const { - assert(granularity); // initialized - assert(!was_filtered_out); + ceph_assert(granularity); // initialized + ceph_assert(!was_filtered_out); auto &bf = collections_bfs[offs / granularity]; if (bf.contains(get_hash(cid))) { return true; @@ -2936,8 +2936,8 @@ public: } // determines if object's present before filtering-out inline bool is_used(const ghobject_t& oid, uint64_t offs) const { - assert(granularity); // initialized - assert(!was_filtered_out); + ceph_assert(granularity); // initialized + ceph_assert(!was_filtered_out); auto &bf = objects_bfs[offs / granularity]; if (bf.contains(oid.hobj.get_hash())) { return true; diff --git a/src/os/bluestore/FreelistManager.cc b/src/os/bluestore/FreelistManager.cc index 9b91cc09437ca..3da42b6b9ad1d 100644 --- a/src/os/bluestore/FreelistManager.cc +++ b/src/os/bluestore/FreelistManager.cc @@ -14,7 +14,7 @@ FreelistManager *FreelistManager::create( // put the freelistmanagers in different prefixes because the merge // op is per prefix, has to done pre-db-open, and we don't know the // freelist type until after we open the db. - assert(prefix == "B"); + ceph_assert(prefix == "B"); if (type == "bitmap") return new BitmapFreelistManager(cct, kvdb, "B", "b"); return NULL; diff --git a/src/os/bluestore/KernelDevice.cc b/src/os/bluestore/KernelDevice.cc index 7b69cdb2b6479..e3c075bbaf2fa 100644 --- a/src/os/bluestore/KernelDevice.cc +++ b/src/os/bluestore/KernelDevice.cc @@ -83,7 +83,7 @@ int KernelDevice::open(const string& p) dio = true; aio = cct->_conf->bdev_aio; if (!aio) { - assert(0 == "non-aio not supported"); + ceph_assert(0 == "non-aio not supported"); } // disable readahead as it will wreak havoc on our mix of @@ -199,11 +199,11 @@ void KernelDevice::close() vdo_fd = -1; } - assert(fd_direct >= 0); + ceph_assert(fd_direct >= 0); VOID_TEMP_FAILURE_RETRY(::close(fd_direct)); fd_direct = -1; - assert(fd_buffered >= 0); + ceph_assert(fd_buffered >= 0); VOID_TEMP_FAILURE_RETRY(::close(fd_buffered)); fd_buffered = -1; @@ -430,7 +430,7 @@ void KernelDevice::_aio_thread() aio, max); if (r < 0) { derr << __func__ << " got " << cpp_strerror(r) << dendl; - assert(0 == "got unexpected error from io_getevents"); + ceph_assert(0 == "got unexpected error from io_getevents"); } if (r > 0) { dout(30) << __func__ << " got " << r << " completed aios" << dendl; @@ -459,13 +459,13 @@ void KernelDevice::_aio_thread() << dendl; ioc->set_return_value(-EIO); } else { - assert(0 == "got unexpected error from aio_t::get_return_value. " + ceph_assert(0 == "got unexpected error from aio_t::get_return_value. " "This may suggest HW issue. Please check your dmesg!"); } } else if (aio[i]->length != (uint64_t)r) { derr << "aio to " << aio[i]->offset << "~" << aio[i]->length << " but returned: " << r << dendl; - assert(0 == "unexpected aio error"); + ceph_assert(0 == "unexpected aio error"); } dout(10) << __func__ << " finished aio " << aio[i] << " r " << r @@ -499,7 +499,7 @@ void KernelDevice::_aio_thread() << " since " << debug_stall_since << ", timeout is " << cct->_conf->bdev_debug_aio_suicide_timeout << "s, suicide" << dendl; - assert(0 == "stalled aio... 
buggy kernel or bad device?"); + ceph_assert(0 == "stalled aio... buggy kernel or bad device?"); } } } @@ -523,11 +523,11 @@ void KernelDevice::_aio_thread() void KernelDevice::_discard_thread() { std::unique_lock l(discard_lock); - assert(!discard_started); + ceph_assert(!discard_started); discard_started = true; discard_cond.notify_all(); while (true) { - assert(discard_finishing.empty()); + ceph_assert(discard_finishing.empty()); if (discard_queued.empty()) { if (discard_stop) break; @@ -644,8 +644,8 @@ void KernelDevice::aio_submit(IOContext *ioc) int pending = ioc->num_pending.load(); ioc->num_running += pending; ioc->num_pending -= pending; - assert(ioc->num_pending.load() == 0); // we should be only thread doing this - assert(ioc->pending_aios.size() == 0); + ceph_assert(ioc->num_pending.load() == 0); // we should be only thread doing this + ceph_assert(ioc->pending_aios.size() == 0); if (cct->_conf->bdev_debug_aio) { list::iterator p = ioc->running_aios.begin(); @@ -665,7 +665,7 @@ void KernelDevice::aio_submit(IOContext *ioc) derr << __func__ << " retries " << retries << dendl; if (r < 0) { derr << " aio submit got " << cpp_strerror(r) << dendl; - assert(r == 0); + ceph_assert(r == 0); } } @@ -715,7 +715,7 @@ int KernelDevice::write( dout(20) << __func__ << " 0x" << std::hex << off << "~" << len << std::dec << (buffered ? " (buffered)" : " (direct)") << dendl; - assert(is_valid_io(off, len)); + ceph_assert(is_valid_io(off, len)); if ((!buffered || bl.get_num_buffers() >= IOV_MAX) && bl.rebuild_aligned_size_and_memory(block_size, block_size, IOV_MAX)) { @@ -738,7 +738,7 @@ int KernelDevice::aio_write( dout(20) << __func__ << " 0x" << std::hex << off << "~" << len << std::dec << (buffered ? " (buffered)" : " (direct)") << dendl; - assert(is_valid_io(off, len)); + ceph_assert(is_valid_io(off, len)); if ((!buffered || bl.get_num_buffers() >= IOV_MAX) && bl.rebuild_aligned_size_and_memory(block_size, block_size, IOV_MAX)) { @@ -803,7 +803,7 @@ int KernelDevice::read(uint64_t off, uint64_t len, bufferlist *pbl, dout(5) << __func__ << " 0x" << std::hex << off << "~" << len << std::dec << (buffered ? 
" (buffered)" : " (direct)") << dendl; - assert(is_valid_io(off, len)); + ceph_assert(is_valid_io(off, len)); _aio_log_start(ioc, off, len); @@ -814,7 +814,7 @@ int KernelDevice::read(uint64_t off, uint64_t len, bufferlist *pbl, r = -errno; goto out; } - assert((uint64_t)r == len); + ceph_assert((uint64_t)r == len); pbl->push_back(std::move(p)); dout(40) << "data: "; @@ -838,7 +838,7 @@ int KernelDevice::aio_read( int r = 0; #ifdef HAVE_LIBAIO if (aio && dio) { - assert(is_valid_io(off, len)); + ceph_assert(is_valid_io(off, len)); _aio_log_start(ioc, off, len); ioc->pending_aios.push_back(aio_t(ioc, fd_direct)); ++ioc->num_pending; @@ -871,7 +871,7 @@ int KernelDevice::direct_read_unaligned(uint64_t off, uint64_t len, char *buf) << " error: " << cpp_strerror(r) << dendl; goto out; } - assert((uint64_t)r == aligned_len); + ceph_assert((uint64_t)r == aligned_len); memcpy(buf, p.c_str() + (off - aligned_off), len); dout(40) << __func__ << " data: "; @@ -889,9 +889,9 @@ int KernelDevice::read_random(uint64_t off, uint64_t len, char *buf, { dout(5) << __func__ << " 0x" << std::hex << off << "~" << len << std::dec << dendl; - assert(len > 0); - assert(off < size); - assert(off + len <= size); + ceph_assert(len > 0); + ceph_assert(off < size); + ceph_assert(off + len <= size); int r = 0; //if it's direct io and unaligned, we have to use a internal buffer @@ -926,7 +926,7 @@ int KernelDevice::read_random(uint64_t off, uint64_t len, char *buf, << dendl; goto out; } - assert((uint64_t)r == len); + ceph_assert((uint64_t)r == len); } dout(40) << __func__ << " data: "; @@ -943,8 +943,8 @@ int KernelDevice::invalidate_cache(uint64_t off, uint64_t len) { dout(5) << __func__ << " 0x" << std::hex << off << "~" << len << std::dec << dendl; - assert(off % block_size == 0); - assert(len % block_size == 0); + ceph_assert(off % block_size == 0); + ceph_assert(len % block_size == 0); int r = posix_fadvise(fd_buffered, off, len, POSIX_FADV_DONTNEED); if (r) { r = -r; diff --git a/src/os/bluestore/NVMEDevice.cc b/src/os/bluestore/NVMEDevice.cc index 3ac4291f1a789..633700d5ab669 100644 --- a/src/os/bluestore/NVMEDevice.cc +++ b/src/os/bluestore/NVMEDevice.cc @@ -162,14 +162,14 @@ class SharedDriverQueueData { // usable queue depth should minus 1 to aovid overflow. 
max_queue_depth = opts.io_queue_size - 1; qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts)); - assert(qpair != NULL); + ceph_assert(qpair != NULL); // allocate spdk dma memory for (uint16_t i = 0; i < data_buffer_default_num; i++) { void *b = spdk_dma_zmalloc(data_buffer_size, CEPH_PAGE_SIZE, NULL); if (!b) { derr << __func__ << " failed to create memory pool for nvme data buffer" << dendl; - assert(b); + ceph_assert(b); } data_buf_mempool.push_back(b); } @@ -203,7 +203,7 @@ class SharedDriverQueueData { if (!data_buf_mempool.empty()) { for (uint16_t i = 0; i < data_buffer_default_num; i++) { void *b = data_buf_mempool[i]; - assert(b); + ceph_assert(b); spdk_dma_free(b); } data_buf_mempool.clear(); @@ -233,7 +233,7 @@ struct Task { return_code(rc), start(ceph::coarse_real_clock::now()) {} ~Task() { - assert(!io_request.nseg); + ceph_assert(!io_request.nseg); } void release_segs(SharedDriverQueueData *queue_data) { if (io_request.extra_segs) { @@ -269,7 +269,7 @@ static void data_buf_reset_sgl(void *cb_arg, uint32_t sgl_offset) Task *t = static_cast(cb_arg); uint32_t i = sgl_offset / data_buffer_size; uint32_t offset = i * data_buffer_size; - assert(i <= t->io_request.nseg); + ceph_assert(i <= t->io_request.nseg); for (; i < t->io_request.nseg; i++) { offset += data_buffer_size; @@ -492,10 +492,10 @@ class NVMEManager { int try_get(const string &sn_tag, SharedDriverData **driver); void register_ctrlr(const string &sn_tag, spdk_nvme_ctrlr *c, struct spdk_pci_device *pci_dev, SharedDriverData **driver) { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); spdk_nvme_ns *ns; int num_ns = spdk_nvme_ctrlr_get_num_ns(c); - assert(num_ns >= 1); + ceph_assert(num_ns >= 1); if (num_ns > 1) { dout(0) << __func__ << " namespace count larger than 1, currently only use the first namespace" << dendl; } @@ -508,7 +508,7 @@ class NVMEManager { << ":" << spdk_pci_device_get_dev(pci_dev) << ":" << spdk_pci_device_get_func(pci_dev) << dendl; // only support one device per osd now! 
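Note: the loop above fills a fixed pool of DMA-capable buffers at queue setup so the hot I/O path never allocates. A sketch of that pattern with posix_memalign standing in for spdk_dma_zmalloc (a simplification: real DMA-safe memory must come from SPDK's allocator, and BufferPool with its members is invented for this note):

    #include <cstddef>
    #include <cstdlib>
    #include <cstring>
    #include <vector>

    // Preallocated buffer pool with O(1) get/put, mirroring the role of
    // data_buf_mempool; get() returns nullptr on exhaustion and the
    // caller is expected to back off and retry.
    class BufferPool {
      std::vector<void*> free_bufs;
    public:
      BufferPool(std::size_t count, std::size_t size, std::size_t align) {
        for (std::size_t i = 0; i < count; ++i) {
          void* b = nullptr;
          if (posix_memalign(&b, align, size) == 0) {
            memset(b, 0, size);             // zeroed, like spdk_dma_zmalloc
            free_bufs.push_back(b);
          }
        }
      }
      ~BufferPool() { for (void* b : free_bufs) free(b); }
      void* get() {
        if (free_bufs.empty()) return nullptr;
        void* b = free_bufs.back();
        free_bufs.pop_back();
        return b;
      }
      void put(void* b) { free_bufs.push_back(b); }
    };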
- assert(shared_driver_datas.empty()); + ceph_assert(shared_driver_datas.empty()); // index 0 is occupied by the master thread shared_driver_datas.push_back(new SharedDriverData(shared_driver_datas.size()+1, sn_tag, c, ns)); *driver = shared_driver_datas.back(); @@ -571,7 +571,7 @@ static void attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid, pci_dev = spdk_pci_get_device(&pci_addr); if (!pci_dev) { dout(0) << __func__ << " failed to get pci device" << dendl; - assert(pci_dev); + ceph_assert(pci_dev); } NVMEManager::ProbeContext *ctx = static_cast(cb_ctx); @@ -640,7 +640,7 @@ int NVMEManager::try_get(const string &sn_tag, SharedDriverData **driver) probe_queue.pop_front(); r = spdk_nvme_probe(NULL, ctxt, probe_cb, attach_cb, NULL); if (r < 0) { - assert(!ctxt->driver); + ceph_assert(!ctxt->driver); derr << __func__ << " device probe nvme failed" << dendl; } ctxt->done = true; @@ -674,14 +674,14 @@ void io_complete(void *t, const struct spdk_nvme_cpl *completion) IOContext *ctx = task->ctx; SharedDriverQueueData *queue = task->queue; - assert(queue != NULL); - assert(ctx != NULL); + ceph_assert(queue != NULL); + ceph_assert(ctx != NULL); --queue->current_queue_depth; auto dur = std::chrono::duration_cast( ceph::coarse_real_clock::now() - task->start); if (task->command == IOCommand::WRITE_COMMAND) { queue->logger->tinc(l_bluestore_nvmedevice_write_lat, dur); - assert(!spdk_nvme_cpl_is_error(completion)); + ceph_assert(!spdk_nvme_cpl_is_error(completion)); dout(20) << __func__ << " write/zero op successfully, left " << queue->queue_op_seq - queue->completed_op_seq << dendl; // check waiting count before doing callback (which may @@ -697,7 +697,7 @@ void io_complete(void *t, const struct spdk_nvme_cpl *completion) delete task; } else if (task->command == IOCommand::READ_COMMAND) { queue->logger->tinc(l_bluestore_nvmedevice_read_lat, dur); - assert(!spdk_nvme_cpl_is_error(completion)); + ceph_assert(!spdk_nvme_cpl_is_error(completion)); dout(20) << __func__ << " read op successfully" << dendl; task->fill_cb(); task->release_segs(queue); @@ -716,8 +716,8 @@ void io_complete(void *t, const struct spdk_nvme_cpl *completion) ctx->try_aio_wake(); } } else { - assert(task->command == IOCommand::FLUSH_COMMAND); - assert(!spdk_nvme_cpl_is_error(completion)); + ceph_assert(task->command == IOCommand::FLUSH_COMMAND); + ceph_assert(!spdk_nvme_cpl_is_error(completion)); queue->logger->tinc(l_bluestore_nvmedevice_flush_lat, dur); dout(20) << __func__ << " flush op successfully" << dendl; task->return_code = 0; @@ -831,7 +831,7 @@ void NVMEDevice::aio_submit(IOContext *ioc) if (pending && t) { ioc->num_running += pending; ioc->num_pending -= pending; - assert(ioc->num_pending.load() == 0); // we should be the only thread doing this + ceph_assert(ioc->num_pending.load() == 0); // we should be the only thread doing this // Only need to push the first entry ioc->nvme_task_first = ioc->nvme_task_last = nullptr; if (!queue_t) @@ -880,7 +880,7 @@ int NVMEDevice::aio_write( uint64_t len = bl.length(); dout(20) << __func__ << " " << off << "~" << len << " ioc " << ioc << " buffered " << buffered << dendl; - assert(is_valid_io(off, len)); + ceph_assert(is_valid_io(off, len)); write_split(this, off, bl, ioc); dout(5) << __func__ << " " << off << "~" << len << dendl; @@ -893,11 +893,11 @@ int NVMEDevice::write(uint64_t off, bufferlist &bl, bool buffered) uint64_t len = bl.length(); dout(20) << __func__ << " " << off << "~" << len << " buffered " << buffered << dendl; - assert(off % block_size == 0); - assert(len
% block_size == 0); - assert(len > 0); - assert(off < size); - assert(off + len <= size); + ceph_assert(off % block_size == 0); + ceph_assert(len % block_size == 0); + ceph_assert(len > 0); + ceph_assert(off < size); + ceph_assert(off + len <= size); IOContext ioc(cct, NULL); write_split(this, off, bl, &ioc); @@ -912,7 +912,7 @@ int NVMEDevice::read(uint64_t off, uint64_t len, bufferlist *pbl, bool buffered) { dout(5) << __func__ << " " << off << "~" << len << " ioc " << ioc << dendl; - assert(is_valid_io(off, len)); + ceph_assert(is_valid_io(off, len)); Task *t = new Task(this, IOCommand::READ_COMMAND, off, len, 1); bufferptr p = buffer::create_page_aligned(len); @@ -941,7 +941,7 @@ int NVMEDevice::aio_read( IOContext *ioc) { dout(20) << __func__ << " " << off << "~" << len << " ioc " << ioc << dendl; - assert(is_valid_io(off, len)); + ceph_assert(is_valid_io(off, len)); Task *t = new Task(this, IOCommand::READ_COMMAND, off, len); @@ -967,9 +967,9 @@ int NVMEDevice::aio_read( int NVMEDevice::read_random(uint64_t off, uint64_t len, char *buf, bool buffered) { - assert(len > 0); - assert(off < size); - assert(off + len <= size); + ceph_assert(len > 0); + ceph_assert(off < size); + ceph_assert(off + len <= size); uint64_t aligned_off = align_down(off, block_size); uint64_t aligned_len = align_up(off+len, block_size) - aligned_off; diff --git a/src/os/bluestore/PMEMDevice.cc b/src/os/bluestore/PMEMDevice.cc index 1cad25ce28fa9..68859d0168647 100644 --- a/src/os/bluestore/PMEMDevice.cc +++ b/src/os/bluestore/PMEMDevice.cc @@ -120,9 +120,9 @@ void PMEMDevice::close() { dout(1) << __func__ << dendl; - assert(addr != NULL); + ceph_assert(addr != NULL); pmem_unmap(addr, size); - assert(fd >= 0); + ceph_assert(fd >= 0); VOID_TEMP_FAILURE_RETRY(::close(fd)); fd = -1; @@ -201,7 +201,7 @@ int PMEMDevice::flush() void PMEMDevice::aio_submit(IOContext *ioc) { if (ioc->priv) { - assert(ioc->num_running == 0); + ceph_assert(ioc->num_running == 0); aio_callback(aio_callback_priv, ioc->priv); } else { ioc->try_aio_wake(); @@ -213,7 +213,7 @@ int PMEMDevice::write(uint64_t off, bufferlist& bl, bool buffered) { uint64_t len = bl.length(); dout(20) << __func__ << " " << off << "~" << len << dendl; - assert(is_valid_io(off, len)); + ceph_assert(is_valid_io(off, len)); dout(40) << "data: "; bl.hexdump(*_dout); @@ -254,7 +254,7 @@ int PMEMDevice::read(uint64_t off, uint64_t len, bufferlist *pbl, bool buffered) { dout(5) << __func__ << " " << off << "~" << len << dendl; - assert(is_valid_io(off, len)); + ceph_assert(is_valid_io(off, len)); bufferptr p = buffer::create_page_aligned(len); memcpy(p.c_str(), addr + off, len); @@ -278,7 +278,7 @@ int PMEMDevice::aio_read(uint64_t off, uint64_t len, bufferlist *pbl, int PMEMDevice::read_random(uint64_t off, uint64_t len, char *buf, bool buffered) { dout(5) << __func__ << " " << off << "~" << len << dendl; - assert(is_valid_io(off, len)); + ceph_assert(is_valid_io(off, len)); memcpy(buf, addr + off, len); return 0; diff --git a/src/os/bluestore/StupidAllocator.cc b/src/os/bluestore/StupidAllocator.cc index 5beca2e5cfa12..899716e014e1b 100644 --- a/src/os/bluestore/StupidAllocator.cc +++ b/src/os/bluestore/StupidAllocator.cc @@ -173,7 +173,7 @@ int64_t StupidAllocator::allocate_int( } num_free -= *length; - assert(num_free >= 0); + ceph_assert(num_free >= 0); last_alloc = *offset + *length; return 0; } @@ -250,7 +250,7 @@ uint64_t StupidAllocator::get_free() double StupidAllocator::get_fragmentation(uint64_t alloc_unit) { - assert(alloc_unit); + ceph_assert(alloc_unit); 
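Note: for the fragmentation score guarded just above, the idea is to compare the observed number of free intervals against the worst case for the same amount of free space, where every free allocation unit stands alone. One plausible normalized form is sketched below (fragmentation_score is a name invented here; the exact expression StupidAllocator evaluates may differ):

    #include <cstdint>

    // Returns 0.0 when all free space is one contiguous interval and
    // tends toward 1.0 as free space shatters into isolated units.
    // max_intervals is free_bytes / alloc_unit, the worst achievable
    // free-interval count.
    double fragmentation_score(uint64_t intervals, uint64_t max_intervals)
    {
      if (intervals <= 1 || max_intervals <= 1)
        return 0.0;                   // nothing free, or one solid extent
      return double(intervals - 1) / double(max_intervals - 1);
    }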
double res; uint64_t max_intervals = 0; uint64_t intervals = 0; @@ -263,7 +263,7 @@ double StupidAllocator::get_fragmentation(uint64_t alloc_unit) } ldout(cct, 30) << __func__ << " " << intervals << "/" << max_intervals << dendl; - assert(intervals <= max_intervals); + ceph_assert(intervals <= max_intervals); if (!intervals || max_intervals <= 1) { return 0.0; } @@ -333,9 +333,9 @@ void StupidAllocator::init_rm_free(uint64_t offset, uint64_t length) rm.subtract(overlap); } } - assert(rm.empty()); + ceph_assert(rm.empty()); num_free -= length; - assert(num_free >= 0); + ceph_assert(num_free >= 0); } diff --git a/src/os/bluestore/aio.cc b/src/os/bluestore/aio.cc index 7e63d5571a4fa..4811434492597 100644 --- a/src/os/bluestore/aio.cc +++ b/src/os/bluestore/aio.cc @@ -32,7 +32,7 @@ int aio_queue_t::submit_batch(aio_iter begin, aio_iter end, ++left; ++cur; } - assert(aios_size >= left); + ceph_assert(aios_size >= left); int done = 0; while (left > 0) { int r = io_submit(ctx, std::min(left, max_iodepth), piocb + done); @@ -45,7 +45,7 @@ int aio_queue_t::submit_batch(aio_iter begin, aio_iter end, } return r; } - assert(r > 0); + ceph_assert(r > 0); done += r; left -= r; attempts = 16; diff --git a/src/os/bluestore/aio.h b/src/os/bluestore/aio.h index 1c51ec4be6910..324b13e694087 100644 --- a/src/os/bluestore/aio.h +++ b/src/os/bluestore/aio.h @@ -62,11 +62,11 @@ struct aio_queue_t { ctx(0) { } ~aio_queue_t() { - assert(ctx == 0); + ceph_assert(ctx == 0); } int init() { - assert(ctx == 0); + ceph_assert(ctx == 0); int r = io_setup(max_iodepth, &ctx); if (r < 0) { if (ctx) { @@ -79,7 +79,7 @@ struct aio_queue_t { void shutdown() { if (ctx) { int r = io_destroy(ctx); - assert(r == 0); + ceph_assert(r == 0); ctx = 0; } } diff --git a/src/os/bluestore/bluestore_types.cc b/src/os/bluestore/bluestore_types.cc index df24a06442c3f..8597c75022187 100644 --- a/src/os/bluestore/bluestore_types.cc +++ b/src/os/bluestore/bluestore_types.cc @@ -107,9 +107,9 @@ void bluestore_extent_ref_map_t::_check() const unsigned refs = 0; for (const auto &p : ref_map) { if (p.first < pos) - assert(0 == "overlap"); + ceph_assert(0 == "overlap"); if (p.first == pos && p.second.refs == refs) - assert(0 == "unmerged"); + ceph_assert(0 == "unmerged"); pos = p.first + p.second.length; refs = p.second.refs; } @@ -160,14 +160,14 @@ void bluestore_extent_ref_map_t::get(uint64_t offset, uint32_t length) } if (p->first < offset) { // split off the portion before offset - assert(p->first + p->second.length > offset); + ceph_assert(p->first + p->second.length > offset); uint64_t left = p->first + p->second.length - offset; p->second.length = offset - p->first; p = ref_map.insert(map::value_type( offset, record_t(left, p->second.refs))).first; // continue below } - assert(p->first == offset); + ceph_assert(p->first == offset); if (length < p->second.length) { ref_map.insert(make_pair(offset + length, record_t(p->second.length - length, @@ -197,11 +197,11 @@ void bluestore_extent_ref_map_t::put( auto p = ref_map.lower_bound(offset); if (p == ref_map.end() || p->first > offset) { if (p == ref_map.begin()) { - assert(0 == "put on missing extent (nothing before)"); + ceph_assert(0 == "put on missing extent (nothing before)"); } --p; if (p->first + p->second.length <= offset) { - assert(0 == "put on missing extent (gap)"); + ceph_assert(0 == "put on missing extent (gap)"); } } if (p->first < offset) { @@ -214,7 +214,7 @@ void bluestore_extent_ref_map_t::put( offset, record_t(left, p->second.refs))).first; } while (length > 0) { - 
assert(p->first == offset); + ceph_assert(p->first == offset); if (length < p->second.length) { if (p->second.refs != 1) { unshared = false; } @@ -356,7 +356,7 @@ ostream& operator<<(ostream& out, const bluestore_extent_ref_map_t& m) void bluestore_blob_use_tracker_t::allocate() { - assert(num_au != 0); + ceph_assert(num_au != 0); bytes_per_au = new uint32_t[num_au]; for (uint32_t i = 0; i < num_au; ++i) { bytes_per_au[i] = 0; @@ -365,9 +365,9 @@ void bluestore_blob_use_tracker_t::allocate() void bluestore_blob_use_tracker_t::init( uint32_t full_length, uint32_t _au_size) { - assert(!au_size || is_empty()); - assert(_au_size > 0); - assert(full_length > 0); + ceph_assert(!au_size || is_empty()); + ceph_assert(_au_size > 0); + ceph_assert(full_length > 0); clear(); uint32_t _num_au = round_up_to(full_length, _au_size) / _au_size; au_size = _au_size; @@ -380,7 +380,7 @@ void bluestore_blob_use_tracker_t::init( void bluestore_blob_use_tracker_t::get( uint32_t offset, uint32_t length) { - assert(au_size); + ceph_assert(au_size); if (!num_au) { total_bytes += length; } else { @@ -399,13 +399,13 @@ bool bluestore_blob_use_tracker_t::put( uint32_t offset, uint32_t length, PExtentVector *release_units) { - assert(au_size); + ceph_assert(au_size); if (release_units) { release_units->clear(); } bool maybe_empty = true; if (!num_au) { - assert(total_bytes >= length); + ceph_assert(total_bytes >= length); total_bytes -= length; } else { auto end = offset + length; @@ -414,7 +414,7 @@ bool bluestore_blob_use_tracker_t::put( auto phase = offset % au_size; size_t pos = offset / au_size; auto diff = std::min(au_size - phase, end - offset); - assert(diff <= bytes_per_au[pos]); + ceph_assert(diff <= bytes_per_au[pos]); bytes_per_au[pos] -= diff; offset += (phase ? au_size - phase : au_size); if (bytes_per_au[pos] == 0) { @@ -446,7 +446,7 @@ bool bluestore_blob_use_tracker_t::can_split() const bool bluestore_blob_use_tracker_t::can_split_at(uint32_t blob_offset) const { - assert(au_size); + ceph_assert(au_size); return (blob_offset % au_size) == 0 && blob_offset < num_au * au_size; } @@ -455,10 +455,10 @@ void bluestore_blob_use_tracker_t::split( uint32_t blob_offset, bluestore_blob_use_tracker_t* r) { - assert(au_size); - assert(can_split()); - assert(can_split_at(blob_offset)); - assert(r->is_empty()); + ceph_assert(au_size); + ceph_assert(can_split()); + ceph_assert(can_split_at(blob_offset)); + ceph_assert(r->is_empty()); uint32_t new_num_au = blob_offset / au_size; r->init( (num_au - new_num_au) * au_size, au_size); @@ -750,7 +750,7 @@ void bluestore_blob_t::allocated(uint32_t b_off, uint32_t length, const PExtentV if (extents.size() == 0) { // if the blob is compressed then the logical length must already be set; // otherwise it must be unset. - assert((is_compressed() && logical_length != 0) || + ceph_assert((is_compressed() && logical_length != 0) || (!is_compressed() && logical_length == 0)); extents.reserve(allocs.size() + (b_off ?
1 : 0)); @@ -768,14 +768,14 @@ void bluestore_blob_t::allocated(uint32_t b_off, uint32_t length, const PExtentV logical_length = new_len; } } else { - assert(!is_compressed()); // partial allocations are forbidden when + ceph_assert(!is_compressed()); // partial allocations are forbidden when // compressed - assert(b_off < logical_length); + ceph_assert(b_off < logical_length); uint32_t cur_offs = 0; auto start_it = extents.begin(); size_t pos = 0; while (true) { - assert(start_it != extents.end()); + ceph_assert(start_it != extents.end()); if (cur_offs + start_it->length > b_off) { break; } @@ -788,15 +788,15 @@ void bluestore_blob_t::allocated(uint32_t b_off, uint32_t length, const PExtentV auto end_it = start_it; while (true) { - assert(end_it != extents.end()); - assert(!end_it->is_valid()); + ceph_assert(end_it != extents.end()); + ceph_assert(!end_it->is_valid()); if (cur_offs + end_it->length >= end_off) { break; } cur_offs += end_it->length; ++end_it; } - assert(cur_offs + end_it->length >= end_off); + ceph_assert(cur_offs + end_it->length >= end_off); uint32_t tail = cur_offs + end_it->length - end_off; start_it = extents.erase(start_it, end_it + 1); @@ -875,7 +875,7 @@ bool bluestore_blob_t::release_extents(bool all, } pos += e.length; } - assert(is_compressed() || get_logical_length() == pos); + ceph_assert(is_compressed() || get_logical_length() == pos); extents.resize(1); extents[0].offset = bluestore_pextent_t::INVALID_OFFSET; extents[0].length = pos; @@ -893,7 +893,7 @@ bool bluestore_blob_t::release_extents(bool all, if (loffs_it == lend || pext_loffs_start + pext_it->length <= loffs_it->offset) { int delta0 = pext_loffs - pext_loffs_start; - assert(delta0 >= 0); + ceph_assert(delta0 >= 0); if ((uint32_t)delta0 < pext_it->length) { vb.add(pext_it->offset + delta0, pext_it->length - delta0); } @@ -904,10 +904,10 @@ bool bluestore_blob_t::release_extents(bool all, else { //assert(pext_loffs == pext_loffs_start); int delta0 = pext_loffs - pext_loffs_start; - assert(delta0 >= 0); + ceph_assert(delta0 >= 0); int delta = loffs_it->offset - pext_loffs; - assert(delta >= 0); + ceph_assert(delta >= 0); if (delta > 0) { vb.add(pext_it->offset + delta0, delta); pext_loffs += delta; @@ -989,7 +989,7 @@ void bluestore_blob_t::split(uint32_t blob_offset, bluestore_blob_t& rb) rb.csum_type = csum_type; rb.csum_chunk_order = csum_chunk_order; size_t csum_order = get_csum_chunk_size(); - assert(blob_offset % csum_order == 0); + ceph_assert(blob_offset % csum_order == 0); size_t pos = (blob_offset / csum_order) * get_csum_value_size(); // deep copy csum data bufferptr old; diff --git a/src/os/bluestore/bluestore_types.h b/src/os/bluestore/bluestore_types.h index f1a938c7acb7e..405a33bcaaa02 100644 --- a/src/os/bluestore/bluestore_types.h +++ b/src/os/bluestore/bluestore_types.h @@ -291,7 +291,7 @@ struct bluestore_blob_use_tracker_t { if (num_au) { new_len = round_up_to(new_len, au_size); uint32_t _num_au = new_len / au_size; - assert(_num_au <= num_au); + ceph_assert(_num_au <= num_au); if (_num_au) { num_au = _num_au; // bytes_per_au array is left unmodified @@ -302,7 +302,7 @@ struct bluestore_blob_use_tracker_t { } void add_tail(uint32_t new_len, uint32_t _au_size) { auto full_size = au_size * (num_au ? 
num_au : 1); - assert(new_len >= full_size); + ceph_assert(new_len >= full_size); if (new_len == full_size) { return; } @@ -310,13 +310,13 @@ struct bluestore_blob_use_tracker_t { uint32_t old_total = total_bytes; total_bytes = 0; init(new_len, _au_size); - assert(num_au); + ceph_assert(num_au); bytes_per_au[0] = old_total; } else { - assert(_au_size == au_size); + ceph_assert(_au_size == au_size); new_len = round_up_to(new_len, au_size); uint32_t _num_au = new_len / au_size; - assert(_num_au >= num_au); + ceph_assert(_num_au >= num_au); if (_num_au > num_au) { auto old_bytes = bytes_per_au; auto old_num_au = num_au; @@ -451,7 +451,7 @@ public: DENC_HELPERS; void bound_encode(size_t& p, uint64_t struct_v) const { - assert(struct_v == 1 || struct_v == 2); + ceph_assert(struct_v == 1 || struct_v == 2); denc(extents, p); denc_varint(flags, p); denc_varint_lowz(logical_length, p); @@ -464,7 +464,7 @@ public: } void encode(bufferlist::contiguous_appender& p, uint64_t struct_v) const { - assert(struct_v == 1 || struct_v == 2); + ceph_assert(struct_v == 1 || struct_v == 2); denc(extents, p); denc_varint(flags, p); if (is_compressed()) { @@ -484,7 +484,7 @@ public: } void decode(bufferptr::const_iterator& p, uint64_t struct_v) { - assert(struct_v == 1 || struct_v == 2); + ceph_assert(struct_v == 1 || struct_v == 2); denc(extents, p); denc_varint(flags, p); if (is_compressed()) { @@ -566,11 +566,11 @@ public: } uint64_t calc_offset(uint64_t x_off, uint64_t *plen) const { auto p = extents.begin(); - assert(p != extents.end()); + ceph_assert(p != extents.end()); while (x_off >= p->length) { x_off -= p->length; ++p; - assert(p != extents.end()); + ceph_assert(p != extents.end()); } if (plen) *plen = p->length - x_off; @@ -582,15 +582,15 @@ public: bool _validate_range(uint64_t b_off, uint64_t b_len, bool require_allocated) const { auto p = extents.begin(); - assert(p != extents.end()); + ceph_assert(p != extents.end()); while (b_off >= p->length) { b_off -= p->length; ++p; - assert(p != extents.end()); + ceph_assert(p != extents.end()); } b_len += b_off; while (b_len) { - assert(p != extents.end()); + ceph_assert(p != extents.end()); if (require_allocated != p->is_valid()) { return false; } @@ -601,7 +601,7 @@ public: b_len -= p->length; ++p; } - assert(0 == "we should not get here"); + ceph_assert(0 == "we should not get here"); return false; } @@ -623,8 +623,8 @@ public: return false; } uint64_t blob_len = get_logical_length(); - assert((blob_len % (sizeof(unused)*8)) == 0); - assert(offset + length <= blob_len); + ceph_assert((blob_len % (sizeof(unused)*8)) == 0); + ceph_assert(offset + length <= blob_len); uint64_t chunk_size = blob_len / (sizeof(unused)*8); uint64_t start = offset / chunk_size; uint64_t end = round_up_to(offset + length, chunk_size) / chunk_size; @@ -638,8 +638,8 @@ public: /// mark a range that has never been used void add_unused(uint64_t offset, uint64_t length) { uint64_t blob_len = get_logical_length(); - assert((blob_len % (sizeof(unused)*8)) == 0); - assert(offset + length <= blob_len); + ceph_assert((blob_len % (sizeof(unused)*8)) == 0); + ceph_assert(offset + length <= blob_len); uint64_t chunk_size = blob_len / (sizeof(unused)*8); uint64_t start = round_up_to(offset, chunk_size) / chunk_size; uint64_t end = (offset + length) / chunk_size; @@ -655,8 +655,8 @@ public: void mark_used(uint64_t offset, uint64_t length) { if (has_unused()) { uint64_t blob_len = get_logical_length(); - assert((blob_len % (sizeof(unused)*8)) == 0); - assert(offset + length <= blob_len); + 
ceph_assert((blob_len % (sizeof(unused)*8)) == 0); + ceph_assert(offset + length <= blob_len); uint64_t chunk_size = blob_len / (sizeof(unused)*8); uint64_t start = offset / chunk_size; uint64_t end = round_up_to(offset + length, chunk_size) / chunk_size; @@ -674,14 +674,14 @@ public: static_assert(std::is_invocable_r_v); auto p = extents.begin(); - assert(p != extents.end()); + ceph_assert(p != extents.end()); while (x_off >= p->length) { x_off -= p->length; ++p; - assert(p != extents.end()); + ceph_assert(p != extents.end()); } while (x_len > 0) { - assert(p != extents.end()); + ceph_assert(p != extents.end()); uint64_t l = std::min(p->length - x_off, x_len); int r = f(p->offset + x_off, l); if (r < 0) @@ -699,16 +699,16 @@ public: static_assert(std::is_invocable_v); auto p = extents.begin(); - assert(p != extents.end()); + ceph_assert(p != extents.end()); while (x_off >= p->length) { x_off -= p->length; ++p; - assert(p != extents.end()); + ceph_assert(p != extents.end()); } bufferlist::iterator it = bl.begin(); uint64_t x_len = bl.length(); while (x_len > 0) { - assert(p != extents.end()); + ceph_assert(p != extents.end()); uint64_t l = std::min(p->length - x_off, x_len); bufferlist t; it.copy(l, t); @@ -743,7 +743,7 @@ public: const char *p = csum_data.c_str(); switch (cs) { case 0: - assert(0 == "no csum data, bad index"); + ceph_assert(0 == "no csum data, bad index"); case 1: return reinterpret_cast(p)[i]; case 2: @@ -753,7 +753,7 @@ public: case 8: return reinterpret_cast(p)[i]; default: - assert(0 == "unrecognized csum word size"); + ceph_assert(0 == "unrecognized csum word size"); } } const char *get_csum_item_ptr(unsigned i) const { @@ -802,9 +802,9 @@ public: } } void add_tail(uint32_t new_len) { - assert(is_mutable()); - assert(!has_unused()); - assert(new_len > logical_length); + ceph_assert(is_mutable()); + ceph_assert(!has_unused()); + ceph_assert(new_len > logical_length); extents.emplace_back( bluestore_pextent_t( bluestore_pextent_t::INVALID_OFFSET, diff --git a/src/os/bluestore/fastbmap_allocator_impl.cc b/src/os/bluestore/fastbmap_allocator_impl.cc index 424f8051871d6..ccd012b4433ba 100755 --- a/src/os/bluestore/fastbmap_allocator_impl.cc +++ b/src/os/bluestore/fastbmap_allocator_impl.cc @@ -44,8 +44,8 @@ interval_t AllocatorLevel01Loose::_get_longest_from_l0(uint64_t pos0, interval_t res_candidate; if (tail->length != 0) { - assert((tail->offset % l0_granularity) == 0); - assert((tail->length % l0_granularity) == 0); + ceph_assert((tail->offset % l0_granularity) == 0); + ceph_assert((tail->length % l0_granularity) == 0); res_candidate.offset = tail->offset / l0_granularity; res_candidate.length = tail->length / l0_granularity; } @@ -131,8 +131,8 @@ void AllocatorLevel01Loose::_analyze_partials(uint64_t pos_start, search_ctx_t* ctx) { auto d = CHILD_PER_SLOT; - assert((pos_start % d) == 0); - assert((pos_end % d) == 0); + ceph_assert((pos_start % d) == 0); + ceph_assert((pos_end % d) == 0); uint64_t l0_w = slotset_width * CHILD_PER_SLOT_L0; @@ -213,8 +213,8 @@ void AllocatorLevel01Loose::_mark_l1_on_l0(int64_t l0_pos, int64_t l0_pos_end) auto d0 = bits_per_slotset; uint64_t l1_w = CHILD_PER_SLOT; // this should be aligned with slotset boundaries - assert(0 == (l0_pos % d0)); - assert(0 == (l0_pos_end % d0)); + ceph_assert(0 == (l0_pos % d0)); + ceph_assert(0 == (l0_pos_end % d0)); int64_t idx = l0_pos / bits_per_slot; int64_t idx_end = l0_pos_end / bits_per_slot; @@ -250,7 +250,7 @@ void AllocatorLevel01Loose::_mark_l1_on_l0(int64_t l0_pos, int64_t l0_pos_end) idx = 
p2roundup(idx, int64_t(slotset_width)); } if ((idx % slotset_width) == 0) { - assert(mask_to_apply != L1_ENTRY_NOT_USED); + ceph_assert(mask_to_apply != L1_ENTRY_NOT_USED); uint64_t shift = (l1_pos % l1_w) * L1_ENTRY_WIDTH; slot_t& slot_val = l1[l1_pos / l1_w]; auto mask = slot_t(L1_ENTRY_MASK) << shift; @@ -322,7 +322,7 @@ interval_t AllocatorLevel01Loose::_allocate_l1_contiguous(uint64_t length, // full length match required. if (ctx.affordable_len) { // allocate as specified - assert(ctx.affordable_len >= length); + ceph_assert(ctx.affordable_len >= length); auto pos = ctx.affordable_offs / l0_granularity; _mark_alloc_l1_l0(pos, pos + 1); res = interval_t(ctx.affordable_offs, length); @@ -332,7 +332,7 @@ interval_t AllocatorLevel01Loose::_allocate_l1_contiguous(uint64_t length, // allocate from free slot sets if (ctx.free_count) { auto l = std::min(length, ctx.free_count * l1_granularity); - assert((l % l0_granularity) == 0); + ceph_assert((l % l0_granularity) == 0); auto pos_end = ctx.free_l1_pos * l0_w + l / l0_granularity; _mark_alloc_l1_l0(ctx.free_l1_pos * l0_w, pos_end); @@ -347,7 +347,7 @@ interval_t AllocatorLevel01Loose::_allocate_l1_contiguous(uint64_t length, if (ctx.free_count) { auto l = std::min(length, ctx.free_count * l1_granularity); - assert((l % l0_granularity) == 0); + ceph_assert((l % l0_granularity) == 0); auto pos_end = ctx.free_l1_pos * l0_w + l / l0_granularity; _mark_alloc_l1_l0(ctx.free_l1_pos * l0_w, pos_end); @@ -357,13 +357,13 @@ interval_t AllocatorLevel01Loose::_allocate_l1_contiguous(uint64_t length, } // we can terminate earlier on free entry only - assert(ctx.fully_processed); + ceph_assert(ctx.fully_processed); // check partially free slot sets first (including neighboring), // full length match required. if (ctx.affordable_len) { - assert(ctx.affordable_len >= length); - assert((length % l0_granularity) == 0); + ceph_assert(ctx.affordable_len >= length); + ceph_assert((length % l0_granularity) == 0); auto pos_start = ctx.affordable_offs + length / l0_granularity; auto pos_end = (ctx.affordable_offs + length) / l0_granularity; _mark_alloc_l1_l0(pos_start, pos_end); @@ -379,12 +379,12 @@ interval_t AllocatorLevel01Loose::_allocate_l1_contiguous(uint64_t length, } else { search_ctx_t ctx; _analyze_partials(pos_start, pos_end, length, min_length, NO_STOP, &ctx); - assert(ctx.fully_processed); + ceph_assert(ctx.fully_processed); // check partially free slot sets first (including neighboring), // full length match required. 
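Note: the scanning loops in these allocator hunks rely on find_next_set_bit to locate the next free child inside a 64-bit slot word. A self-contained sketch of that primitive using the GCC/Clang builtin (an assumption about its contract; the real helper may handle the start offset differently):

    #include <cstdint>

    constexpr unsigned bits_per_slot = 64;

    // Index of the first set bit at or after 'start', or bits_per_slot
    // when no such bit exists in the word.
    inline unsigned find_next_set_bit(uint64_t slot_val, unsigned start)
    {
      if (start >= bits_per_slot)
        return bits_per_slot;
      uint64_t masked = slot_val & (~uint64_t(0) << start); // clear bits < start
      return masked ? unsigned(__builtin_ctzll(masked))     // trailing zeros = index
                    : bits_per_slot;
    }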
if (ctx.affordable_len) { - assert(ctx.affordable_len >= length); - assert((length % l0_granularity) == 0); + ceph_assert(ctx.affordable_len >= length); + ceph_assert((length % l0_granularity) == 0); auto pos_start = ctx.affordable_offs / l0_granularity; auto pos_end = (ctx.affordable_offs + length) / l0_granularity; _mark_alloc_l1_l0(pos_start, pos_end); @@ -400,8 +400,8 @@ interval_t AllocatorLevel01Loose::_allocate_l1_contiguous(uint64_t length, if (aligned_extent.length > 0) { aligned_extent.length = std::min(length, uint64_t(aligned_extent.length)); - assert((aligned_extent.offset % l0_granularity) == 0); - assert((aligned_extent.length % l0_granularity) == 0); + ceph_assert((aligned_extent.offset % l0_granularity) == 0); + ceph_assert((aligned_extent.length % l0_granularity) == 0); auto pos_start = aligned_extent.offset / l0_granularity; auto pos_end = (aligned_extent.offset + aligned_extent.length) / l0_granularity; @@ -429,8 +429,8 @@ bool AllocatorLevel01Loose::_allocate_l1(uint64_t length, uint64_t d0 = CHILD_PER_SLOT_L0; uint64_t d1 = CHILD_PER_SLOT; - assert(0 == (l1_pos_start % (slotset_width * d1))); - assert(0 == (l1_pos_end % (slotset_width * d1))); + ceph_assert(0 == (l1_pos_start % (slotset_width * d1))); + ceph_assert(0 == (l1_pos_end % (slotset_width * d1))); if (min_length != l0_granularity) { // probably not the most efficient way but // don't care much about that at the moment @@ -467,9 +467,9 @@ bool AllocatorLevel01Loose::_allocate_l1(uint64_t length, continue; } auto free_pos = find_next_set_bit(slot_val, 0); - assert(free_pos < bits_per_slot); + ceph_assert(free_pos < bits_per_slot); do { - assert(length > *allocated); + ceph_assert(length > *allocated); bool empty; empty = _allocate_l0(length, max_length, diff --git a/src/os/bluestore/fastbmap_allocator_impl.h b/src/os/bluestore/fastbmap_allocator_impl.h index d53d031a52e6f..88fbf584ccfd1 100755 --- a/src/os/bluestore/fastbmap_allocator_impl.h +++ b/src/os/bluestore/fastbmap_allocator_impl.h @@ -192,11 +192,11 @@ class AllocatorLevel01Loose : public AllocatorLevel01 ++l0_dives; - assert(l0_pos0 < l0_pos1); - assert(length > *allocated); - assert(0 == (l0_pos0 % (slotset_width * d0))); - assert(0 == (l0_pos1 % (slotset_width * d0))); - assert(((length - *allocated) % l0_granularity) == 0); + ceph_assert(l0_pos0 < l0_pos1); + ceph_assert(length > *allocated); + ceph_assert(0 == (l0_pos0 % (slotset_width * d0))); + ceph_assert(0 == (l0_pos1 % (slotset_width * d0))); + ceph_assert(((length - *allocated) % l0_granularity) == 0); uint64_t need_entries = (length - *allocated) / l0_granularity; @@ -225,7 +225,7 @@ class AllocatorLevel01Loose : public AllocatorLevel01 } auto free_pos = find_next_set_bit(slot_val, 0); - assert(free_pos < bits_per_slot); + ceph_assert(free_pos < bits_per_slot); auto next_pos = free_pos + 1; while (next_pos < bits_per_slot && (next_pos - free_pos) < need_entries) { @@ -368,8 +368,8 @@ protected: { bool no_free = true; uint64_t d = slotset_width * CHILD_PER_SLOT_L0; - assert(0 == (l0_pos % d)); - assert(0 == (l0_pos_end % d)); + ceph_assert(0 == (l0_pos % d)); + ceph_assert(0 == (l0_pos_end % d)); auto idx = l0_pos / CHILD_PER_SLOT_L0; auto idx_end = l0_pos_end / CHILD_PER_SLOT_L0; @@ -383,8 +383,8 @@ protected: { bool no_free = true; uint64_t d = slotset_width * _children_per_slot(); - assert(0 == (l1_pos % d)); - assert(0 == (l1_pos_end % d)); + ceph_assert(0 == (l1_pos % d)); + ceph_assert(0 == (l1_pos_end % d)); auto idx = l1_pos / CHILD_PER_SLOT; auto idx_end = l1_pos_end /
CHILD_PER_SLOT; @@ -433,8 +433,8 @@ public: uint64_t debug_get_free(uint64_t l1_pos0 = 0, uint64_t l1_pos1 = 0) { - assert(0 == (l1_pos0 % CHILD_PER_SLOT)); - assert(0 == (l1_pos1 % CHILD_PER_SLOT)); + ceph_assert(0 == (l1_pos0 % CHILD_PER_SLOT)); + ceph_assert(0 == (l1_pos1 % CHILD_PER_SLOT)); auto idx0 = l1_pos0 * slotset_width; auto idx1 = l1_pos1 * slotset_width; @@ -525,7 +525,7 @@ protected: void _init(uint64_t capacity, uint64_t _alloc_unit, bool mark_as_free = true) { - assert(isp2(_alloc_unit)); + ceph_assert(isp2(_alloc_unit)); l1._init(capacity, _alloc_unit, mark_as_free); l2_granularity = @@ -552,8 +552,8 @@ protected: void _mark_l2_allocated(int64_t l2_pos, int64_t l2_pos_end) { auto d = CHILD_PER_SLOT; - assert(0 <= l2_pos_end); - assert((int64_t)l2.size() >= (l2_pos_end / d)); + ceph_assert(0 <= l2_pos_end); + ceph_assert((int64_t)l2.size() >= (l2_pos_end / d)); while (l2_pos < l2_pos_end) { l2[l2_pos / d] &= ~(slot_t(1) << (l2_pos % d)); @@ -564,8 +564,8 @@ protected: void _mark_l2_free(int64_t l2_pos, int64_t l2_pos_end) { auto d = CHILD_PER_SLOT; - assert(0 <= l2_pos_end); - assert((int64_t)l2.size() >= (l2_pos_end / d)); + ceph_assert(0 <= l2_pos_end); + ceph_assert((int64_t)l2.size() >= (l2_pos_end / d)); while (l2_pos < l2_pos_end) { l2[l2_pos / d] |= (slot_t(1) << (l2_pos % d)); @@ -576,8 +576,8 @@ protected: void _mark_l2_on_l1(int64_t l2_pos, int64_t l2_pos_end) { auto d = CHILD_PER_SLOT; - assert(0 <= l2_pos_end); - assert((int64_t)l2.size() >= (l2_pos_end / d)); + ceph_assert(0 <= l2_pos_end); + ceph_assert((int64_t)l2.size() >= (l2_pos_end / d)); auto idx = l2_pos * slotset_width; auto idx_end = l2_pos_end * slotset_width; @@ -613,12 +613,12 @@ protected: { uint64_t prev_allocated = *allocated; uint64_t d = CHILD_PER_SLOT; - assert(isp2(min_length)); - assert(min_length <= l2_granularity); - assert(max_length == 0 || max_length >= min_length); - assert(max_length == 0 || (max_length % min_length) == 0); - assert(length >= min_length); - assert((length % min_length) == 0); + ceph_assert(isp2(min_length)); + ceph_assert(min_length <= l2_granularity); + ceph_assert(max_length == 0 || max_length >= min_length); + ceph_assert(max_length == 0 || (max_length % min_length) == 0); + ceph_assert(length >= min_length); + ceph_assert((length % min_length) == 0); uint64_t l1_w = slotset_width * l1._children_per_slot(); @@ -651,10 +651,10 @@ protected: all_set = true; } else { free_pos = find_next_set_bit(slot_val, 0); - assert(free_pos < bits_per_slot); + ceph_assert(free_pos < bits_per_slot); } do { - assert(length > *allocated); + ceph_assert(length > *allocated); bool empty = l1._allocate_l1(length, min_length, max_length, @@ -683,7 +683,7 @@ protected: ++l2_allocs; auto allocated_here = *allocated - prev_allocated; - assert(available >= allocated_here); + ceph_assert(available >= allocated_here); available -= allocated_here; } @@ -726,7 +726,7 @@ protected: std::lock_guard l(lock); auto allocated = l1._mark_alloc_l1(o, len); - assert(available >= allocated); + ceph_assert(available >= allocated); available -= allocated; _mark_l2_on_l1(l2_pos, l2_pos_end); } diff --git a/src/os/filestore/DBObjectMap.cc b/src/os/filestore/DBObjectMap.cc index 3bb2cb538b434..eee34c90f2a55 100644 --- a/src/os/filestore/DBObjectMap.cc +++ b/src/os/filestore/DBObjectMap.cc @@ -277,7 +277,7 @@ int DBObjectMap::DBObjectMapIteratorImpl::init() if (ready) { return 0; } - assert(!parent_iter); + ceph_assert(!parent_iter); if (header->parent) { Header parent = map->lookup_parent(header); if (!parent) 
{ @@ -287,11 +287,11 @@ int DBObjectMap::DBObjectMapIteratorImpl::init() parent_iter = std::make_shared(map, parent); } key_iter = map->db->get_iterator(map->user_prefix(header)); - assert(key_iter); + ceph_assert(key_iter); complete_iter = map->db->get_iterator(map->complete_prefix(header)); - assert(complete_iter); + ceph_assert(complete_iter); cur_iter = key_iter; - assert(cur_iter); + ceph_assert(cur_iter); ready = true; return 0; } @@ -390,7 +390,7 @@ int DBObjectMap::DBObjectMapIteratorImpl::upper_bound(const string &after) bool DBObjectMap::DBObjectMapIteratorImpl::valid() { bool valid = !invalid && ready; - assert(!valid || cur_iter->valid()); + ceph_assert(!valid || cur_iter->valid()); return valid; } @@ -404,8 +404,8 @@ bool DBObjectMap::DBObjectMapIteratorImpl::valid_parent() int DBObjectMap::DBObjectMapIteratorImpl::next(bool validate) { - assert(cur_iter->valid()); - assert(valid()); + ceph_assert(cur_iter->valid()); + ceph_assert(valid()); cur_iter->next(); return adjust(); } @@ -416,7 +416,7 @@ int DBObjectMap::DBObjectMapIteratorImpl::next_parent() if (r < 0) return r; while (parent_iter && parent_iter->valid() && !on_parent()) { - assert(valid()); + ceph_assert(valid()); r = lower_bound(parent_iter->key()); if (r < 0) return r; @@ -448,8 +448,8 @@ int DBObjectMap::DBObjectMapIteratorImpl::in_complete_region(const string &to_te return false; } - assert(complete_iter->key() <= to_test); - assert(complete_iter->value().length() >= 1); + ceph_assert(complete_iter->key() <= to_test); + ceph_assert(complete_iter->value().length() >= 1); string _end(complete_iter->value().c_str(), complete_iter->value().length() - 1); if (_end.empty() || _end > to_test) { @@ -460,7 +460,7 @@ int DBObjectMap::DBObjectMapIteratorImpl::in_complete_region(const string &to_te return true; } else { complete_iter->next(); - assert(!complete_iter->valid() || complete_iter->key() > to_test); + ceph_assert(!complete_iter->valid() || complete_iter->key() > to_test); return false; } } @@ -494,7 +494,7 @@ int DBObjectMap::DBObjectMapIteratorImpl::adjust() } else { invalid = true; } - assert(invalid || cur_iter->valid()); + ceph_assert(invalid || cur_iter->valid()); return 0; } @@ -600,7 +600,7 @@ int DBObjectMap::clear(const ghobject_t &oid, if (check_spos(oid, header, spos)) return 0; remove_map_header(hl, oid, header, t); - assert(header->num_children > 0); + ceph_assert(header->num_children > 0); header->num_children--; int r = _clear(header, t); if (r < 0) @@ -623,7 +623,7 @@ int DBObjectMap::_clear(Header header, if (!parent) { return -EINVAL; } - assert(parent->num_children > 0); + ceph_assert(parent->num_children > 0); parent->num_children--; header.swap(parent); } @@ -658,7 +658,7 @@ int DBObjectMap::rm_keys(const ghobject_t &oid, return db->submit_transaction(t); } - assert(state.legacy); + ceph_assert(state.legacy); { // We only get here for legacy (v2) stores @@ -711,7 +711,7 @@ int DBObjectMap::clear_keys_header(const ghobject_t &oid, // remove current header remove_map_header(hl, oid, header, t); - assert(header->num_children > 0); + ceph_assert(header->num_children > 0); header->num_children--; int r = _clear(header, t); if (r < 0) @@ -1044,7 +1044,7 @@ void DBObjectMap::set_state() KeyValueDB::Transaction t = db->get_transaction(); write_state(t); int ret = db->submit_transaction_sync(t); - assert(ret == 0); + ceph_assert(ret == 0); dout(1) << __func__ << " done" << dendl; return; } @@ -1106,7 +1106,7 @@ int DBObjectMap::sync(const ghobject_t *oid, const SequencerPosition *spos) { 
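Note: the in_complete_region hunk above is an interval-membership test over disjoint [start, end) ranges stored in key order. A generic analogue using an ordered map is sketched below (in_region is invented for this note; DBObjectMap actually keeps these ranges as KV rows under complete_prefix, walked with a DB iterator):

    #include <map>
    #include <string>

    // Ranges are keyed by their start. Seek the last range starting at or
    // before 'key' and test whether its end lies beyond 'key'; an empty
    // end marks a range extending to the end of the keyspace.
    bool in_region(const std::map<std::string, std::string>& complete,
                   const std::string& key)
    {
      auto it = complete.upper_bound(key);  // first range starting after key
      if (it == complete.begin())
        return false;                       // nothing starts at or before key
      --it;
      const std::string& end = it->second;
      return end.empty() || end > key;
    }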
KeyValueDB::Transaction t = db->get_transaction(); if (oid) { - assert(spos); + ceph_assert(spos); MapHeaderLock hl(this, *oid); Header header = lookup_map_header(hl, *oid); if (header) { @@ -1133,7 +1133,7 @@ int DBObjectMap::sync(const ghobject_t *oid, } int DBObjectMap::write_state(KeyValueDB::Transaction _t) { - assert(header_lock.is_locked_by_me()); + ceph_assert(header_lock.is_locked_by_me()); dout(20) << "dbobjectmap: seq is " << state.seq << dendl; KeyValueDB::Transaction t = _t ? _t : db->get_transaction(); bufferlist bl; @@ -1149,13 +1149,13 @@ DBObjectMap::Header DBObjectMap::_lookup_map_header( const MapHeaderLock &l, const ghobject_t &oid) { - assert(l.get_locked() == oid); + ceph_assert(l.get_locked() == oid); _Header *header = new _Header(); { Mutex::Locker l(cache_lock); if (caches.lookup(oid, header)) { - assert(!in_use.count(header->seq)); + ceph_assert(!in_use.count(header->seq)); in_use.insert(header->seq); return Header(header, RemoveOnDelete(this)); } @@ -1176,7 +1176,7 @@ DBObjectMap::Header DBObjectMap::_lookup_map_header( caches.add(oid, *ret); } - assert(!in_use.count(header->seq)); + ceph_assert(!in_use.count(header->seq)); in_use.insert(header->seq); return ret; } @@ -1192,7 +1192,7 @@ DBObjectMap::Header DBObjectMap::_generate_new_header(const ghobject_t &oid, } header->num_children = 1; header->oid = oid; - assert(!in_use.count(header->seq)); + ceph_assert(!in_use.count(header->seq)); in_use.insert(header->seq); write_state(); @@ -1223,7 +1223,7 @@ DBObjectMap::Header DBObjectMap::lookup_parent(Header input) Header header = Header(new _Header(), RemoveOnDelete(this)); auto iter = out.begin()->second.cbegin(); header->decode(iter); - assert(header->seq == input->parent); + ceph_assert(header->seq == input->parent); dout(20) << "lookup_parent: parent seq is " << header->seq << " with parent " << header->parent << dendl; in_use.insert(header->seq); @@ -1271,7 +1271,7 @@ void DBObjectMap::remove_map_header( Header header, KeyValueDB::Transaction t) { - assert(l.get_locked() == oid); + ceph_assert(l.get_locked() == oid); dout(20) << "remove_map_header: removing " << header->seq << " oid " << oid << dendl; set to_remove; @@ -1288,7 +1288,7 @@ void DBObjectMap::set_map_header( const ghobject_t &oid, _Header header, KeyValueDB::Transaction t) { - assert(l.get_locked() == oid); + ceph_assert(l.get_locked() == oid); dout(20) << "set_map_header: setting " << header.seq << " oid " << oid << " parent seq " << header.parent << dendl; diff --git a/src/os/filestore/DBObjectMap.h b/src/os/filestore/DBObjectMap.h index 0a61322cd6dc7..badfbb7ab14ca 100644 --- a/src/os/filestore/DBObjectMap.h +++ b/src/os/filestore/DBObjectMap.h @@ -93,12 +93,12 @@ public: } const ghobject_t &get_locked() const { - assert(locked); + ceph_assert(locked); return *locked; } void swap(MapHeaderLock &o) { - assert(db == o.db); + ceph_assert(db == o.db); // centos6's boost optional doesn't seem to have swap :( boost::optional _locked = o.locked; @@ -109,7 +109,7 @@ public: ~MapHeaderLock() { if (locked) { Mutex::Locker l(db->header_lock); - assert(db->map_header_in_use.count(*locked)); + ceph_assert(db->map_header_in_use.count(*locked)); db->map_header_cond.Signal(); db->map_header_in_use.erase(*locked); } @@ -236,7 +236,7 @@ public: int sync(const ghobject_t *oid=0, const SequencerPosition *spos=0) override; void compact() override { - assert(db); + ceph_assert(db); db->compact(); } @@ -566,7 +566,7 @@ private: db(db) {} void operator() (_Header *header) { Mutex::Locker l(db->header_lock); - 
assert(db->in_use.count(header->seq)); + ceph_assert(db->in_use.count(header->seq)); db->in_use.erase(header->seq); db->header_cond.Signal(); delete header; diff --git a/src/os/filestore/FDCache.h b/src/os/filestore/FDCache.h index db4eac55d9a6e..ee8c4fb0d5769 100644 --- a/src/os/filestore/FDCache.h +++ b/src/os/filestore/FDCache.h @@ -40,7 +40,7 @@ public: public: const int fd; explicit FD(int _fd) : fd(_fd) { - assert(_fd >= 0); + ceph_assert(_fd >= 0); } int operator*() const { return fd; @@ -58,7 +58,7 @@ private: public: explicit FDCache(CephContext *cct) : cct(cct), registry_shards(std::max(cct->_conf->filestore_fd_cache_shards, 1)) { - assert(cct); + ceph_assert(cct); cct->_conf.add_observer(this); registry = new SharedLRU[registry_shards]; for (int i = 0; i < registry_shards; ++i) { diff --git a/src/os/filestore/FileJournal.cc b/src/os/filestore/FileJournal.cc index 5a2d6c72f1c13..3006cfc28ca62 100644 --- a/src/os/filestore/FileJournal.cc +++ b/src/os/filestore/FileJournal.cc @@ -250,7 +250,7 @@ int FileJournal::check() { int ret; - assert(fd == -1); + ceph_assert(fd == -1); ret = _open(false, false); if (ret) return ret; @@ -361,7 +361,7 @@ done: // This can not be used on an active journal int FileJournal::peek_fsid(uuid_d& fsid) { - assert(fd == -1); + ceph_assert(fd == -1); int r = _open(false, false); if (r) return r; @@ -490,9 +490,9 @@ void FileJournal::close() stop_writer(); // close - assert(writeq_empty()); - assert(!must_write_header); - assert(fd >= 0); + ceph_assert(writeq_empty()); + ceph_assert(!must_write_header); + ceph_assert(fd >= 0); _close(fd); fd = -1; } @@ -520,7 +520,7 @@ int FileJournal::_fdump(Formatter &f, bool simple) { dout(10) << "_fdump" << dendl; - assert(fd == -1); + ceph_assert(fd == -1); int err = _open(false, false); if (err) return err; @@ -812,9 +812,9 @@ int FileJournal::prepare_multi_write(bufferlist& bl, uint64_t& orig_ops, uint64_ #ifdef HAVE_LIBAIO { Mutex::Locker locker(aio_lock); - assert(aio_write_queue_ops > 0); + ceph_assert(aio_write_queue_ops > 0); aio_write_queue_ops--; - assert(aio_write_queue_bytes >= bytes); + ceph_assert(aio_write_queue_bytes >= bytes); aio_write_queue_bytes -= bytes; } #else @@ -867,7 +867,7 @@ int FileJournal::prepare_multi_write(bufferlist& bl, uint64_t& orig_ops, uint64_ out: dout(20) << "prepare_multi_write queue_pos now " << queue_pos << dendl; - assert((write_pos + bl.length() == queue_pos) || + ceph_assert((write_pos + bl.length() == queue_pos) || (write_pos + bl.length() - header.max_size + get_top() == queue_pos)); return 0; } @@ -893,7 +893,7 @@ void FileJournal::queue_write_fin(uint64_t seq, Context *fin) void FileJournal::queue_completions_thru(uint64_t seq) { - assert(finisher_lock.is_locked()); + ceph_assert(finisher_lock.is_locked()); utime_t now = ceph_clock_now(); list items; batch_pop_completions(items); @@ -981,9 +981,9 @@ void FileJournal::check_align(off64_t pos, bufferlist& bl) { // make sure list segments are page aligned if (directio && !bl.is_aligned_size_and_memory(block_size, CEPH_DIRECTIO_ALIGNMENT)) { - assert((bl.length() & (CEPH_DIRECTIO_ALIGNMENT - 1)) == 0); - assert((pos & (CEPH_DIRECTIO_ALIGNMENT - 1)) == 0); - assert(0 == "bl was not aligned"); + ceph_assert((bl.length() & (CEPH_DIRECTIO_ALIGNMENT - 1)) == 0); + ceph_assert((pos & (CEPH_DIRECTIO_ALIGNMENT - 1)) == 0); + ceph_assert(0 == "bl was not aligned"); } } @@ -1049,7 +1049,7 @@ void FileJournal::do_write(bufferlist& bl) split = header.max_size - pos; first.substr_of(bl, 0, split); second.substr_of(bl, split, 
bl.length() - split); - assert(first.length() + second.length() == bl.length()); + ceph_assert(first.length() + second.length() == bl.length()); dout(10) << "do_write wrapping, first bit at " << pos << " len " << first.length() << " second bit len " << second.length() << " (orig len " << bl.length() << ")" << dendl; @@ -1083,7 +1083,7 @@ void FileJournal::do_write(bufferlist& bl) check_align(first_pos, first); ceph_abort(); } - assert(first_pos == get_top()); + ceph_assert(first_pos == get_top()); } else { // header too? if (hbp.length()) { @@ -1143,8 +1143,8 @@ void FileJournal::do_write(bufferlist& bl) write_lock.Lock(); - assert(write_pos == pos); - assert(write_pos % header.alignment == 0); + ceph_assert(write_pos == pos); + ceph_assert(write_pos % header.alignment == 0); { Mutex::Locker locker(finisher_lock); @@ -1253,7 +1253,7 @@ void FileJournal::write_thread_entry() continue; } } - assert(r == 0); + ceph_assert(r == 0); if (logger) { logger->inc(l_filestore_journal_wr); @@ -1308,7 +1308,7 @@ void FileJournal::do_aio_write(bufferlist& bl) split = header.max_size - pos; first.substr_of(bl, 0, split); second.substr_of(bl, split, bl.length() - split); - assert(first.length() + second.length() == bl.length()); + ceph_assert(first.length() + second.length() == bl.length()); dout(10) << "do_aio_write wrapping, first bit at " << pos << "~" << first.length() << dendl; if (write_aio_bl(pos, first, 0)) { @@ -1316,7 +1316,7 @@ void FileJournal::do_aio_write(bufferlist& bl) << ") failed" << dendl; ceph_abort(); } - assert(pos == header.max_size); + ceph_assert(pos == header.max_size); if (hbp.length()) { // be sneaky: include the header in the second fragment bufferlist tmp; @@ -1353,7 +1353,7 @@ void FileJournal::do_aio_write(bufferlist& bl) write_pos = pos; if (write_pos == header.max_size) write_pos = get_top(); - assert(write_pos % header.alignment == 0); + ceph_assert(write_pos % header.alignment == 0); } /** @@ -1374,7 +1374,7 @@ int FileJournal::write_aio_bl(off64_t& pos, bufferlist& bl, uint64_t seq) for (std::list::const_iterator p = bl.buffers().begin(); n < max; ++p, ++n) { - assert(p != bl.buffers().end()); + ceph_assert(p != bl.buffers().end()); iov[n].iov_base = (void *)p->c_str(); iov[n].iov_len = p->length(); len += p->length(); @@ -1421,7 +1421,7 @@ int FileJournal::write_aio_bl(off64_t& pos, bufferlist& bl, uint64_t seq) continue; } check_align(pos, tbl); - assert(0 == "io_submit got unexpected error"); + ceph_assert(0 == "io_submit got unexpected error"); } else { break; } @@ -1460,7 +1460,7 @@ void FileJournal::write_finish_thread_entry() continue; } derr << "io_getevents got " << cpp_strerror(r) << dendl; - assert(0 == "got unexpected error from io_getevents"); + ceph_assert(0 == "got unexpected error from io_getevents"); } { @@ -1470,7 +1470,7 @@ void FileJournal::write_finish_thread_entry() if (event[i].res != ai->len) { derr << "aio to " << ai->off << "~" << ai->len << " returned: " << (int)event[i].res << dendl; - assert(0 == "unexpected aio error"); + ceph_assert(0 == "unexpected aio error"); } dout(10) << __func__ << " aio " << ai->off << "~" << ai->len << " done" << dendl; @@ -1489,7 +1489,7 @@ void FileJournal::write_finish_thread_entry() */ void FileJournal::check_aio_completion() { - assert(aio_lock.is_locked()); + ceph_assert(aio_lock.is_locked()); dout(20) << "check_aio_completion" << dendl; bool completed_something = false, signal = false; @@ -1593,8 +1593,8 @@ void FileJournal::submit_entry(uint64_t seq, bufferlist& e, uint32_t orig_len, dout(5) << 
"submit_entry seq " << seq << " len " << e.length() << " (" << oncommit << ")" << dendl; - assert(e.length() > 0); - assert(e.length() < header.max_size); + ceph_assert(e.length() > 0); + ceph_assert(e.length() < header.max_size); if (logger) { logger->inc(l_filestore_journal_queue_bytes, orig_len); @@ -1647,14 +1647,14 @@ bool FileJournal::writeq_empty() FileJournal::write_item &FileJournal::peek_write() { - assert(write_lock.is_locked()); + ceph_assert(write_lock.is_locked()); Mutex::Locker locker(writeq_lock); return writeq.front(); } void FileJournal::pop_write() { - assert(write_lock.is_locked()); + ceph_assert(write_lock.is_locked()); Mutex::Locker locker(writeq_lock); if (logger) { logger->dec(l_filestore_journal_queue_bytes, writeq.front().orig_len); @@ -1665,7 +1665,7 @@ void FileJournal::pop_write() void FileJournal::batch_pop_write(list &items) { - assert(write_lock.is_locked()); + ceph_assert(write_lock.is_locked()); { Mutex::Locker locker(writeq_lock); writeq.swap(items); @@ -1680,7 +1680,7 @@ void FileJournal::batch_pop_write(list &items) void FileJournal::batch_unpop_write(list &items) { - assert(write_lock.is_locked()); + ceph_assert(write_lock.is_locked()); for (auto &&i : items) { if (logger) { logger->inc(l_filestore_journal_queue_bytes, i.orig_len); @@ -1734,7 +1734,7 @@ void FileJournal::do_discard(int64_t offset, int64_t end) if (offset >= end) return; end = round_up_to(end - block_size, block_size); - assert(end >= offset); + ceph_assert(end >= offset); if (offset < end) if (block_device_discard(fd, offset, end - offset) < 0) dout(1) << __func__ << " ioctl(BLKDISCARD) error:" << cpp_strerror(errno) << dendl; @@ -1752,7 +1752,7 @@ void FileJournal::committed_thru(uint64_t seq) if (seq < last_committed_seq) { dout(5) << "committed_thru " << seq << " < last_committed_seq " << last_committed_seq << dendl; - assert(seq >= last_committed_seq); + ceph_assert(seq >= last_committed_seq); return; } if (seq == last_committed_seq) { @@ -1895,7 +1895,7 @@ void FileJournal::wrap_read_bl( len = olen; // rest int64_t actual = ::lseek64(fd, pos, SEEK_SET); - assert(actual == pos); + ceph_assert(actual == pos); bufferptr bp = buffer::create(len); int r = safe_read_exact(fd, bp.c_str(), len); @@ -2063,7 +2063,7 @@ FileJournal::read_entry_result FileJournal::do_read_entry( if (_h) *_h = *h; - assert(cur_pos % header.alignment == 0); + ceph_assert(cur_pos % header.alignment == 0); return SUCCESS; } @@ -2112,18 +2112,18 @@ void FileJournal::corrupt( corrupt_at = corrupt_at + get_top() - header.max_size; int64_t actual = ::lseek64(fd, corrupt_at, SEEK_SET); - assert(actual == corrupt_at); + ceph_assert(actual == corrupt_at); char buf[10]; int r = safe_read_exact(fd, buf, 1); - assert(r == 0); + ceph_assert(r == 0); actual = ::lseek64(wfd, corrupt_at, SEEK_SET); - assert(actual == corrupt_at); + ceph_assert(actual == corrupt_at); buf[0]++; r = safe_write(wfd, buf, 1); - assert(r == 0); + ceph_assert(r == 0); } void FileJournal::corrupt_payload( diff --git a/src/os/filestore/FileJournal.h b/src/os/filestore/FileJournal.h index 9502ea299b01e..81d75d8358827 100644 --- a/src/os/filestore/FileJournal.h +++ b/src/os/filestore/FileJournal.h @@ -98,12 +98,12 @@ public: } completion_item completion_peek_front() { Mutex::Locker l(completions_lock); - assert(!completions.empty()); + ceph_assert(!completions.empty()); return completions.front(); } void completion_pop_front() { Mutex::Locker l(completions_lock); - assert(!completions.empty()); + ceph_assert(!completions.empty()); 
completions.pop_front(); } @@ -446,7 +446,7 @@ private: cct->_conf.add_observer(this); } ~FileJournal() override { - assert(fd == -1); + ceph_assert(fd == -1); delete[] zero_buf; cct->_conf.remove_observer(this); } diff --git a/src/os/filestore/FileStore.cc b/src/os/filestore/FileStore.cc index 9485b1740adbf..3755e4807ffba 100644 --- a/src/os/filestore/FileStore.cc +++ b/src/os/filestore/FileStore.cc @@ -169,7 +169,7 @@ int FileStore::get_cdir(const coll_t& cid, char *s, int len) int FileStore::get_index(const coll_t& cid, Index *index) { int r = index_manager.get_index(cid, basedir, index); - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); return r; } @@ -178,7 +178,7 @@ int FileStore::init_index(const coll_t& cid) char path[PATH_MAX]; get_cdir(cid, path, sizeof(path)); int r = index_manager.init_index(cid, path, target_version); - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); return r; } @@ -188,10 +188,10 @@ int FileStore::lfn_find(const ghobject_t& oid, const Index& index, IndexedPath * if (!path) path = &path2; int r, exist; - assert(index.index); + ceph_assert(index.index); r = (index.index)->lookup(oid, path, &exist); if (r < 0) { - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); return r; } if (!exist) @@ -210,10 +210,10 @@ int FileStore::lfn_truncate(const coll_t& cid, const ghobject_t& oid, off_t leng r = -errno; if (r >= 0 && m_filestore_sloppy_crc) { int rc = backend->_crc_update_truncate(**fd, length); - assert(rc >= 0); + ceph_assert(rc >= 0); } lfn_close(fd); - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); return r; } @@ -225,7 +225,7 @@ int FileStore::lfn_stat(const coll_t& cid, const ghobject_t& oid, struct stat *b if (r < 0) return r; - assert(index.index); + ceph_assert(index.index); RWLock::RLocker l((index.index)->access_lock); r = lfn_find(oid, index, &path); @@ -243,7 +243,7 @@ int FileStore::lfn_open(const coll_t& cid, FDRef *outfd, Index *index) { - assert(outfd); + ceph_assert(outfd); int r = 0; bool need_lock = true; int flags = O_RDWR; @@ -269,7 +269,7 @@ int FileStore::lfn_open(const coll_t& cid, } int fd, exist; - assert((*index).index); + ceph_assert((*index).index); if (need_lock) { ((*index).index)->access_lock.get_write(); } @@ -343,7 +343,7 @@ int FileStore::lfn_open(const coll_t& cid, ((*index).index)->access_lock.put_write(); } - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); return r; } @@ -380,8 +380,8 @@ int FileStore::lfn_link(const coll_t& c, const coll_t& newcid, const ghobject_t& return r; } - assert(index_old.index); - assert(index_new.index); + ceph_assert(index_old.index); + ceph_assert(index_new.index); if (!index_same) { @@ -389,7 +389,7 @@ int FileStore::lfn_link(const coll_t& c, const coll_t& newcid, const ghobject_t& r = index_old->lookup(o, &path_old, &exist); if (r < 0) { - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); return r; } if (!exist) @@ -399,7 +399,7 @@ int FileStore::lfn_link(const coll_t& c, const coll_t& newcid, const ghobject_t& r = index_new->lookup(newoid, &path_new, &exist); if (r < 0) { - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); return r; } if (exist) @@ -413,7 +413,7 @@ int FileStore::lfn_link(const coll_t& c, const coll_t& newcid, const ghobject_t& r = index_new->created(newoid, 
path_new->path()); if (r < 0) { - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); return r; } } else { @@ -421,7 +421,7 @@ int FileStore::lfn_link(const coll_t& c, const coll_t& newcid, const ghobject_t& r = index_old->lookup(o, &path_old, &exist); if (r < 0) { - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); return r; } if (!exist) @@ -429,7 +429,7 @@ int FileStore::lfn_link(const coll_t& c, const coll_t& newcid, const ghobject_t& r = index_new->lookup(newoid, &path_new, &exist); if (r < 0) { - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); return r; } if (exist) @@ -446,7 +446,7 @@ int FileStore::lfn_link(const coll_t& c, const coll_t& newcid, const ghobject_t& r = index_new->created(newoid, path_new->path()); if (r < 0) { - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); return r; } } @@ -464,7 +464,7 @@ int FileStore::lfn_unlink(const coll_t& cid, const ghobject_t& o, return r; } - assert(index.index); + ceph_assert(index.index); RWLock::WLocker l((index.index)->access_lock); { @@ -472,7 +472,7 @@ int FileStore::lfn_unlink(const coll_t& cid, const ghobject_t& o, int hardlink; r = index->lookup(o, &path, &hardlink); if (r < 0) { - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); return r; } @@ -487,7 +487,7 @@ int FileStore::lfn_unlink(const coll_t& cid, const ghobject_t& o, r = object_map->clear(o, &spos); if (r < 0 && r != -ENOENT) { dout(25) << __FUNC__ << ": omap clear failed " << cpp_strerror(r) << dendl; - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); return r; } if (cct->_conf->filestore_debug_inject_read_err) { @@ -739,8 +739,8 @@ int FileStore::statfs(struct store_statfs_t *buf0) buf0->reset(); if (::statfs(basedir.c_str(), &buf) < 0) { int r = -errno; - assert(!m_filestore_fail_eio || r != -EIO); - assert(r != -ENOENT); + ceph_assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(r != -ENOENT); return r; } @@ -833,7 +833,7 @@ void FileStore::create_backend(unsigned long f_type) { m_fs_type = f_type; - assert(!backend); + ceph_assert(!backend); backend = FileStoreBackend::create(f_type, this); dout(0) << "backend " << backend->get_name() @@ -991,7 +991,7 @@ int FileStore::mkfs() if (backend->can_checkpoint()) { // create snap_1 too current_fd = ::open(current_fn.c_str(), O_RDONLY); - assert(current_fd >= 0); + ceph_assert(current_fd >= 0); char s[NAME_MAX]; snprintf(s, sizeof(s), COMMIT_SNAP_ITEM, 1ull); ret = backend->create_checkpoint(s, nullptr); @@ -1023,7 +1023,7 @@ int FileStore::mkfs() } if (read_fsid(omap_fsid_fd, &old_omap_fsid) < 0 || old_omap_fsid.is_zero()) { - assert(!fsid.is_zero()); + ceph_assert(!fsid.is_zero()); fsid.print(fsid_str); strcat(fsid_str, "\n"); ret = ::ftruncate(omap_fsid_fd, 0); @@ -1454,7 +1454,7 @@ int FileStore::read_op_seq(uint64_t *seq) int op_fd = ::open(current_op_seq_fn.c_str(), O_CREAT|O_RDWR, 0644); if (op_fd < 0) { int r = -errno; - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); return r; } char s[40]; @@ -1463,7 +1463,7 @@ int FileStore::read_op_seq(uint64_t *seq) if (ret < 0) { derr << __FUNC__ << ": error reading " << current_op_seq_fn << ": " << cpp_strerror(ret) << dendl; VOID_TEMP_FAILURE_RETRY(::close(op_fd)); - assert(!m_filestore_fail_eio || ret != -EIO); + ceph_assert(!m_filestore_fail_eio || ret 
!= -EIO); return ret; } *seq = atoll(s); @@ -1477,7 +1477,7 @@ int FileStore::write_op_seq(int fd, uint64_t seq) int ret = TEMP_FAILURE_RETRY(::pwrite(fd, s, strlen(s), 0)); if (ret < 0) { ret = -errno; - assert(!m_filestore_fail_eio || ret != -EIO); + ceph_assert(!m_filestore_fail_eio || ret != -EIO); } return ret; } @@ -1599,7 +1599,7 @@ int FileStore::mount() char clustersnap[NAME_MAX]; for (list::iterator it = ls.begin(); it != ls.end(); ++it) { if (sscanf(it->c_str(), COMMIT_SNAP_ITEM, &c) == 1) { - assert(c > prev); + ceph_assert(c > prev); prev = c; snaps.push_back(c); } else if (sscanf(it->c_str(), CLUSTER_SNAP_ITEM, clustersnap) == 1) @@ -1629,7 +1629,7 @@ int FileStore::mount() << " ** NOTE: rolling back to cluster snapshot " << m_osd_rollback_to_cluster_snap << " **" << TEXT_NORMAL << dendl; - assert(cluster_snaps.count(m_osd_rollback_to_cluster_snap)); + ceph_assert(cluster_snaps.count(m_osd_rollback_to_cluster_snap)); snprintf(s, sizeof(s), CLUSTER_SNAP_ITEM, m_osd_rollback_to_cluster_snap.c_str()); } else { { @@ -1687,7 +1687,7 @@ int FileStore::mount() goto close_basedir_fd; } - assert(current_fd >= 0); + ceph_assert(current_fd >= 0); op_fd = read_op_seq(&initial_op_seq); if (op_fd < 0) { @@ -1864,7 +1864,7 @@ int FileStore::mount() << " with error: " << ret << dendl; goto close_current_fd; } - assert(index.index); + ceph_assert(index.index); RWLock::WLocker l((index.index)->access_lock); index->cleanup(); @@ -1949,7 +1949,7 @@ close_fsid_fd: VOID_TEMP_FAILURE_RETRY(::close(fsid_fd)); fsid_fd = -1; done: - assert(!m_filestore_fail_eio || ret != -EIO); + ceph_assert(!m_filestore_fail_eio || ret != -EIO); delete backend; backend = nullptr; object_map.reset(); @@ -1961,7 +1961,7 @@ void FileStore::init_temp_collections() dout(10) << __FUNC__ << dendl; vector ls; int r = list_collections(ls, true); - assert(r >= 0); + ceph_assert(r >= 0); dout(20) << " ls " << ls << dendl; @@ -1985,14 +1985,14 @@ void FileStore::init_temp_collections() } else { dout(10) << __FUNC__ << ": creating " << temp << dendl; r = _create_collection(temp, 0, spos); - assert(r == 0); + ceph_assert(r == 0); } } for (set::iterator p = temps.begin(); p != temps.end(); ++p) { dout(10) << __FUNC__ << ": removing stray " << *p << dendl; r = _collection_remove_recursive(*p, spos); - assert(r == 0); + ceph_assert(r == 0); } } @@ -2351,7 +2351,7 @@ int FileStore::queue_transactions(CollectionHandle& ch, vector& tls return 0; } - assert(journal); + ceph_assert(journal); //prepare and encode transactions data out of lock bufferlist tbl; int orig_len = -1; @@ -2447,12 +2447,12 @@ void FileStore::_set_global_replay_guard(const coll_t& cid, int ret = object_map->sync(); if (ret < 0) { derr << __FUNC__ << ": omap sync error " << cpp_strerror(ret) << dendl; - assert(0 == "_set_global_replay_guard failed"); + ceph_assert(0 == "_set_global_replay_guard failed"); } ret = sync_filesystem(basedir_fd); if (ret < 0) { derr << __FUNC__ << ": sync_filesystem error " << cpp_strerror(ret) << dendl; - assert(0 == "_set_global_replay_guard failed"); + ceph_assert(0 == "_set_global_replay_guard failed"); } char fn[PATH_MAX]; @@ -2461,7 +2461,7 @@ void FileStore::_set_global_replay_guard(const coll_t& cid, if (fd < 0) { int err = errno; derr << __FUNC__ << ": " << cid << " error " << cpp_strerror(err) << dendl; - assert(0 == "_set_global_replay_guard failed"); + ceph_assert(0 == "_set_global_replay_guard failed"); } _inject_failure(); @@ -2474,7 +2474,7 @@ void FileStore::_set_global_replay_guard(const coll_t& cid, if (r < 0) { derr << 
__FUNC__ << ": fsetxattr " << GLOBAL_REPLAY_GUARD_XATTR << " got " << cpp_strerror(r) << dendl; - assert(0 == "fsetxattr failed"); + ceph_assert(0 == "fsetxattr failed"); } // and make sure our xattr is durable. @@ -2501,7 +2501,7 @@ int FileStore::_check_global_replay_guard(const coll_t& cid, int r = chain_fgetxattr(fd, GLOBAL_REPLAY_GUARD_XATTR, buf, sizeof(buf)); if (r < 0) { dout(20) << __FUNC__ << ": no xattr" << dendl; - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); VOID_TEMP_FAILURE_RETRY(::close(fd)); return 1; // no xattr } @@ -2527,7 +2527,7 @@ void FileStore::_set_replay_guard(const coll_t& cid, if (fd < 0) { int err = errno; derr << __FUNC__ << ": " << cid << " error " << cpp_strerror(err) << dendl; - assert(0 == "_set_replay_guard failed"); + ceph_assert(0 == "_set_replay_guard failed"); } _set_replay_guard(fd, spos, 0, in_progress); VOID_TEMP_FAILURE_RETRY(::close(fd)); @@ -2566,7 +2566,7 @@ void FileStore::_set_replay_guard(int fd, fd, REPLAY_GUARD_XATTR, v.c_str(), v.length()); if (r < 0) { derr << "fsetxattr " << REPLAY_GUARD_XATTR << " got " << cpp_strerror(r) << dendl; - assert(0 == "fsetxattr failed"); + ceph_assert(0 == "fsetxattr failed"); } // and make sure our xattr is durable. @@ -2586,7 +2586,7 @@ void FileStore::_close_replay_guard(const coll_t& cid, if (fd < 0) { int err = errno; derr << __FUNC__ << ": " << cid << " error " << cpp_strerror(err) << dendl; - assert(0 == "_close_replay_guard failed"); + ceph_assert(0 == "_close_replay_guard failed"); } _close_replay_guard(fd, spos); VOID_TEMP_FAILURE_RETRY(::close(fd)); @@ -2616,7 +2616,7 @@ void FileStore::_close_replay_guard(int fd, const SequencerPosition& spos, fd, REPLAY_GUARD_XATTR, v.c_str(), v.length()); if (r < 0) { derr << "fsetxattr " << REPLAY_GUARD_XATTR << " got " << cpp_strerror(r) << dendl; - assert(0 == "fsetxattr failed"); + ceph_assert(0 == "fsetxattr failed"); } // and make sure our xattr is durable. 
@@ -2674,7 +2674,7 @@ int FileStore::_check_replay_guard(int fd, const SequencerPosition& spos) int r = chain_fgetxattr(fd, REPLAY_GUARD_XATTR, buf, sizeof(buf)); if (r < 0) { dout(20) << __FUNC__ << ": no xattr" << dendl; - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); return 1; // no xattr } bufferlist bl; @@ -2978,15 +2978,15 @@ void FileStore::_do_transaction( const coll_t &ncid = i.get_cid(op->dest_cid); const ghobject_t &oid = i.get_oid(op->oid); - assert(oid.hobj.pool >= -1); + ceph_assert(oid.hobj.pool >= -1); // always followed by OP_COLL_REMOVE Transaction::Op *op2 = i.decode_op(); const coll_t &ocid2 = i.get_cid(op2->cid); const ghobject_t &oid2 = i.get_oid(op2->oid); - assert(op2->op == Transaction::OP_COLL_REMOVE); - assert(ocid2 == ocid); - assert(oid2 == oid); + ceph_assert(op2->op == Transaction::OP_COLL_REMOVE); + ceph_assert(ocid2 == ocid); + ceph_assert(oid2 == oid); tracepoint(objectstore, coll_add_enter); r = _collection_add(ncid, ocid, oid, spos); @@ -3049,7 +3049,7 @@ void FileStore::_do_transaction( case Transaction::OP_COLL_SETATTR: case Transaction::OP_COLL_RMATTR: - assert(0 == "collection attr methods no longer implemented"); + ceph_assert(0 == "collection attr methods no longer implemented"); break; case Transaction::OP_COLL_RENAME: @@ -3129,7 +3129,7 @@ void FileStore::_do_transaction( break; case Transaction::OP_SPLIT_COLLECTION: { - assert(0 == "not legacy journal; upgrade to firefly first"); + ceph_assert(0 == "not legacy journal; upgrade to firefly first"); } break; case Transaction::OP_SPLIT_COLLECTION2: @@ -3248,7 +3248,7 @@ void FileStore::_do_transaction( dump_open_fds(cct); } - assert(0 == "unexpected error"); + ceph_assert(0 == "unexpected error"); } } @@ -3284,7 +3284,7 @@ int FileStore::stat( osr->wait_for_apply(oid); const coll_t& cid = !_need_temp_object_collection(ch->cid, oid) ? ch->cid : ch->cid.get_temp(); int r = lfn_stat(cid, oid, st); - assert(allow_eio || !m_filestore_fail_eio || r != -EIO); + ceph_assert(allow_eio || !m_filestore_fail_eio || r != -EIO); if (r < 0) { dout(10) << __FUNC__ << ": " << ch->cid << "/" << oid << " = " << r << dendl; @@ -3338,7 +3338,7 @@ int FileStore::read( struct stat st; memset(&st, 0, sizeof(struct stat)); int r = ::fstat(**fd, &st); - assert(r == 0); + ceph_assert(r == 0); len = st.st_size; } @@ -3373,7 +3373,7 @@ int FileStore::read( if (errors != 0) { dout(0) << __FUNC__ << ": " << cid << "/" << oid << " " << offset << "~" << got << " ... 
BAD CRC:\n" << ss.str() << dendl; - assert(0 == "bad crc on read"); + ceph_assert(0 == "bad crc on read"); } } @@ -3561,7 +3561,7 @@ int FileStore::fiemap(CollectionHandle& ch, const ghobject_t& oid, done: dout(10) << __FUNC__ << ": " << cid << "/" << oid << " " << offset << "~" << len << " = " << r << " num_extents=" << destmap.size() << " " << destmap << dendl; - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); tracepoint(objectstore, fiemap_exit, r); return r; } @@ -3627,7 +3627,7 @@ int FileStore::_write(const coll_t& cid, const ghobject_t& oid, if (r >= 0 && m_filestore_sloppy_crc) { int rc = backend->_crc_update_write(**fd, offset, len, bl); - assert(rc >= 0); + ceph_assert(rc >= 0); } if (replaying || m_disable_wbthrottle) { @@ -3692,7 +3692,7 @@ int FileStore::_zero(const coll_t& cid, const ghobject_t& oid, uint64_t offset, if (ret >= 0 && m_filestore_sloppy_crc) { int rc = backend->_crc_update_zero(**fd, offset, len); - assert(rc >= 0); + ceph_assert(rc >= 0); } if (ret == 0) @@ -3740,7 +3740,7 @@ int FileStore::_clone(const coll_t& cid, const ghobject_t& oldoid, const ghobjec if (r < 0) { goto out2; } - assert(index.index); + ceph_assert(index.index); RWLock::WLocker l((index.index)->access_lock); r = lfn_open(cid, newoid, true, &n, &index); @@ -3802,7 +3802,7 @@ int FileStore::_clone(const coll_t& cid, const ghobject_t& oldoid, const ghobjec lfn_close(o); out2: dout(10) << __FUNC__ << ": " << cid << "/" << oldoid << " -> " << cid << "/" << newoid << " = " << r << dendl; - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); return r; } @@ -3848,7 +3848,7 @@ int FileStore::_do_sparse_copy_range(int from, int to, uint64_t srcoff, uint64_t if (r >= 0) { if (m_filestore_sloppy_crc) { int rc = backend->_crc_update_clone_range(from, to, srcoff, len, dstoff); - assert(rc >= 0); + ceph_assert(rc >= 0); } struct stat st; r = ::fstat(to, &st); @@ -3986,14 +3986,14 @@ int FileStore::_do_copy_range(int from, int to, uint64_t srcoff, uint64_t len, u } if (r < 0 && replaying) { - assert(r == -ERANGE); + ceph_assert(r == -ERANGE); derr << __FUNC__ << ": short source tolerated because we are replaying" << dendl; r = len; } - assert(replaying || pos == end); + ceph_assert(replaying || pos == end); if (r >= 0 && !skip_sloppycrc && m_filestore_sloppy_crc) { int rc = backend->_crc_update_clone_range(from, to, srcoff, len, dstoff); - assert(rc >= 0); + ceph_assert(rc >= 0); } dout(20) << __FUNC__ << ": " << srcoff << "~" << len << " to " << dstoff << " = " << r << dendl; return r; @@ -4127,7 +4127,7 @@ void FileStore::sync_entry() int err = write_op_seq(op_fd, cp); if (err < 0) { derr << "Error during write_op_seq: " << cpp_strerror(err) << dendl; - assert(0 == "error during write_op_seq"); + ceph_assert(0 == "error during write_op_seq"); } char s[NAME_MAX]; @@ -4137,7 +4137,7 @@ void FileStore::sync_entry() if (err < 0) { int err = errno; derr << "snap create '" << s << "' got error " << err << dendl; - assert(err == 0); + ceph_assert(err == 0); } snaps.push_back(cp); @@ -4149,7 +4149,7 @@ void FileStore::sync_entry() err = backend->sync_checkpoint(cid); if (err < 0) { derr << "ioctl WAIT_SYNC got " << cpp_strerror(err) << dendl; - assert(0 == "wait_sync got error"); + ceph_assert(0 == "wait_sync got error"); } dout(20) << " done waiting for checkpoint " << cid << " to complete" << dendl; } @@ -4160,24 +4160,24 @@ void FileStore::sync_entry() int err = object_map->sync(); if (err < 0) { derr << "object_map sync 
got " << cpp_strerror(err) << dendl; - assert(0 == "object_map sync returned error"); + ceph_assert(0 == "object_map sync returned error"); } err = backend->syncfs(); if (err < 0) { derr << "syncfs got " << cpp_strerror(err) << dendl; - assert(0 == "syncfs returned error"); + ceph_assert(0 == "syncfs returned error"); } err = write_op_seq(op_fd, cp); if (err < 0) { derr << "Error during write_op_seq: " << cpp_strerror(err) << dendl; - assert(0 == "error during write_op_seq"); + ceph_assert(0 == "error during write_op_seq"); } err = ::fsync(op_fd); if (err < 0) { derr << "Error during fsync of op_seq: " << cpp_strerror(err) << dendl; - assert(0 == "error during fsync of op_seq"); + ceph_assert(0 == "error during fsync of op_seq"); } } @@ -4382,7 +4382,7 @@ int FileStore::_fgetattr(int fd, const char *name, bufferptr& bp) l = chain_fgetxattr(fd, name, bp.c_str(), l); } } - assert(!m_filestore_fail_eio || l != -EIO); + ceph_assert(!m_filestore_fail_eio || l != -EIO); return l; } @@ -4396,7 +4396,7 @@ int FileStore::_fgetattrs(int fd, map& aset) if (len == -ERANGE) { len = chain_flistxattr(fd, 0, 0); if (len < 0) { - assert(!m_filestore_fail_eio || len != -EIO); + ceph_assert(!m_filestore_fail_eio || len != -EIO); return len; } dout(10) << " -ERANGE, len is " << len << dendl; @@ -4404,13 +4404,13 @@ int FileStore::_fgetattrs(int fd, map& aset) len = chain_flistxattr(fd, names2, len); dout(10) << " -ERANGE, got " << len << dendl; if (len < 0) { - assert(!m_filestore_fail_eio || len != -EIO); + ceph_assert(!m_filestore_fail_eio || len != -EIO); delete[] names2; return len; } name = names2; } else if (len < 0) { - assert(!m_filestore_fail_eio || len != -EIO); + ceph_assert(!m_filestore_fail_eio || len != -EIO); return len; } else { name = names1; @@ -4542,7 +4542,7 @@ int FileStore::getattr(CollectionHandle& ch, const ghobject_t& oid, const char * } out: dout(10) << __FUNC__ << ": " << cid << "/" << oid << " '" << name << "' = " << r << dendl; - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); if (cct->_conf->filestore_debug_inject_read_err && debug_mdata_eio(oid)) { return -EIO; @@ -4609,7 +4609,7 @@ int FileStore::getattrs(CollectionHandle& ch, const ghobject_t& oid, map::iterator i = omap_aset.begin(); i != omap_aset.end(); ++i) { @@ -4619,7 +4619,7 @@ int FileStore::getattrs(CollectionHandle& ch, const ghobject_t& oid, mapremove_xattrs(oid, omap_remove, &spos); if (r < 0 && r != -ENOENT) { dout(10) << __FUNC__ << ": could not remove_xattrs r = " << r << dendl; - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); goto out_close; } else { r = 0; // don't confuse the debug output @@ -4718,7 +4718,7 @@ int FileStore::_setattrs(const coll_t& cid, const ghobject_t& oid, mapset_xattrs(oid, omap_set, &spos); if (r < 0) { dout(10) << __FUNC__ << ": could not set_xattrs r = " << r << dendl; - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); goto out_close; } } @@ -4763,7 +4763,7 @@ int FileStore::_rmattr(const coll_t& cid, const ghobject_t& oid, const char *nam r = object_map->remove_xattrs(oid, to_remove, &spos); if (r < 0 && r != -ENOENT) { dout(10) << __FUNC__ << ": could not remove_xattrs index r = " << r << dendl; - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); goto out_close; } } @@ -4823,7 +4823,7 @@ int FileStore::_rmattrs(const coll_t& cid, const ghobject_t& oid, r = object_map->get_all_xattrs(oid, &omap_attrs); 
if (r < 0 && r != -ENOENT) { dout(10) << __FUNC__ << ": could not get omap_attrs r = " << r << dendl; - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); goto out_close; } r = object_map->remove_xattrs(oid, omap_attrs, &spos); @@ -4868,7 +4868,7 @@ int FileStore::_collection_remove_recursive(const coll_t &cid, for (vector::iterator i = objects.begin(); i != objects.end(); ++i) { - assert(_check_replay_guard(cid, *i, spos)); + ceph_assert(_check_replay_guard(cid, *i, spos)); r = _remove(cid, *i, spos); if (r < 0) return r; @@ -4899,7 +4899,7 @@ int FileStore::list_collections(vector& ls, bool include_temp) if (!dir) { r = -errno; derr << "tried opening directory " << fn << ": " << cpp_strerror(-r) << dendl; - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); return r; } @@ -4912,14 +4912,14 @@ int FileStore::list_collections(vector& ls, bool include_temp) if (int n = snprintf(filename, sizeof(filename), "%s/%s", fn, de->d_name); n >= static_cast(sizeof(filename))) { derr << __func__ << " path length overrun: " << n << dendl; - assert(false); + ceph_assert(false); } r = ::stat(filename, &sb); if (r < 0) { r = -errno; derr << "stat on " << filename << ": " << cpp_strerror(-r) << dendl; - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); break; } if (!S_ISDIR(sb.st_mode)) { @@ -4951,7 +4951,7 @@ int FileStore::list_collections(vector& ls, bool include_temp) } ::closedir(dir); - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); tracepoint(objectstore, list_collections_exit, r); return r; } @@ -4966,7 +4966,7 @@ int FileStore::collection_stat(const coll_t& c, struct stat *st) if (r < 0) r = -errno; dout(10) << __FUNC__ << ": " << fn << " = " << r << dendl; - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); tracepoint(objectstore, collection_stat_exit, r); return r; } @@ -4992,7 +4992,7 @@ int FileStore::collection_empty(const coll_t& cid, bool *empty) return r; } - assert(index.index); + ceph_assert(index.index); RWLock::RLocker l((index.index)->access_lock); vector ls; @@ -5001,7 +5001,7 @@ int FileStore::collection_empty(const coll_t& cid, bool *empty) if (r < 0) { derr << __FUNC__ << ": collection_list_partial returned: " << cpp_strerror(r) << dendl; - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); return r; } *empty = ls.empty(); @@ -5117,13 +5117,13 @@ int FileStore::collection_list(const coll_t& c, if (r < 0) return r; - assert(index.index); + ceph_assert(index.index); RWLock::RLocker l((index.index)->access_lock); r = index->collection_list_partial(start, end, max, ls, next); if (r < 0) { - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); return r; } dout(20) << "objects: " << *ls << dendl; @@ -5154,7 +5154,7 @@ int FileStore::omap_get(CollectionHandle& ch, const ghobject_t &hoid, if (r < 0) return r; { - assert(index.index); + ceph_assert(index.index); RWLock::RLocker l((index.index)->access_lock); r = lfn_find(hoid, index); if (r < 0) @@ -5162,7 +5162,7 @@ int FileStore::omap_get(CollectionHandle& ch, const ghobject_t &hoid, } r = object_map->get(hoid, header, out); if (r < 0 && r != -ENOENT) { - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); return r; } tracepoint(objectstore, omap_get_exit, 0); @@ -5187,7 +5187,7 @@ int 
FileStore::omap_get_header( if (r < 0) return r; { - assert(index.index); + ceph_assert(index.index); RWLock::RLocker l((index.index)->access_lock); r = lfn_find(hoid, index); if (r < 0) @@ -5195,7 +5195,7 @@ int FileStore::omap_get_header( } r = object_map->get_header(hoid, bl); if (r < 0 && r != -ENOENT) { - assert(allow_eio || !m_filestore_fail_eio || r != -EIO); + ceph_assert(allow_eio || !m_filestore_fail_eio || r != -EIO); return r; } tracepoint(objectstore, omap_get_header_exit, 0); @@ -5216,7 +5216,7 @@ int FileStore::omap_get_keys(CollectionHandle& ch, const ghobject_t &hoid, setaccess_lock); r = lfn_find(hoid, index); if (r < 0) @@ -5224,7 +5224,7 @@ int FileStore::omap_get_keys(CollectionHandle& ch, const ghobject_t &hoid, setget_keys(hoid, keys); if (r < 0 && r != -ENOENT) { - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); return r; } tracepoint(objectstore, omap_get_keys_exit, 0); @@ -5250,7 +5250,7 @@ int FileStore::omap_get_values(CollectionHandle& ch, const ghobject_t &hoid, goto out; } { - assert(index.index); + ceph_assert(index.index); RWLock::RLocker l((index.index)->access_lock); r = lfn_find(hoid, index); if (r < 0) { @@ -5260,7 +5260,7 @@ int FileStore::omap_get_values(CollectionHandle& ch, const ghobject_t &hoid, } r = object_map->get_values(hoid, keys, out); if (r < 0 && r != -ENOENT) { - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); where = " (get_values)"; goto out; } @@ -5288,7 +5288,7 @@ int FileStore::omap_check_keys(CollectionHandle& ch, const ghobject_t &hoid, if (r < 0) return r; { - assert(index.index); + ceph_assert(index.index); RWLock::RLocker l((index.index)->access_lock); r = lfn_find(hoid, index); if (r < 0) @@ -5296,7 +5296,7 @@ int FileStore::omap_check_keys(CollectionHandle& ch, const ghobject_t &hoid, } r = object_map->check_keys(hoid, keys, out); if (r < 0 && r != -ENOENT) { - assert(!m_filestore_fail_eio || r != -EIO); + ceph_assert(!m_filestore_fail_eio || r != -EIO); return r; } tracepoint(objectstore, omap_check_keys_exit, 0); @@ -5326,7 +5326,7 @@ ObjectMap::ObjectMapIterator FileStore::get_omap_iterator(const coll_t& _c, return ObjectMap::ObjectMapIterator(); } { - assert(index.index); + ceph_assert(index.index); RWLock::RLocker l((index.index)->access_lock); r = lfn_find(hoid, index); if (r < 0) { @@ -5415,7 +5415,7 @@ int FileStore::_destroy_collection(const coll_t& c) r = get_index(c, &from); if (r < 0) goto out; - assert(from.index); + ceph_assert(from.index); RWLock::WLocker l((from.index)->access_lock); r = from->prep_delete(); @@ -5467,7 +5467,7 @@ int FileStore::_collection_add(const coll_t& c, const coll_t& oldcid, const ghob if (r < 0) { // the source collection/object does not exist. If we are replaying, we // should be safe, so just return 0 and move on. 
- assert(replaying); + ceph_assert(replaying); dout(10) << __FUNC__ << ": " << c << "/" << o << " from " << oldcid << "/" << o << " (dne, continue replay) " << dendl; return 0; @@ -5536,7 +5536,7 @@ int FileStore::_collection_move_rename(const coll_t& oldcid, const ghobject_t& o << oldcid << "/" << oldoid << " (dne, ignoring enoent)" << dendl; } else { - assert(0 == "ERROR: source must exist"); + ceph_assert(0 == "ERROR: source must exist"); } if (!replaying) { @@ -5621,7 +5621,7 @@ int FileStore::_omap_clear(const coll_t& cid, const ghobject_t &hoid, if (r < 0) return r; { - assert(index.index); + ceph_assert(index.index); RWLock::RLocker l((index.index)->access_lock); r = lfn_find(hoid, index); if (r < 0) @@ -5649,7 +5649,7 @@ int FileStore::_omap_setkeys(const coll_t& cid, const ghobject_t &hoid, return r; } { - assert(index.index); + ceph_assert(index.index); RWLock::RLocker l((index.index)->access_lock); r = lfn_find(hoid, index); if (r < 0) { @@ -5682,7 +5682,7 @@ int FileStore::_omap_rmkeys(const coll_t& cid, const ghobject_t &hoid, if (r < 0) return r; { - assert(index.index); + ceph_assert(index.index); RWLock::RLocker l((index.index)->access_lock); r = lfn_find(hoid, index); if (r < 0) @@ -5722,7 +5722,7 @@ int FileStore::_omap_setheader(const coll_t& cid, const ghobject_t &hoid, if (r < 0) return r; { - assert(index.index); + ceph_assert(index.index); RWLock::RLocker l((index.index)->access_lock); r = lfn_find(hoid, index); if (r < 0) @@ -5742,12 +5742,12 @@ int FileStore::_split_collection(const coll_t& cid, dout(15) << __FUNC__ << ": " << cid << " bits: " << bits << dendl; if (!collection_exists(cid)) { dout(2) << __FUNC__ << ": " << cid << " DNE" << dendl; - assert(replaying); + ceph_assert(replaying); return 0; } if (!collection_exists(dest)) { dout(2) << __FUNC__ << ": " << dest << " DNE" << dendl; - assert(replaying); + ceph_assert(replaying); return 0; } @@ -5771,10 +5771,10 @@ int FileStore::_split_collection(const coll_t& cid, r = get_index(dest, &to); if (!r) { - assert(from.index); + ceph_assert(from.index); RWLock::WLocker l1((from.index)->access_lock); - assert(to.index); + ceph_assert(to.index); RWLock::WLocker l2((to.index)->access_lock); r = from->split(rem, bits, to.index); @@ -5801,7 +5801,7 @@ int FileStore::_split_collection(const coll_t& cid, ++i) { dout(20) << __FUNC__ << ": " << *i << " still in source " << cid << dendl; - assert(!i->match(bits, rem)); + ceph_assert(!i->match(bits, rem)); } objects.clear(); } @@ -5820,7 +5820,7 @@ int FileStore::_split_collection(const coll_t& cid, ++i) { dout(20) << __FUNC__ << ": " << *i << " now in dest " << *i << dendl; - assert(i->match(bits, rem)); + ceph_assert(i->match(bits, rem)); } objects.clear(); } @@ -5855,7 +5855,7 @@ int FileStore::_set_alloc_hint(const coll_t& cid, const ghobject_t& oid, lfn_close(fd); out: dout(10) << __FUNC__ << ": " << cid << "/" << oid << " object_size " << expected_object_size << " write_size " << expected_write_size << " = " << ret << dendl; - assert(!m_filestore_fail_eio || ret != -EIO); + ceph_assert(!m_filestore_fail_eio || ret != -EIO); return ret; } @@ -6188,7 +6188,7 @@ void FileStore::OpSequencer::_register_apply(Op *o) void FileStore::OpSequencer::_unregister_apply(Op *o) { - assert(o->registered_apply); + ceph_assert(o->registered_apply); for (auto& t : o->tls) { for (auto& i : t.get_object_index()) { uint32_t key = i.first.hobj.get_hash(); @@ -6205,7 +6205,7 @@ void FileStore::OpSequencer::_unregister_apply(Op *o) } ++p; } - assert(removed); + ceph_assert(removed); } } } 
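Before the header changes below, the single most common pattern in FileStore.cc deserves a gloss: ceph_assert(!m_filestore_fail_eio || r != -EIO). It fires exactly when EIO-failure behaviour is enabled and the operation returned -EIO, so an I/O error from the backing filesystem aborts the process rather than propagating; a few call sites (stat, omap_get_header) additionally take allow_eio to tolerate it per call. A self-contained sketch of the same policy, where check_eio(), fail_eio and allow_eio are illustrative parameters rather than FileStore members:

#include <cerrno>
#include "include/assert.h"  // ceph_assert is evaluated in all builds

// Illustrative: apply the EIO policy to a result r
// (0 on success, negative errno on failure).
static int check_eio(bool fail_eio, bool allow_eio, int r)
{
  // -EIO aborts unless tolerated; other errors (e.g. -ENOENT)
  // always propagate to the caller.
  ceph_assert(allow_eio || !fail_eio || r != -EIO);
  return r;
}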
diff --git a/src/os/filestore/FileStore.h b/src/os/filestore/FileStore.h index 84fe1ca76d2e0..2feb1fd2a40f1 100644 --- a/src/os/filestore/FileStore.h +++ b/src/os/filestore/FileStore.h @@ -239,8 +239,8 @@ private: bool _get_max_uncompleted( uint64_t *seq ///< [out] max uncompleted seq ) { - assert(qlock.is_locked()); - assert(seq); + ceph_assert(qlock.is_locked()); + ceph_assert(seq); *seq = 0; if (q.empty() && jq.empty()) return true; @@ -257,8 +257,8 @@ private: bool _get_min_uncompleted( uint64_t *seq ///< [out] min uncompleted seq ) { - assert(qlock.is_locked()); - assert(seq); + ceph_assert(qlock.is_locked()); + ceph_assert(seq); *seq = 0; if (q.empty() && jq.empty()) return true; @@ -306,13 +306,13 @@ private: void wait_for_apply(const ghobject_t& oid); Op *peek_queue() { Mutex::Locker l(qlock); - assert(apply_lock.is_locked()); + ceph_assert(apply_lock.is_locked()); return q.front(); } Op *dequeue(list *to_queue) { - assert(to_queue); - assert(apply_lock.is_locked()); + ceph_assert(to_queue); + ceph_assert(apply_lock.is_locked()); Mutex::Locker l(qlock); Op *o = q.front(); q.pop_front(); @@ -363,7 +363,7 @@ private: id(i), osr_name(osr_name_str.c_str()) {} ~OpSequencer() override { - assert(q.empty()); + ceph_assert(q.empty()); } }; typedef boost::intrusive_ptr OpSequencerRef; @@ -415,7 +415,7 @@ private: store->_finish_op(osr); } void _clear() override { - assert(store->op_queue.empty()); + ceph_assert(store->op_queue.empty()); } } op_wq; @@ -658,7 +658,7 @@ public: void inject_mdata_error(const ghobject_t &oid) override; void compact() override { - assert(object_map); + ceph_assert(object_map); object_map->compact(); } diff --git a/src/os/filestore/HashIndex.cc b/src/os/filestore/HashIndex.cc index 6014804a259fd..2b59f75504963 100644 --- a/src/os/filestore/HashIndex.cc +++ b/src/os/filestore/HashIndex.cc @@ -42,7 +42,7 @@ int hex_to_int(char c) /// int value to hex digit char int_to_hex(int v) { - assert(v < 16); + ceph_assert(v < 16); if (v < 10) return '0' + v; return 'A' + v - 10; @@ -51,7 +51,7 @@ char int_to_hex(int v) /// reverse bits in a nibble (0..15) int reverse_nibble_bits(int in) { - assert(in < 16); + ceph_assert(in < 16); return ((in & 8) >> 3) | ((in & 4) >> 1) | @@ -76,11 +76,11 @@ string reverse_hexdigit_bits_string(string s) /// compare hex digit (as length 1 string) bitwise bool cmp_hexdigit_bitwise(const string& l, const string& r) { - assert(l.length() == 1 && r.length() == 1); + ceph_assert(l.length() == 1 && r.length() == 1); int lv = hex_to_int(l[0]); int rv = hex_to_int(r[0]); - assert(lv < 16); - assert(rv < 16); + ceph_assert(lv < 16); + ceph_assert(rv < 16); return reverse_nibble_bits(lv) < reverse_nibble_bits(rv); } @@ -296,7 +296,7 @@ int HashIndex::_split( uint32_t match, uint32_t bits, CollectionIndex* dest) { - assert(collection_version() == dest->collection_version()); + ceph_assert(collection_version() == dest->collection_version()); unsigned mkdirred = 0; return col_split_level( *this, @@ -580,7 +580,7 @@ int HashIndex::pre_split_folder(uint32_t pg_num, uint64_t expected_num_objs) // the below logic is inspired by rados.h#ceph_stable_mod, // it basically determines how many sub-folders should we // create for splitting - assert(pg_num_bits > 0); // otherwise BAD_SHIFT + ceph_assert(pg_num_bits > 0); // otherwise BAD_SHIFT if (((1 << (pg_num_bits - 1)) | ps) >= pg_num) { ++split_bits; } @@ -593,7 +593,7 @@ int HashIndex::pre_split_folder(uint32_t pg_num, uint64_t expected_num_objs) leavies = leavies >> 4; } for (uint32_t i = 0; i < subs; ++i) { - 
assert(split_bits <= 4); // otherwise BAD_SHIFT + ceph_assert(split_bits <= 4); // otherwise BAD_SHIFT int v = tmp_id | (i << ((4 - split_bits) % 4)); paths.push_back(to_hex(v)); ret = create_path(paths); @@ -726,13 +726,13 @@ int HashIndex::get_info(const vector &path, subdir_info_s *info) { return r; auto bufiter = buf.cbegin(); info->decode(bufiter); - assert(path.size() == (unsigned)info->hash_level); + ceph_assert(path.size() == (unsigned)info->hash_level); return 0; } int HashIndex::set_info(const vector &path, const subdir_info_s &info) { bufferlist buf; - assert(path.size() == (unsigned)info.hash_level); + ceph_assert(path.size() == (unsigned)info.hash_level); info.encode(buf); return add_attr_path(path, SUBDIR_ATTR, buf); } @@ -924,7 +924,7 @@ string HashIndex::get_hash_str(uint32_t hash) { } string HashIndex::get_path_str(const ghobject_t &oid) { - assert(!oid.is_max()); + ceph_assert(!oid.is_max()); return get_hash_str(oid.hobj.get_hash()); } @@ -1005,7 +1005,7 @@ int HashIndex::list_by_hash(const vector &path, ghobject_t *next, vector *out) { - assert(out); + ceph_assert(out); return list_by_hash_bitwise(path, end, max_count, next, out); } diff --git a/src/os/filestore/HashIndex.h b/src/os/filestore/HashIndex.h index 3eba321885846..509474b97e984 100644 --- a/src/os/filestore/HashIndex.h +++ b/src/os/filestore/HashIndex.h @@ -97,7 +97,7 @@ private: using ceph::decode; __u8 v; decode(v, bl); - assert(v == 1); + ceph_assert(v == 1); decode(objs, bl); decode(subdirs, bl); decode(hash_level, bl); @@ -154,7 +154,7 @@ private: using ceph::decode; __u8 v; decode(v, bl); - assert(v == 1); + ceph_assert(v == 1); decode(op, bl); decode(path, bl); } @@ -386,7 +386,7 @@ private: /// Convert a number to hex string (upper case). static string to_hex(int n) { - assert(n >= 0 && n < 16); + ceph_assert(n >= 0 && n < 16); char c = (n <= 9 ? 
('0' + n) : ('A' + n - 10)); string str; str.append(1, c); diff --git a/src/os/filestore/JournalingObjectStore.cc b/src/os/filestore/JournalingObjectStore.cc index 102b0dc060541..7e2b5728c572c 100644 --- a/src/os/filestore/JournalingObjectStore.cc +++ b/src/os/filestore/JournalingObjectStore.cc @@ -80,7 +80,7 @@ int JournalingObjectStore::journal_replay(uint64_t fs_op_seq) dout(3) << "journal_replay: skipping old op seq " << seq << " <= " << op_seq << dendl; continue; } - assert(op_seq == seq-1); + ceph_assert(op_seq == seq-1); dout(3) << "journal_replay: applying op seq " << seq << dendl; auto p = bl.cbegin(); @@ -129,8 +129,8 @@ uint64_t JournalingObjectStore::ApplyManager::op_apply_start(uint64_t op) } dout(10) << "op_apply_start " << op << " open_ops " << open_ops << " -> " << (open_ops+1) << dendl; - assert(!blocked); - assert(op > committed_seq); + ceph_assert(!blocked); + ceph_assert(op > committed_seq); open_ops++; return op; } @@ -142,7 +142,7 @@ void JournalingObjectStore::ApplyManager::op_apply_finish(uint64_t op) << (open_ops-1) << ", max_applied_seq " << max_applied_seq << " -> " << std::max(op, max_applied_seq) << dendl; --open_ops; - assert(open_ops >= 0); + ceph_assert(open_ops >= 0); // signal a blocked commit_start if (blocked) { @@ -170,7 +170,7 @@ void JournalingObjectStore::SubmitManager::op_submit_finish(uint64_t op) if (op != op_submitted + 1) { dout(0) << "op_submit_finish " << op << " expected " << (op_submitted + 1) << ", OUT OF ORDER" << dendl; - assert(0 == "out of order op_submit_finish"); + ceph_assert(0 == "out of order op_submit_finish"); } op_submitted = op; lock.Unlock(); @@ -182,7 +182,7 @@ void JournalingObjectStore::SubmitManager::op_submit_finish(uint64_t op) void JournalingObjectStore::ApplyManager::add_waiter(uint64_t op, Context *c) { Mutex::Locker l(com_lock); - assert(c); + ceph_assert(c); commit_waiters[op].push_back(c); } @@ -200,14 +200,14 @@ bool JournalingObjectStore::ApplyManager::commit_start() << " open ops to drain" << dendl; blocked_cond.Wait(apply_lock); } - assert(open_ops == 0); + ceph_assert(open_ops == 0); dout(10) << "commit_start blocked, all open_ops have completed" << dendl; { Mutex::Locker l(com_lock); if (max_applied_seq == committed_seq) { dout(10) << "commit_start nothing to do" << dendl; blocked = false; - assert(commit_waiters.empty()); + ceph_assert(commit_waiters.empty()); goto out; } diff --git a/src/os/filestore/JournalingObjectStore.h b/src/os/filestore/JournalingObjectStore.h index 17dc2abd7fc6a..c5dac774ae38f 100644 --- a/src/os/filestore/JournalingObjectStore.h +++ b/src/os/filestore/JournalingObjectStore.h @@ -73,8 +73,8 @@ protected: com_lock("JOS::ApplyManager::com_lock", false, true, false, cct), committing_seq(0), committed_seq(0) {} void reset() { - assert(open_ops == 0); - assert(blocked == false); + ceph_assert(open_ops == 0); + ceph_assert(blocked == false); max_applied_seq = 0; committing_seq = 0; committed_seq = 0; diff --git a/src/os/filestore/LFNIndex.cc b/src/os/filestore/LFNIndex.cc index b55658a1469b4..9dfebeb951c3c 100644 --- a/src/os/filestore/LFNIndex.cc +++ b/src/os/filestore/LFNIndex.cc @@ -436,7 +436,7 @@ int LFNIndex::list_objects(const vector &to_list, int max_objs, } else { string long_name = lfn_generate_object_name(obj); if (!lfn_must_hash(long_name)) { - assert(long_name == short_name); + ceph_assert(long_name == short_name); } if (index_version == HASH_INDEX_TAG) get_hobject_from_oinfo(to_list_path.c_str(), short_name.c_str(), &obj); @@ -560,7 +560,7 @@ string 
LFNIndex::lfn_generate_object_name_keyless(const ghobject_t &oid) char *end = s + sizeof(s); char *t = s; - assert(oid.generation == ghobject_t::NO_GEN); + ceph_assert(oid.generation == ghobject_t::NO_GEN); const char *i = oid.hobj.oid.name.c_str(); // Escape subdir prefix if (oid.hobj.oid.name.substr(0, 4) == "DIR_") { @@ -675,7 +675,7 @@ string LFNIndex::lfn_generate_object_name_poolless(const ghobject_t &oid) if (index_version == HASH_INDEX_TAG) return lfn_generate_object_name_keyless(oid); - assert(oid.generation == ghobject_t::NO_GEN); + ceph_assert(oid.generation == ghobject_t::NO_GEN); string full_name; string::const_iterator i = oid.hobj.oid.name.begin(); if (oid.hobj.oid.name.substr(0, 4) == "DIR_") { @@ -764,7 +764,7 @@ int LFNIndex::lfn_get_name(const vector &path, *hardlink = 0; return 0; } - assert(r > 0); + ceph_assert(r > 0); string lfn(bp.c_str(), bp.length()); if (lfn == full_name) { if (mangled_name) @@ -1292,7 +1292,7 @@ void LFNIndex::build_filename(const char *old_filename, int i, char *filename, i { char hash[FILENAME_HASH_LEN + 1]; - assert(len >= FILENAME_SHORT_LEN + 4); + ceph_assert(len >= FILENAME_SHORT_LEN + 4); strncpy(filename, old_filename, FILENAME_PREFIX_LEN); filename[FILENAME_PREFIX_LEN] = '\0'; @@ -1323,7 +1323,7 @@ bool LFNIndex::short_name_matches(const char *short_name, const char *cand_long_ int index = -1; char buf[FILENAME_SHORT_LEN + 4]; - assert((end - suffix) < (int)sizeof(buf)); + ceph_assert((end - suffix) < (int)sizeof(buf)); int r = sscanf(suffix, "_%d_%s", &index, buf); if (r < 2) return false; @@ -1336,7 +1336,7 @@ bool LFNIndex::short_name_matches(const char *short_name, const char *cand_long_ string LFNIndex::lfn_get_short_name(const ghobject_t &oid, int i) { string long_name = lfn_generate_object_name(oid); - assert(lfn_must_hash(long_name)); + ceph_assert(lfn_must_hash(long_name)); char buf[FILENAME_SHORT_LEN + 4]; build_filename(long_name.c_str(), i, buf, sizeof(buf)); return string(buf); diff --git a/src/os/filestore/LFNIndex.h b/src/os/filestore/LFNIndex.h index 0d068260af9ff..f589854eb0c2c 100644 --- a/src/os/filestore/LFNIndex.h +++ b/src/os/filestore/LFNIndex.h @@ -56,7 +56,7 @@ try { \ if (failed) { \ r = cleanup(); \ - assert(r == 0); \ + ceph_assert(r == 0); \ } \ { x } \ out: \ diff --git a/src/os/filestore/WBThrottle.cc b/src/os/filestore/WBThrottle.cc index ddffffc574757..e2a5d36c72336 100644 --- a/src/os/filestore/WBThrottle.cc +++ b/src/os/filestore/WBThrottle.cc @@ -18,7 +18,7 @@ WBThrottle::WBThrottle(CephContext *cct) : Mutex::Locker l(lock); set_from_conf(); } - assert(cct); + ceph_assert(cct); PerfCountersBuilder b( cct, string("WBThrottle"), l_wbthrottle_first, l_wbthrottle_last); @@ -37,7 +37,7 @@ WBThrottle::WBThrottle(CephContext *cct) : } WBThrottle::~WBThrottle() { - assert(cct); + ceph_assert(cct); cct->get_perfcounters_collection()->remove(logger); delete logger; cct->_conf.remove_observer(this); @@ -85,7 +85,7 @@ const char** WBThrottle::get_tracked_conf_keys() const void WBThrottle::set_from_conf() { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); if (fs == BTRFS) { size_limits.first = cct->_conf->filestore_wbthrottle_btrfs_bytes_start_flusher; @@ -113,7 +113,7 @@ void WBThrottle::set_from_conf() fd_limits.second = cct->_conf->filestore_wbthrottle_xfs_inodes_hard_limit; } else { - assert(0 == "invalid value for fs"); + ceph_assert(0 == "invalid value for fs"); } cond.Signal(); } @@ -133,13 +133,13 @@ void WBThrottle::handle_conf_change(const ConfigProxy& conf, bool 
WBThrottle::get_next_should_flush( boost::tuple *next) { - assert(lock.is_locked()); - assert(next); + ceph_assert(lock.is_locked()); + ceph_assert(next); while (!stopping && (!beyond_limit() || pending_wbs.empty())) cond.Wait(lock); if (stopping) return false; - assert(!pending_wbs.empty()); + ceph_assert(!pending_wbs.empty()); ghobject_t obj(pop_object()); ceph::unordered_map >::iterator i = @@ -173,7 +173,7 @@ void *WBThrottle::entry() #ifdef HAVE_POSIX_FADVISE if (cct->_conf->filestore_fadvise && wb.get<2>().nocache) { int fa_r = posix_fadvise(**wb.get<1>(), 0, 0, POSIX_FADV_DONTNEED); - assert(fa_r == 0); + ceph_assert(fa_r == 0); } #endif lock.Lock(); @@ -223,7 +223,7 @@ void WBThrottle::clear() #ifdef HAVE_POSIX_FADVISE if (cct->_conf->filestore_fadvise && i->second.first.nocache) { int fa_r = posix_fadvise(**i->second.second, 0, 0, POSIX_FADV_DONTNEED); - assert(fa_r == 0); + ceph_assert(fa_r == 0); } #endif diff --git a/src/os/filestore/WBThrottle.h b/src/os/filestore/WBThrottle.h index 06e4906e5c663..ef809ea4d77ef 100644 --- a/src/os/filestore/WBThrottle.h +++ b/src/os/filestore/WBThrottle.h @@ -89,7 +89,7 @@ class WBThrottle : Thread, public md_config_obs_t { list lru; ceph::unordered_map::iterator> rev_lru; void remove_object(const ghobject_t &oid) { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); ceph::unordered_map::iterator>::iterator iter = rev_lru.find(oid); if (iter == rev_lru.end()) @@ -99,14 +99,14 @@ class WBThrottle : Thread, public md_config_obs_t { rev_lru.erase(iter); } ghobject_t pop_object() { - assert(!lru.empty()); + ceph_assert(!lru.empty()); ghobject_t oid(lru.front()); lru.pop_front(); rev_lru.erase(oid); return oid; } void insert_object(const ghobject_t &oid) { - assert(rev_lru.find(oid) == rev_lru.end()); + ceph_assert(rev_lru.find(oid) == rev_lru.end()); lru.push_back(oid); rev_lru.insert(make_pair(oid, --lru.end())); } diff --git a/src/os/filestore/XfsFileStoreBackend.cc b/src/os/filestore/XfsFileStoreBackend.cc index b6d4d3e7401ec..8a236851603cd 100644 --- a/src/os/filestore/XfsFileStoreBackend.cc +++ b/src/os/filestore/XfsFileStoreBackend.cc @@ -144,6 +144,6 @@ int XfsFileStoreBackend::set_alloc_hint(int fd, uint64_t hint) if (!m_has_extsize) return -EOPNOTSUPP; - assert(hint < UINT_MAX); + ceph_assert(hint < UINT_MAX); return set_extsize(fd, hint); } diff --git a/src/os/filestore/ZFSFileStoreBackend.cc b/src/os/filestore/ZFSFileStoreBackend.cc index 45384781e9844..1a6b63bbd7fd2 100644 --- a/src/os/filestore/ZFSFileStoreBackend.cc +++ b/src/os/filestore/ZFSFileStoreBackend.cc @@ -150,7 +150,7 @@ static int list_checkpoints_callback(ZFS::Handle *zh, void *data) list *ls = static_cast *>(data); string str = ZFS::get_name(zh); size_t pos = str.find('@'); - assert(pos != string::npos && pos + 1 != str.length()); + ceph_assert(pos != string::npos && pos + 1 != str.length()); ls->push_back(str.substr(pos + 1)); return 0; } diff --git a/src/os/filestore/chain_xattr.cc b/src/os/filestore/chain_xattr.cc index 97c547e1426f7..aa9ce2502fb02 100644 --- a/src/os/filestore/chain_xattr.cc +++ b/src/os/filestore/chain_xattr.cc @@ -13,6 +13,8 @@ #include #endif +#include "include/assert.h" + /* * chaining xattrs * @@ -33,14 +35,14 @@ void get_raw_xattr_name(const char *name, int i, char *raw_name, int raw_len) switch (*name) { case '@': /* escape it */ pos += 2; - assert (pos < raw_len - 1); + ceph_assert (pos < raw_len - 1); *raw_name = '@'; raw_name++; *raw_name = '@'; break; default: pos++; - assert(pos < raw_len - 1); + ceph_assert(pos < raw_len - 1); 
*raw_name = *name; break; } @@ -52,7 +54,7 @@ void get_raw_xattr_name(const char *name, int i, char *raw_name, int raw_len) *raw_name = '\0'; } else { int r = snprintf(raw_name, raw_len - pos, "@%d", i); - assert(r < raw_len - pos); + ceph_assert(r < raw_len - pos); } } @@ -78,7 +80,7 @@ static int translate_raw_name(const char *raw_name, char *name, int name_len, bo break; } pos++; - assert(pos < name_len); + ceph_assert(pos < name_len); name++; raw_name++; } @@ -186,7 +188,7 @@ int chain_getxattr_buf(const char *fn, const char *name, bufferptr *bp) } } } - assert(0 == "unreachable"); + ceph_assert(0 == "unreachable"); return 0; } diff --git a/src/os/filestore/chain_xattr.h b/src/os/filestore/chain_xattr.h index 7dba7ef930923..ce7c5582490b5 100644 --- a/src/os/filestore/chain_xattr.h +++ b/src/os/filestore/chain_xattr.h @@ -113,7 +113,7 @@ int chain_setxattr( pos += chunk_size; ret = pos; i++; - assert(size == 0 || !ensure_single_attr); + ceph_assert(size == 0 || !ensure_single_attr); } while (size); if (ret >= 0 && !skip_chain_cleanup) { @@ -157,7 +157,7 @@ int chain_fsetxattr( pos += chunk_size; ret = pos; i++; - assert(size == 0 || !ensure_single_attr); + ceph_assert(size == 0 || !ensure_single_attr); } while (size); if (ret >= 0 && !skip_chain_cleanup) { diff --git a/src/os/fs/FS.cc b/src/os/fs/FS.cc index 9cfa103b939ab..a530b54ab11b0 100644 --- a/src/os/fs/FS.cc +++ b/src/os/fs/FS.cc @@ -113,7 +113,7 @@ int FS::copy_file_range(int to_fd, uint64_t to_offset, int from_fd, uint64_t from_offset, uint64_t from_len) { - assert(0 == "write me"); + ceph_assert(0 == "write me"); } int FS::zero(int fd, uint64_t offset, uint64_t length) diff --git a/src/os/kstore/KStore.cc b/src/os/kstore/KStore.cc index 17424d36953b6..ba1a3127adf5a 100644 --- a/src/os/kstore/KStore.cc +++ b/src/os/kstore/KStore.cc @@ -299,7 +299,7 @@ static void get_object_key(CephContext* cct, const ghobject_t& oid, derr << "key " << pretty_binary_string(*key) << dendl; derr << "oid " << oid << dendl; derr << " t " << t << dendl; - assert(t == oid); + ceph_assert(t == oid); } } } @@ -435,7 +435,7 @@ void KStore::OnodeHashLRU::add(const ghobject_t& oid, OnodeRef o) { std::lock_guard l(lock); dout(30) << __func__ << " " << oid << " " << o << dendl; - assert(onode_map.count(oid) == 0); + ceph_assert(onode_map.count(oid) == 0); onode_map[oid] = o; lru.push_front(*o); } @@ -471,7 +471,7 @@ void KStore::OnodeHashLRU::rename(const ghobject_t& old_oid, po = onode_map.find(old_oid); pn = onode_map.find(new_oid); - assert(po != onode_map.end()); + ceph_assert(po != onode_map.end()); if (pn != onode_map.end()) { lru_list_t::iterator p = lru.iterator_to(*pn->second); lru.erase(p); @@ -502,14 +502,14 @@ bool KStore::OnodeHashLRU::get_next( return false; } ceph::unordered_map::iterator p = onode_map.begin(); - assert(p != onode_map.end()); + ceph_assert(p != onode_map.end()); next->first = p->first; next->second = p->second; return true; } ceph::unordered_map::iterator p = onode_map.find(after); - assert(p != onode_map.end()); // for now + ceph_assert(p != onode_map.end()); // for now lru_list_t::iterator pi = lru.iterator_to(*p->second); ++pi; if (pi == lru.end()) { @@ -546,7 +546,7 @@ int KStore::OnodeHashLRU::trim(int max) lru.erase(p--); } else { lru.erase(p); - assert(num == 1); + ceph_assert(num == 1); } o->get(); // paranoia onode_map.erase(o->oid); @@ -588,7 +588,7 @@ KStore::OnodeRef KStore::Collection::get_onode( const ghobject_t& oid, bool create) { - assert(create ? lock.is_wlocked() : lock.is_locked()); + ceph_assert(create ? 
lock.is_wlocked() : lock.is_locked()); spg_t pgid; if (cid.is_pg(&pgid)) { @@ -614,7 +614,7 @@ KStore::OnodeRef KStore::Collection::get_onode( ldout(store->cct, 20) << " r " << r << " v.len " << v.length() << dendl; Onode *on; if (v.length() == 0) { - assert(r == -ENOENT); + ceph_assert(r == -ENOENT); if (!create) return OnodeRef(); @@ -623,7 +623,7 @@ KStore::OnodeRef KStore::Collection::get_onode( on->dirty = true; } else { // loaded - assert(r >=0); + ceph_assert(r >=0); on = new Onode(store->cct, oid, key); on->exists = true; auto p = v.cbegin(); @@ -664,9 +664,9 @@ KStore::KStore(CephContext *cct, const string& path) KStore::~KStore() { _shutdown_logger(); - assert(!mounted); - assert(db == NULL); - assert(fsid_fd < 0); + ceph_assert(!mounted); + ceph_assert(db == NULL); + ceph_assert(fsid_fd < 0); } void KStore::_init_logger() @@ -692,7 +692,7 @@ void KStore::_shutdown_logger() int KStore::_open_path() { - assert(path_fd < 0); + ceph_assert(path_fd < 0); path_fd = ::open(path.c_str(), O_DIRECTORY); if (path_fd < 0) { int r = -errno; @@ -711,7 +711,7 @@ void KStore::_close_path() int KStore::_open_fsid(bool create) { - assert(fsid_fd < 0); + ceph_assert(fsid_fd < 0); int flags = O_RDWR; if (create) flags |= O_CREAT; @@ -816,7 +816,7 @@ bool KStore::test_mount_in_use() int KStore::_open_db(bool create) { int r; - assert(!db); + ceph_assert(!db); char fn[PATH_MAX]; snprintf(fn, sizeof(fn), "%s/db", path.c_str()); @@ -883,14 +883,14 @@ int KStore::_open_db(bool create) void KStore::_close_db() { - assert(db); + ceph_assert(db); delete db; db = NULL; } int KStore::_open_collections(int *errors) { - assert(coll_map.empty()); + ceph_assert(coll_map.empty()); KeyValueDB::Iterator it = db->get_iterator(PREFIX_COLL); for (it->upper_bound(string()); it->valid(); @@ -1039,7 +1039,7 @@ int KStore::mount() int KStore::umount() { - assert(mounted); + ceph_assert(mounted); dout(1) << __func__ << dendl; _sync(); @@ -1089,7 +1089,7 @@ int KStore::statfs(struct store_statfs_t* buf0) buf0->reset(); if (::statfs(basedir.c_str(), &buf) < 0) { int r = -errno; - assert(r != -ENOENT); + ceph_assert(r != -ENOENT); return r; } @@ -1146,7 +1146,7 @@ void KStore::_reap_collections() { pair next; while (c->onode_map.get_next(next.first, &next)) { - assert(!next.second->exists); + ceph_assert(!next.second->exists); if (!next.second->flush_txns.empty()) { dout(10) << __func__ << " " << c->cid << " " << next.second->oid << " flush_txns " << next.second->flush_txns << dendl; @@ -1512,10 +1512,10 @@ int KStore::_collection_list( get_object_key(cct, start, &k); if (start.hobj.is_temp()) { temp = true; - assert(k >= temp_start_key && k < temp_end_key); + ceph_assert(k >= temp_start_key && k < temp_end_key); } else { temp = false; - assert(k >= start_key && k < end_key); + ceph_assert(k >= start_key && k < end_key); } dout(20) << " start from " << pretty_binary_string(k) << " temp=" << (int)temp << dendl; @@ -1558,7 +1558,7 @@ int KStore::_collection_list( dout(20) << __func__ << " key " << pretty_binary_string(it->key()) << dendl; ghobject_t oid; int r = get_key_object(it->key(), &oid); - assert(r == 0); + ceph_assert(r == 0); if (ls->size() >= (unsigned)max) { dout(20) << __func__ << " reached max " << max << dendl; *pnext = oid; @@ -1650,7 +1650,7 @@ int KStore::OmapIteratorImpl::next(bool validate) string KStore::OmapIteratorImpl::key() { RWLock::RLocker l(c->lock); - assert(it->valid()); + ceph_assert(it->valid()); string db_key = it->raw_key().second; string user_key; decode_omap_key(db_key, &user_key); @@ -1660,7 
+1660,7 @@ string KStore::OmapIteratorImpl::key() bufferlist KStore::OmapIteratorImpl::value() { RWLock::RLocker l(c->lock); - assert(it->valid()); + ceph_assert(it->valid()); return it->value(); } @@ -1701,7 +1701,7 @@ int KStore::omap_get( decode_omap_key(it->key(), &user_key); dout(30) << __func__ << " got " << pretty_binary_string(it->key()) << " -> " << user_key << dendl; - assert(it->key() < tail); + ceph_assert(it->key() < tail); (*out)[user_key] = it->value(); } it->next(); @@ -1778,7 +1778,7 @@ int KStore::omap_get_keys( decode_omap_key(it->key(), &user_key); dout(30) << __func__ << " got " << pretty_binary_string(it->key()) << " -> " << user_key << dendl; - assert(it->key() < tail); + ceph_assert(it->key() < tail); keys->insert(user_key); it->next(); } @@ -1939,7 +1939,7 @@ void KStore::_txc_state_proc(TransContext *txc) std::lock_guard l(kv_lock); if (cct->_conf->kstore_sync_submit_transaction) { int r = db->submit_transaction(txc->t); - assert(r == 0); + ceph_assert(r == 0); } kv_queue.push_back(txc); kv_cond.notify_one(); @@ -1947,7 +1947,7 @@ void KStore::_txc_state_proc(TransContext *txc) } { int r = db->submit_transaction_sync(txc->t); - assert(r == 0); + ceph_assert(r == 0); } break; @@ -1970,7 +1970,7 @@ void KStore::_txc_state_proc(TransContext *txc) default: derr << __func__ << " unexpected txc " << txc << " state " << txc->get_state_name() << dendl; - assert(0 == "unexpected txc state"); + ceph_assert(0 == "unexpected txc state"); return; } } @@ -2023,7 +2023,7 @@ void KStore::_txc_finish_kv(TransContext *txc) void KStore::_txc_finish(TransContext *txc) { dout(20) << __func__ << " " << txc << " onodes " << txc->onodes << dendl; - assert(txc->state == TransContext::STATE_FINISHING); + ceph_assert(txc->state == TransContext::STATE_FINISHING); for (set::iterator p = txc->onodes.begin(); p != txc->onodes.end(); @@ -2031,7 +2031,7 @@ void KStore::_txc_finish(TransContext *txc) std::lock_guard l((*p)->flush_lock); dout(20) << __func__ << " onode " << *p << " had " << (*p)->flush_txns << dendl; - assert((*p)->flush_txns.count(txc)); + ceph_assert((*p)->flush_txns.count(txc)); (*p)->flush_txns.erase(txc); if ((*p)->flush_txns.empty()) { (*p)->flush_cond.notify_all(); @@ -2086,7 +2086,7 @@ void KStore::_kv_sync_thread() dout(10) << __func__ << " start" << dendl; std::unique_lock l(kv_lock); while (true) { - assert(kv_committing.empty()); + ceph_assert(kv_committing.empty()); if (kv_queue.empty()) { if (kv_stop) break; @@ -2109,11 +2109,11 @@ void KStore::_kv_sync_thread() it != kv_committing.end(); ++it) { int r = db->submit_transaction((*it)->t); - assert(r == 0); + ceph_assert(r == 0); } } int r = db->submit_transaction_sync(t); - assert(r == 0); + ceph_assert(r == 0); utime_t finish = ceph_clock_now(); utime_t dur = finish - start; dout(20) << __func__ << " committed " << kv_committing.size() @@ -2222,7 +2222,7 @@ void KStore::_txc_add_transaction(TransContext *txc, Transaction *t) case Transaction::OP_MKCOLL: { - assert(!c); + ceph_assert(!c); coll_t cid = i.get_cid(op->cid); r = _create_collection(txc, cid, op->split_bits, &c); if (!r) @@ -2231,7 +2231,7 @@ void KStore::_txc_add_transaction(TransContext *txc, Transaction *t) break; case Transaction::OP_SPLIT_COLLECTION: - assert(0 == "deprecated"); + ceph_assert(0 == "deprecated"); break; case Transaction::OP_SPLIT_COLLECTION2: @@ -2275,7 +2275,7 @@ void KStore::_txc_add_transaction(TransContext *txc, Transaction *t) break; case Transaction::OP_COLL_RENAME: - assert(0 == "not implemented"); + ceph_assert(0 == "not 
implemented"); break; } if (r < 0) { @@ -2289,7 +2289,7 @@ void KStore::_txc_add_transaction(TransContext *txc, Transaction *t) f.close_section(); f.flush(*_dout); *_dout << dendl; - assert(0 == "unexpected error"); + ceph_assert(0 == "unexpected error"); } // object operations @@ -2397,7 +2397,7 @@ void KStore::_txc_add_transaction(TransContext *txc, Transaction *t) break; case Transaction::OP_CLONERANGE: - assert(0 == "deprecated"); + ceph_assert(0 == "deprecated"); break; case Transaction::OP_CLONERANGE2: @@ -2412,20 +2412,20 @@ void KStore::_txc_add_transaction(TransContext *txc, Transaction *t) break; case Transaction::OP_COLL_ADD: - assert(0 == "not implemented"); + ceph_assert(0 == "not implemented"); break; case Transaction::OP_COLL_REMOVE: - assert(0 == "not implemented"); + ceph_assert(0 == "not implemented"); break; case Transaction::OP_COLL_MOVE: - assert(0 == "deprecated"); + ceph_assert(0 == "deprecated"); break; case Transaction::OP_COLL_MOVE_RENAME: { - assert(op->cid == op->dest_cid); + ceph_assert(op->cid == op->dest_cid); const ghobject_t& noid = i.get_oid(op->dest_oid); OnodeRef no = c->get_onode(noid, true); r = _rename(txc, c, o, no, noid); @@ -2536,7 +2536,7 @@ void KStore::_txc_add_transaction(TransContext *txc, Transaction *t) f.close_section(); f.flush(*_dout); *_dout << dendl; - assert(0 == "unexpected error"); + ceph_assert(0 == "unexpected error"); } } } @@ -3170,7 +3170,7 @@ int KStore::_clone(TransContext *txc, } else { dout(30) << __func__ << " got header/data " << pretty_binary_string(it->key()) << dendl; - assert(it->key() < tail); + ceph_assert(it->key() < tail); rewrite_omap_key(newo->onode.omap_head, it->key(), &key); txc->t->set(PREFIX_OMAP, key, it->value()); } @@ -3272,9 +3272,9 @@ int KStore::_create_collection( goto out; } auto p = new_coll_map.find(cid); - assert(p != new_coll_map.end()); + ceph_assert(p != new_coll_map.end()); *c = p->second; - assert((*c)->cid == cid); + ceph_assert((*c)->cid == cid); (*c)->cnode.bits = bits; coll_map[cid] = *c; new_coll_map.erase(p); @@ -3359,7 +3359,7 @@ int KStore::_split_collection(TransContext *txc, c->onode_map.clear(); d->onode_map.clear(); c->cnode.bits = bits; - assert(d->cnode.bits == bits); + ceph_assert(d->cnode.bits == bits); r = 0; bufferlist bl; diff --git a/src/os/kstore/KStore.h b/src/os/kstore/KStore.h index fa1c88a6a5b93..f5dacc2266811 100644 --- a/src/os/kstore/KStore.h +++ b/src/os/kstore/KStore.h @@ -267,7 +267,7 @@ public: q_list_t q; ///< transactions ~OpSequencer() { - assert(q.empty()); + ceph_assert(q.empty()); } void queue_new(TransContext *txc) { @@ -290,7 +290,7 @@ public: if (txc->state >= TransContext::STATE_KV_DONE) { return true; } - assert(txc->state < TransContext::STATE_KV_DONE); + ceph_assert(txc->state < TransContext::STATE_KV_DONE); txc->oncommits.push_back(c); return false; } @@ -562,7 +562,7 @@ public: ThreadPool::TPHandle *handle = NULL) override; void compact () override { - assert(db); + ceph_assert(db); db->compact(); } diff --git a/src/os/memstore/MemStore.cc b/src/os/memstore/MemStore.cc index 5e6d30ff6a230..ca384b8c7609f 100644 --- a/src/os/memstore/MemStore.cc +++ b/src/os/memstore/MemStore.cc @@ -68,7 +68,7 @@ int MemStore::_save() dout(20) << __func__ << " coll " << p->first << " " << p->second << dendl; collections.insert(p->first); bufferlist bl; - assert(p->second); + ceph_assert(p->second); p->second->encode(bl); string fn = path + "/" + stringify(p->first); int r = bl.write_file(fn.c_str()); @@ -172,17 +172,17 @@ int MemStore::_load() void 
MemStore::set_fsid(uuid_d u) { int r = write_meta("fsid", stringify(u)); - assert(r >= 0); + ceph_assert(r >= 0); } uuid_d MemStore::get_fsid() { string fsid_str; int r = read_meta("fsid", &fsid_str); - assert(r >= 0); + ceph_assert(r >= 0); uuid_d uuid; bool b = uuid.parse(fsid_str.c_str()); - assert(b); + ceph_assert(b); return uuid; } @@ -840,7 +840,7 @@ void MemStore::_do_transaction(Transaction& t) break; case Transaction::OP_COLL_MOVE: - assert(0 == "deprecated"); + ceph_assert(0 == "deprecated"); break; case Transaction::OP_COLL_MOVE_RENAME: @@ -868,19 +868,19 @@ void MemStore::_do_transaction(Transaction& t) case Transaction::OP_COLL_SETATTR: { - assert(0 == "not implemented"); + ceph_assert(0 == "not implemented"); } break; case Transaction::OP_COLL_RMATTR: { - assert(0 == "not implemented"); + ceph_assert(0 == "not implemented"); } break; case Transaction::OP_COLL_RENAME: { - assert(0 == "not implemented"); + ceph_assert(0 == "not implemented"); } break; @@ -929,7 +929,7 @@ void MemStore::_do_transaction(Transaction& t) } break; case Transaction::OP_SPLIT_COLLECTION: - assert(0 == "deprecated"); + ceph_assert(0 == "deprecated"); break; case Transaction::OP_SPLIT_COLLECTION2: { @@ -992,7 +992,7 @@ void MemStore::_do_transaction(Transaction& t) f.close_section(); f.flush(*_dout); *_dout << dendl; - assert(0 == "unexpected error"); + ceph_assert(0 == "unexpected error"); } } @@ -1017,7 +1017,7 @@ int MemStore::_write(const coll_t& cid, const ghobject_t& oid, { dout(10) << __func__ << " " << cid << " " << oid << " " << offset << "~" << len << dendl; - assert(len == bl.length()); + ceph_assert(len == bl.length()); CollectionRef c = get_collection(cid); if (!c) @@ -1290,7 +1290,7 @@ int MemStore::_create_collection(const coll_t& cid, int bits) if (!result.second) return -EEXIST; auto p = new_coll_map.find(cid); - assert(p != new_coll_map.end()); + ceph_assert(p != new_coll_map.end()); result.first->second = p->second; result.first->second->bits = bits; new_coll_map.erase(p); @@ -1350,7 +1350,7 @@ int MemStore::_collection_move_rename(const coll_t& oldcid, const ghobject_t& ol return -ENOENT; // note: c and oc may be the same - assert(&(*c) == &(*oc)); + ceph_assert(&(*c) == &(*oc)); c->lock.get_write(); int r = -EEXIST; @@ -1400,7 +1400,7 @@ int MemStore::_split_collection(const coll_t& cid, uint32_t bits, uint32_t match } sc->bits = bits; - assert(dc->bits == (int)bits); + ceph_assert(dc->bits == (int)bits); return 0; } diff --git a/src/os/memstore/PageSet.h b/src/os/memstore/PageSet.h index 57a7223a2efcf..8e24328151b23 100644 --- a/src/os/memstore/PageSet.h +++ b/src/os/memstore/PageSet.h @@ -186,7 +186,7 @@ class PageSet { length -= c; } // make sure we sized the vector correctly - assert(out == range.rend()); + ceph_assert(out == range.rend()); } // return all allocated pages that intersect the range [offset,length) @@ -216,7 +216,7 @@ class PageSet { } void decode(bufferlist::const_iterator &p) { using ceph::decode; - assert(empty()); + ceph_assert(empty()); decode(page_size, p); unsigned count; decode(count, p); -- 2.39.5
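What the mechanical substitution buys, end to end: plain assert() is compiled out when NDEBUG is defined, so a release build would silently drop every invariant check touched above, while ceph_assert() (declared in include/assert.h, which this patch also #includes where it was missing, e.g. in chain_xattr.cc) is evaluated in every build and fails through Ceph's assert machinery. A toy stand-in that keeps only the "never compiled out" property; this is not Ceph's actual macro definition:

#include <cstdio>
#include <cstdlib>

// Toy equivalent: always evaluates its argument, even with -DNDEBUG.
#define toy_ceph_assert(expr)                                   \
  do {                                                          \
    if (!(expr)) {                                              \
      std::fprintf(stderr, "assert failed: %s at %s:%d\n",      \
                   #expr, __FILE__, __LINE__);                  \
      std::abort();                                             \
    }                                                           \
  } while (0)

int main()
{
  int r = -5;               // pretend a syscall failed
  toy_ceph_assert(r >= 0);  // fires in debug *and* release builds
  return 0;
}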