From c1179cd446bef7f08363baf2f5cec3ced07ced1d Mon Sep 17 00:00:00 2001
From: "Adam C. Emerson"
Date: Thu, 23 Aug 2018 11:26:18 -0400
Subject: [PATCH] osdc: Use ceph_assert for asserts.

Signed-off-by: Adam C. Emerson
---
 src/osdc/Filer.cc        |  28 ++--
 src/osdc/Filer.h         |   4 +-
 src/osdc/Journaler.cc    | 116 +++++++++----
 src/osdc/Journaler.h     |  12 +-
 src/osdc/ObjectCacher.cc | 238 +++++++++++++++++++--------------------
 src/osdc/ObjectCacher.h  |  24 ++--
 src/osdc/Objecter.cc     | 188 +++++++++++++++----------------
 src/osdc/Objecter.h      |  12 +-
 src/osdc/Striper.cc      |  18 +--
 9 files changed, 320 insertions(+), 320 deletions(-)

diff --git a/src/osdc/Filer.cc b/src/osdc/Filer.cc
index df414adff2513..83370d8c08bd6 100644
--- a/src/osdc/Filer.cc
+++ b/src/osdc/Filer.cc
@@ -46,7 +46,7 @@ public:
   void finish(int r) override {
     if (r == -ENOENT) {
       r = 0;
-      assert(size == 0);
+      ceph_assert(size == 0);
     }
 
     bool probe_complete;
@@ -57,7 +57,7 @@ public:
       }
 
       probe_complete = filer->_probed(probe, oid, size, mtime, pl);
-      assert(!pl.owns_lock());
+      ceph_assert(!pl.owns_lock());
     }
     if (probe_complete) {
       probe->onfinish->complete(probe->err);
@@ -81,7 +81,7 @@ int Filer::probe(inodeno_t ino,
                  << " starting from " << start_from
                  << dendl;
 
-  assert(snapid); // (until there is a non-NOSNAP write)
+  ceph_assert(snapid); // (until there is a non-NOSNAP write)
 
   Probe *probe = new Probe(ino, *layout, snapid, start_from, end, pmtime,
                            flags, fwd, onfinish);
@@ -104,7 +104,7 @@ int Filer::probe(inodeno_t ino,
                  << " starting from " << start_from
                  << dendl;
 
-  assert(snapid); // (until there is a non-NOSNAP write)
+  ceph_assert(snapid); // (until there is a non-NOSNAP write)
 
   Probe *probe = new Probe(ino, *layout, snapid, start_from, end, pmtime,
                            flags, fwd, onfinish);
@@ -123,7 +123,7 @@ int Filer::probe_impl(Probe* probe, file_layout_t *layout,
     if (start_from % period)
       probe->probing_len += period - (start_from % period);
   } else {
-    assert(start_from > *end);
+    ceph_assert(start_from > *end);
     if (start_from % period)
       probe->probing_len -= period - (start_from % period);
     probe->probing_off -= probe->probing_len;
@@ -131,7 +131,7 @@ int Filer::probe_impl(Probe* probe, file_layout_t *layout,
 
   Probe::unique_lock pl(probe->lock);
   _probe(probe, pl);
-  assert(!pl.owns_lock());
+  ceph_assert(!pl.owns_lock());
 
   return 0;
 }
@@ -143,7 +143,7 @@ int Filer::probe_impl(Probe* probe, file_layout_t *layout,
  */
 void Filer::_probe(Probe *probe, Probe::unique_lock& pl)
 {
-  assert(pl.owns_lock() && pl.mutex() == &probe->lock);
+  ceph_assert(pl.owns_lock() && pl.mutex() == &probe->lock);
 
   ldout(cct, 10) << "_probe " << hex << probe->ino << dec
                  << " " << probe->probing_off << "~" << probe->probing_len
@@ -182,7 +182,7 @@ void Filer::_probe(Probe *probe, Probe::unique_lock& pl)
 bool Filer::_probed(Probe *probe, const object_t& oid, uint64_t size,
                     ceph::real_time mtime, Probe::unique_lock& pl)
 {
-  assert(pl.owns_lock() && pl.mutex() == &probe->lock);
+  ceph_assert(pl.owns_lock() && pl.mutex() == &probe->lock);
 
   ldout(cct, 10) << "_probed " << probe->ino << " object " << oid
                  << " has size " << size << " mtime " << mtime << dendl;
@@ -191,7 +191,7 @@ bool Filer::_probed(Probe *probe, const object_t& oid, uint64_t size,
   if (mtime > probe->max_mtime)
     probe->max_mtime = mtime;
 
-  assert(probe->ops.count(oid));
+  ceph_assert(probe->ops.count(oid));
   probe->ops.erase(oid);
 
   if (!probe->ops.empty()) {
@@ -221,7 +221,7 @@ bool Filer::_probed(Probe *probe, const object_t& oid, uint64_t size,
                    << dendl;
 
     if (!probe->found_size) {
-      assert(probe->known_size[p->oid] <= shouldbe);
+
ceph_assert(probe->known_size[p->oid] <= shouldbe); if ((probe->fwd && probe->known_size[p->oid] == shouldbe) || (!probe->fwd && probe->known_size[p->oid] == 0 && @@ -264,16 +264,16 @@ bool Filer::_probed(Probe *probe, const object_t& oid, uint64_t size, uint64_t period = probe->layout.get_period(); if (probe->fwd) { probe->probing_off += probe->probing_len; - assert(probe->probing_off % period == 0); + ceph_assert(probe->probing_off % period == 0); probe->probing_len = period; } else { // previous period. - assert(probe->probing_off % period == 0); + ceph_assert(probe->probing_off % period == 0); probe->probing_len = period; probe->probing_off -= period; } _probe(probe, pl); - assert(!pl.owns_lock()); + ceph_assert(!pl.owns_lock()); return false; } else if (probe->pmtime) { ldout(cct, 10) << "_probed found mtime " << probe->max_mtime << dendl; @@ -317,7 +317,7 @@ int Filer::purge_range(inodeno_t ino, int flags, Context *oncommit) { - assert(num_obj > 0); + ceph_assert(num_obj > 0); // single object? easy! if (num_obj == 1) { diff --git a/src/osdc/Filer.h b/src/osdc/Filer.h index 00b6caa8f26aa..ce26a547188c1 100644 --- a/src/osdc/Filer.h +++ b/src/osdc/Filer.h @@ -126,7 +126,7 @@ class Filer { int flags, Context *onfinish, int op_flags = 0) { - assert(snap); // (until there is a non-NOSNAP write) + ceph_assert(snap); // (until there is a non-NOSNAP write) vector extents; Striper::file_to_extents(cct, ino, layout, offset, len, 0, extents); objecter->sg_read(extents, snap, bl, flags, onfinish, op_flags); @@ -143,7 +143,7 @@ class Filer { __u32 truncate_seq, Context *onfinish, int op_flags = 0) { - assert(snap); // (until there is a non-NOSNAP write) + ceph_assert(snap); // (until there is a non-NOSNAP write) vector extents; Striper::file_to_extents(cct, ino, layout, offset, len, truncate_size, extents); diff --git a/src/osdc/Journaler.cc b/src/osdc/Journaler.cc index 8713384499d41..7a80953e6b665 100644 --- a/src/osdc/Journaler.cc +++ b/src/osdc/Journaler.cc @@ -58,7 +58,7 @@ void Journaler::create(file_layout_t *l, stream_format_t const sf) { lock_guard lk(lock); - assert(!readonly); + ceph_assert(!readonly); state = STATE_ACTIVE; stream_format = sf; @@ -163,8 +163,8 @@ void Journaler::recover(Context *onread) } ldout(cct, 1) << "recover start" << dendl; - assert(state != STATE_ACTIVE); - assert(readonly); + ceph_assert(state != STATE_ACTIVE); + ceph_assert(readonly); if (onread) waitfor_recover.push_back(wrap_finisher(onread)); @@ -183,7 +183,7 @@ void Journaler::recover(Context *onread) void Journaler::_read_head(Context *on_finish, bufferlist *bl) { // lock is locked - assert(state == STATE_READHEAD || state == STATE_REREADHEAD); + ceph_assert(state == STATE_READHEAD || state == STATE_REREADHEAD); object_t oid = file_object_t(ino, 0); object_locator_t oloc(pg_pool); @@ -207,7 +207,7 @@ void Journaler::reread_head(Context *onfinish) void Journaler::_reread_head(Context *onfinish) { ldout(cct, 10) << "reread_head" << dendl; - assert(state == STATE_ACTIVE); + ceph_assert(state == STATE_ACTIVE); state = STATE_REREADHEAD; C_RereadHead *fin = new C_RereadHead(this, onfinish); @@ -223,7 +223,7 @@ void Journaler::_finish_reread_head(int r, bufferlist& bl, Context *finish) } //read on-disk header into - assert(bl.length() || r < 0 ); + ceph_assert(bl.length() || r < 0 ); // unpack header if (r == 0) { @@ -252,7 +252,7 @@ void Journaler::_finish_read_head(int r, bufferlist& bl) if (is_stopping()) return; - assert(state == STATE_READHEAD); + ceph_assert(state == STATE_READHEAD); if (r!=0) { 
ldout(cct, 0) << "error getting journal off disk" << dendl; @@ -320,7 +320,7 @@ void Journaler::_probe(Context *finish, uint64_t *end) { // lock is locked ldout(cct, 1) << "probing for end of the log" << dendl; - assert(state == STATE_PROBING || state == STATE_REPROBING); + ceph_assert(state == STATE_PROBING || state == STATE_REPROBING); // probe the log filer.probe(ino, &layout, CEPH_NOSNAP, write_pos, end, true, 0, wrap_finisher(finish)); @@ -329,7 +329,7 @@ void Journaler::_probe(Context *finish, uint64_t *end) void Journaler::_reprobe(C_OnFinisher *finish) { ldout(cct, 10) << "reprobe" << dendl; - assert(state == STATE_ACTIVE); + ceph_assert(state == STATE_ACTIVE); state = STATE_REPROBING; C_ReProbe *fin = new C_ReProbe(this, finish); @@ -346,7 +346,7 @@ void Journaler::_finish_reprobe(int r, uint64_t new_end, return; } - assert(new_end >= write_pos || r < 0); + ceph_assert(new_end >= write_pos || r < 0); ldout(cct, 1) << "_finish_reprobe new_end = " << new_end << " (header had " << write_pos << ")." << dendl; @@ -361,7 +361,7 @@ void Journaler::_finish_probe_end(int r, uint64_t end) if (is_stopping()) return; - assert(state == STATE_PROBING); + ceph_assert(state == STATE_PROBING); if (r < 0) { // error in probing goto out; } @@ -371,7 +371,7 @@ void Journaler::_finish_probe_end(int r, uint64_t end) << write_pos << "). log was empty. recovered." << dendl; ceph_abort(); // hrm. } else { - assert(end >= write_pos); + ceph_assert(end >= write_pos); ldout(cct, 1) << "_finish_probe_end write_pos = " << end << " (header had " << write_pos << "). recovered." << dendl; @@ -404,7 +404,7 @@ void Journaler::reread_head_and_probe(Context *onfinish) { lock_guard l(lock); - assert(state == STATE_ACTIVE); + ceph_assert(state == STATE_ACTIVE); _reread_head(new C_RereadHeadProbe(this, wrap_finisher(onfinish))); } @@ -417,7 +417,7 @@ void Journaler::_finish_reread_head_and_probe(int r, C_OnFinisher *onfinish) return; } - assert(!r); //if we get an error, we're boned + ceph_assert(!r); //if we get an error, we're boned _reprobe(onfinish); } @@ -445,8 +445,8 @@ void Journaler::write_head(Context *oncommit) void Journaler::_write_head(Context *oncommit) { - assert(!readonly); - assert(state == STATE_ACTIVE); + ceph_assert(!readonly); + ceph_assert(state == STATE_ACTIVE); last_written.trimmed_pos = trimmed_pos; last_written.expire_pos = expire_pos; last_written.unused_field = expire_pos; @@ -455,8 +455,8 @@ void Journaler::_write_head(Context *oncommit) ldout(cct, 10) << "write_head " << last_written << dendl; // Avoid persisting bad pointers in case of bugs - assert(last_written.write_pos >= last_written.expire_pos); - assert(last_written.expire_pos >= last_written.trimmed_pos); + ceph_assert(last_written.write_pos >= last_written.expire_pos); + ceph_assert(last_written.expire_pos >= last_written.trimmed_pos); last_wrote_head = ceph::real_clock::now(); @@ -483,7 +483,7 @@ void Journaler::_finish_write_head(int r, Header &wrote, handle_write_error(r); return; } - assert(!readonly); + ceph_assert(!readonly); ldout(cct, 10) << "_finish_write_head " << wrote << dendl; last_committed = wrote; if (oncommit) { @@ -511,7 +511,7 @@ public: void Journaler::_finish_flush(int r, uint64_t start, ceph::real_time stamp) { lock_guard l(lock); - assert(!readonly); + ceph_assert(!readonly); if (r < 0) { lderr(cct) << "_finish_flush got " << cpp_strerror(r) << dendl; @@ -519,7 +519,7 @@ void Journaler::_finish_flush(int r, uint64_t start, ceph::real_time stamp) return; } - assert(start < flush_pos); + ceph_assert(start < 
flush_pos); // calc latency? if (logger) { @@ -529,7 +529,7 @@ void Journaler::_finish_flush(int r, uint64_t start, ceph::real_time stamp) // adjust safe_pos auto it = pending_safe.find(start); - assert(it != pending_safe.end()); + ceph_assert(it != pending_safe.end()); pending_safe.erase(it); if (pending_safe.empty()) safe_pos = next_safe_pos; @@ -563,7 +563,7 @@ uint64_t Journaler::append_entry(bufferlist& bl) { unique_lock l(lock); - assert(!readonly); + ceph_assert(!readonly); uint32_t s = bl.length(); // append @@ -583,7 +583,7 @@ uint64_t Journaler::append_entry(bufferlist& bl) // flush previous object? uint64_t su = get_layout_period(); - assert(su > 0); + ceph_assert(su > 0); uint64_t write_off = write_pos % su; uint64_t write_obj = write_pos / su; uint64_t flush_obj = flush_pos / su; @@ -611,12 +611,12 @@ void Journaler::_do_flush(unsigned amount) return; if (write_pos == flush_pos) return; - assert(write_pos > flush_pos); - assert(!readonly); + ceph_assert(write_pos > flush_pos); + ceph_assert(!readonly); // flush uint64_t len = write_pos - flush_pos; - assert(len == write_buf.length()); + ceph_assert(len == write_buf.length()); if (amount && amount < len) len = amount; @@ -679,7 +679,7 @@ void Journaler::_do_flush(unsigned amount) wrap_finisher(onsafe), write_iohint); flush_pos += len; - assert(write_buf.length() == write_pos - flush_pos); + ceph_assert(write_buf.length() == write_pos - flush_pos); write_buf_throttle.put(len); ldout(cct, 20) << "write_buf_throttle put, len " << len << dendl; @@ -704,11 +704,11 @@ void Journaler::wait_for_flush(Context *onsafe) void Journaler::_wait_for_flush(Context *onsafe) { - assert(!readonly); + ceph_assert(!readonly); // all flushed and safe? if (write_pos == safe_pos) { - assert(write_buf.length() == 0); + ceph_assert(write_buf.length() == 0); ldout(cct, 10) << "flush nothing to flush, (prezeroing/prezero)/write/flush/safe " "pointers at " << "(" << prezeroing_pos << "/" << prezero_pos << ")/" @@ -737,10 +737,10 @@ void Journaler::flush(Context *onsafe) void Journaler::_flush(C_OnFinisher *onsafe) { - assert(!readonly); + ceph_assert(!readonly); if (write_pos == flush_pos) { - assert(write_buf.length() == 0); + ceph_assert(write_buf.length() == 0); ldout(cct, 10) << "flush nothing to flush, (prezeroing/prezero)/write/" "flush/safe pointers at " << "(" << prezeroing_pos << "/" << prezero_pos << ")/" << write_pos << "/" << flush_pos << "/" << safe_pos @@ -780,7 +780,7 @@ struct C_Journaler_Prezero : public Context { void Journaler::_issue_prezero() { - assert(prezeroing_pos >= flush_pos); + ceph_assert(prezeroing_pos >= flush_pos); uint64_t num_periods = cct->_conf.get_val("journaler_prezero_periods"); /* @@ -834,7 +834,7 @@ void Journaler::_finish_prezero(int r, uint64_t start, uint64_t len) return; } - assert(r == 0 || r == -ENOENT); + ceph_assert(r == 0 || r == -ENOENT); if (start == prezero_pos) { prezero_pos += len; @@ -866,7 +866,7 @@ void Journaler::_finish_prezero(int r, uint64_t start, uint64_t len) void Journaler::wait_for_prezero(Context *onfinish) { - assert(onfinish); + ceph_assert(onfinish); lock_guard l(lock); if (prezero_pos == prezeroing_pos) { @@ -967,7 +967,7 @@ void Journaler::_assimilate_prefetch() << p->second.length() << dendl; received_pos += p->second.length(); read_buf.claim_append(p->second); - assert(received_pos <= requested_pos); + ceph_assert(received_pos <= requested_pos); prefetch_buf.erase(p); got_any = true; } @@ -1000,11 +1000,11 @@ void Journaler::_issue_read(uint64_t len) { // stuck at safe_pos? 
(this is needed if we are reading the tail of // a journal we are also writing to) - assert(requested_pos <= safe_pos); + ceph_assert(requested_pos <= safe_pos); if (requested_pos == safe_pos) { ldout(cct, 10) << "_issue_read requested_pos = safe_pos = " << safe_pos << ", waiting" << dendl; - assert(write_pos > requested_pos); + ceph_assert(write_pos > requested_pos); if (pending_safe.empty()) { _flush(NULL); } @@ -1129,8 +1129,8 @@ bool Journaler::_is_readable() // adjust write_pos prezeroing_pos = prezero_pos = write_pos = flush_pos = safe_pos = next_safe_pos = read_pos; - assert(write_buf.length() == 0); - assert(waitfor_safe.empty()); + ceph_assert(write_buf.length() == 0); + ceph_assert(waitfor_safe.empty()); // reset read state requested_pos = received_pos = read_pos; @@ -1238,7 +1238,7 @@ bool Journaler::try_read_entry(bufferlist& bl) try { consumed = journal_stream.read(read_buf, &bl, &start_ptr); if (stream_format >= JOURNAL_FORMAT_RESILIENT) { - assert(start_ptr == read_pos); + ceph_assert(start_ptr == read_pos); } } catch (const buffer::error &e) { lderr(cct) << __func__ << ": decode error from journal_stream" << dendl; @@ -1273,7 +1273,7 @@ void Journaler::wait_for_readable(Context *onreadable) return; } - assert(on_readable == 0); + ceph_assert(on_readable == 0); if (!readable) { ldout(cct, 10) << "wait_for_readable at " << read_pos << " onreadable " << onreadable << dendl; @@ -1316,7 +1316,7 @@ void Journaler::_trim() if (is_stopping()) return; - assert(!readonly); + ceph_assert(!readonly); uint64_t period = get_layout_period(); uint64_t trim_to = last_committed.expire_pos; trim_to -= trim_to % period; @@ -1336,9 +1336,9 @@ void Journaler::_trim() } // trim - assert(trim_to <= write_pos); - assert(trim_to <= expire_pos); - assert(trim_to > trimming_pos); + ceph_assert(trim_to <= write_pos); + ceph_assert(trim_to <= expire_pos); + ceph_assert(trim_to > trimming_pos); ldout(cct, 10) << "trim trimming to " << trim_to << ", trimmed/trimming/expire are " << trimmed_pos << "/" << trimming_pos << "/" << expire_pos @@ -1358,7 +1358,7 @@ void Journaler::_finish_trim(int r, uint64_t to) { lock_guard l(lock); - assert(!readonly); + ceph_assert(!readonly); ldout(cct, 10) << "_finish_trim trimmed_pos was " << trimmed_pos << ", trimmed/trimming/expire now " << to << "/" << trimming_pos << "/" << expire_pos @@ -1369,10 +1369,10 @@ void Journaler::_finish_trim(int r, uint64_t to) return; } - assert(r >= 0 || r == -ENOENT); + ceph_assert(r >= 0 || r == -ENOENT); - assert(to <= trimming_pos); - assert(to > trimmed_pos); + ceph_assert(to <= trimming_pos); + ceph_assert(to > trimmed_pos); trimmed_pos = to; } @@ -1392,7 +1392,7 @@ void Journaler::handle_write_error(int r) lderr(cct) << __func__ << ": multiple write errors, handler already called" << dendl; } else { - assert(0 == "unhandled write error"); + ceph_assert(0 == "unhandled write error"); } } @@ -1407,7 +1407,7 @@ void Journaler::handle_write_error(int r) */ bool JournalStream::readable(bufferlist &read_buf, uint64_t *need) const { - assert(need != NULL); + ceph_assert(need != NULL); uint32_t entry_size = 0; uint64_t entry_sentinel = 0; @@ -1464,9 +1464,9 @@ bool JournalStream::readable(bufferlist &read_buf, uint64_t *need) const size_t JournalStream::read(bufferlist &from, bufferlist *entry, uint64_t *start_ptr) { - assert(start_ptr != NULL); - assert(entry != NULL); - assert(entry->length() == 0); + ceph_assert(start_ptr != NULL); + ceph_assert(entry != NULL); + ceph_assert(entry->length() == 0); uint32_t entry_size = 0; @@ -1477,7 
+1477,7 @@ size_t JournalStream::read(bufferlist &from, bufferlist *entry, decode(entry_sentinel, from_ptr); // Assertion instead of clean check because of precondition of this // fn is that readable() already passed - assert(entry_sentinel == sentinel); + ceph_assert(entry_sentinel == sentinel); } decode(entry_size, from_ptr); @@ -1504,7 +1504,7 @@ size_t JournalStream::read(bufferlist &from, bufferlist *entry, size_t JournalStream::write(bufferlist &entry, bufferlist *to, uint64_t const &start_ptr) { - assert(to != NULL); + ceph_assert(to != NULL); uint32_t const entry_size = entry.length(); if (format >= JOURNAL_FORMAT_RESILIENT) { @@ -1539,7 +1539,7 @@ size_t JournalStream::write(bufferlist &entry, bufferlist *to, */ void Journaler::set_write_error_handler(Context *c) { lock_guard l(lock); - assert(!on_write_error); + ceph_assert(!on_write_error); on_write_error = wrap_finisher(c); called_write_error = false; } diff --git a/src/osdc/Journaler.h b/src/osdc/Journaler.h index d001d20383b28..0704ee1f98edc 100644 --- a/src/osdc/Journaler.h +++ b/src/osdc/Journaler.h @@ -240,7 +240,7 @@ private: */ void _do_delayed_flush() { - assert(delay_flush_event != NULL); + ceph_assert(delay_flush_event != NULL); lock_guard l(lock); delay_flush_event = NULL; _do_flush(); @@ -369,7 +369,7 @@ private: // only init_headers when following or first reading off-disk void init_headers(Header& h) { - assert(readonly || + ceph_assert(readonly || state == STATE_READHEAD || state == STATE_REREADHEAD); last_written = last_committed = h; @@ -426,7 +426,7 @@ public: */ void reset() { lock_guard l(lock); - assert(state == STATE_ACTIVE); + ceph_assert(state == STATE_ACTIVE); readonly = true; delay_flush_event = NULL; @@ -442,7 +442,7 @@ public: requested_pos = 0; received_pos = 0; fetch_len = 0; - assert(!on_readable); + ceph_assert(!on_readable); expire_pos = 0; trimming_pos = 0; trimmed_pos = 0; @@ -475,7 +475,7 @@ public: void set_read_pos(uint64_t p) { lock_guard l(lock); // we can't cope w/ in-progress read right now. 
- assert(requested_pos == received_pos); + ceph_assert(requested_pos == received_pos); read_pos = requested_pos = received_pos = p; read_buf.clear(); } @@ -500,7 +500,7 @@ public: void trim_tail() { lock_guard l(lock); - assert(!readonly); + ceph_assert(!readonly); _issue_prezero(); } diff --git a/src/osdc/ObjectCacher.cc b/src/osdc/ObjectCacher.cc index e99b1e13e4eef..3c2f0d3038ee4 100644 --- a/src/osdc/ObjectCacher.cc +++ b/src/osdc/ObjectCacher.cc @@ -94,7 +94,7 @@ public: ObjectCacher::BufferHead *ObjectCacher::Object::split(BufferHead *left, loff_t off) { - assert(oc->lock.is_locked()); + ceph_assert(oc->lock.is_locked()); ldout(oc->cct, 20) << "split " << *left << " at " << off << dendl; // split off right @@ -126,7 +126,7 @@ ObjectCacher::BufferHead *ObjectCacher::Object::split(BufferHead *left, bufferlist bl; bl.claim(left->bl); if (bl.length()) { - assert(bl.length() == (left->length() + right->length())); + ceph_assert(bl.length() == (left->length() + right->length())); right->bl.substr_of(bl, left->length(), right->length()); left->bl.substr_of(bl, 0, left->length()); } @@ -143,7 +143,7 @@ ObjectCacher::BufferHead *ObjectCacher::Object::split(BufferHead *left, ldout(oc->cct, 20) << "split moving waiters at byte " << p->first << " to right bh" << dendl; right->waitfor_read[p->first].swap( p->second ); - assert(p->second.empty()); + ceph_assert(p->second.empty()); } left->waitfor_read.erase(start_remove, left->waitfor_read.end()); } @@ -156,7 +156,7 @@ ObjectCacher::BufferHead *ObjectCacher::Object::split(BufferHead *left, void ObjectCacher::Object::merge_left(BufferHead *left, BufferHead *right) { - assert(oc->lock.is_locked()); + ceph_assert(oc->lock.is_locked()); ldout(oc->cct, 10) << "merge_left " << *left << " + " << *right << dendl; if (left->get_journal_tid() == 0) { @@ -206,7 +206,7 @@ bool ObjectCacher::Object::can_merge_bh(BufferHead *left, BufferHead *right) void ObjectCacher::Object::try_merge_bh(BufferHead *bh) { - assert(oc->lock.is_locked()); + ceph_assert(oc->lock.is_locked()); ldout(oc->cct, 10) << "try_merge_bh " << *bh << dendl; // do not merge rx buffers; last_read_tid may not match @@ -215,7 +215,7 @@ void ObjectCacher::Object::try_merge_bh(BufferHead *bh) // to the left? map::iterator p = data.find(bh->start()); - assert(p->second == bh); + ceph_assert(p->second == bh); if (p != data.begin()) { --p; if (can_merge_bh(p->second, bh)) { @@ -226,7 +226,7 @@ void ObjectCacher::Object::try_merge_bh(BufferHead *bh) } } // to the right? 
- assert(p->second == bh); + ceph_assert(p->second == bh); ++p; if (p != data.end() && can_merge_bh(bh, p->second)) merge_left(bh, p->second); @@ -237,7 +237,7 @@ void ObjectCacher::Object::try_merge_bh(BufferHead *bh) */ bool ObjectCacher::Object::is_cached(loff_t cur, loff_t left) const { - assert(oc->lock.is_locked()); + ceph_assert(oc->lock.is_locked()); map::const_iterator p = data_lower_bound(cur); while (left > 0) { if (p == data.end()) @@ -265,7 +265,7 @@ bool ObjectCacher::Object::is_cached(loff_t cur, loff_t left) const */ bool ObjectCacher::Object::include_all_cached_data(loff_t off, loff_t len) { - assert(oc->lock.is_locked()); + ceph_assert(oc->lock.is_locked()); if (data.empty()) return true; map::iterator first = data.begin(); @@ -286,7 +286,7 @@ int ObjectCacher::Object::map_read(ObjectExtent &ex, map& rx, map& errors) { - assert(oc->lock.is_locked()); + ceph_assert(oc->lock.is_locked()); ldout(oc->cct, 10) << "map_read " << ex.oid << " " << ex.offset << "~" << ex.length << dendl; @@ -311,7 +311,7 @@ int ObjectCacher::Object::map_read(ObjectExtent &ex, ldout(oc->cct, 20) << "map_read miss " << left << " left, " << *n << dendl; } cur += left; - assert(cur == (loff_t)ex.offset + (loff_t)ex.length); + ceph_assert(cur == (loff_t)ex.offset + (loff_t)ex.length); break; // no more. } @@ -376,13 +376,13 @@ void ObjectCacher::Object::audit_buffers() lderr(oc->cct) << "AUDIT FAILURE: map position " << it->first << " does not match bh start position: " << *it->second << dendl; - assert(it->first == it->second->start()); + ceph_assert(it->first == it->second->start()); } if (it->first < offset) { lderr(oc->cct) << "AUDIT FAILURE: " << it->first << " " << *it->second << " overlaps with previous bh " << *((--it)->second) << dendl; - assert(it->first >= offset); + ceph_assert(it->first >= offset); } BufferHead *bh = it->second; map >::const_iterator w_it; @@ -392,8 +392,8 @@ void ObjectCacher::Object::audit_buffers() w_it->first >= bh->start() + bh->length()) { lderr(oc->cct) << "AUDIT FAILURE: waiter at " << w_it->first << " is not within bh " << *bh << dendl; - assert(w_it->first >= bh->start()); - assert(w_it->first < bh->start() + bh->length()); + ceph_assert(w_it->first >= bh->start()); + ceph_assert(w_it->first < bh->start() + bh->length()); } } offset = it->first + it->second->length(); @@ -410,7 +410,7 @@ void ObjectCacher::Object::audit_buffers() ObjectCacher::BufferHead *ObjectCacher::Object::map_write(ObjectExtent &ex, ceph_tid_t tid) { - assert(oc->lock.is_locked()); + ceph_assert(oc->lock.is_locked()); BufferHead *final = 0; ldout(oc->cct, 10) << "map_write oex " << ex.oid @@ -450,23 +450,23 @@ ObjectCacher::BufferHead *ObjectCacher::Object::map_write(ObjectExtent &ex, ldout(oc->cct, 10) << "map_write bh " << *bh << " intersected" << dendl; if (p->first < cur) { - assert(final == 0); + ceph_assert(final == 0); if (cur + max >= bh->end()) { // we want right bit (one splice) final = split(bh, cur); // just split it, take right half. replace_journal_tid(final, tid); ++p; - assert(p->second == final); + ceph_assert(p->second == final); } else { // we want middle bit (two splices) final = split(bh, cur); ++p; - assert(p->second == final); + ceph_assert(p->second == final); split(final, cur+max); replace_journal_tid(final, tid); } } else { - assert(p->first == cur); + ceph_assert(p->first == cur); if (bh->length() <= max) { // whole bufferhead, piece of cake. 
} else { @@ -477,7 +477,7 @@ ObjectCacher::BufferHead *ObjectCacher::Object::map_write(ObjectExtent &ex, oc->mark_dirty(bh); oc->mark_dirty(final); --p; // move iterator back to final - assert(p->second == final); + ceph_assert(p->second == final); replace_journal_tid(bh, tid); merge_left(final, bh); } else { @@ -516,8 +516,8 @@ ObjectCacher::BufferHead *ObjectCacher::Object::map_write(ObjectExtent &ex, } // set version - assert(final); - assert(final->get_journal_tid() == tid); + ceph_assert(final); + ceph_assert(final->get_journal_tid() == tid); ldout(oc->cct, 10) << "map_write final is " << *final << dendl; return final; @@ -527,7 +527,7 @@ void ObjectCacher::Object::replace_journal_tid(BufferHead *bh, ceph_tid_t tid) { ceph_tid_t bh_tid = bh->get_journal_tid(); - assert(tid == 0 || bh_tid <= tid); + ceph_assert(tid == 0 || bh_tid <= tid); if (bh_tid != 0 && bh_tid != tid) { // inform journal that it should not expect a writeback from this extent oc->writeback_handler.overwrite_extent(get_oid(), bh->start(), @@ -538,7 +538,7 @@ void ObjectCacher::Object::replace_journal_tid(BufferHead *bh, void ObjectCacher::Object::truncate(loff_t s) { - assert(oc->lock.is_locked()); + ceph_assert(oc->lock.is_locked()); ldout(oc->cct, 10) << "truncate " << *this << " to " << s << dendl; while (!data.empty()) { @@ -553,8 +553,8 @@ void ObjectCacher::Object::truncate(loff_t s) } // remove bh entirely - assert(bh->start() >= s); - assert(bh->waitfor_read.empty()); + ceph_assert(bh->start() >= s); + ceph_assert(bh->waitfor_read.empty()); replace_journal_tid(bh, 0); oc->bh_remove(this, bh); delete bh; @@ -564,7 +564,7 @@ void ObjectCacher::Object::truncate(loff_t s) void ObjectCacher::Object::discard(loff_t off, loff_t len, C_GatherBuilder* commit_gather) { - assert(oc->lock.is_locked()); + ceph_assert(oc->lock.is_locked()); ldout(oc->cct, 10) << "discard " << *this << " " << off << "~" << len << dendl; @@ -590,7 +590,7 @@ void ObjectCacher::Object::discard(loff_t off, loff_t len, continue; } - assert(bh->start() >= off); + ceph_assert(bh->start() >= off); if (bh->end() > off + len) { split(bh, off + len); } @@ -612,7 +612,7 @@ void ObjectCacher::Object::discard(loff_t off, loff_t len, // we should mark all Rx bh to zero continue; } else { - assert(bh->waitfor_read.empty()); + ceph_assert(bh->waitfor_read.empty()); } oc->bh_remove(this, bh); @@ -663,11 +663,11 @@ ObjectCacher::~ObjectCacher() = objects.begin(); i != objects.end(); ++i) - assert(i->empty()); - assert(bh_lru_rest.lru_get_size() == 0); - assert(bh_lru_dirty.lru_get_size() == 0); - assert(ob_lru.lru_get_size() == 0); - assert(dirty_or_tx_bh.empty()); + ceph_assert(i->empty()); + ceph_assert(bh_lru_rest.lru_get_size() == 0); + ceph_assert(bh_lru_dirty.lru_get_size() == 0); + ceph_assert(ob_lru.lru_get_size() == 0); + ceph_assert(dirty_or_tx_bh.empty()); } void ObjectCacher::perf_start() @@ -706,7 +706,7 @@ void ObjectCacher::perf_start() void ObjectCacher::perf_stop() { - assert(perfcounter); + ceph_assert(perfcounter); cct->get_perfcounters_collection()->remove(perfcounter); delete perfcounter; } @@ -720,7 +720,7 @@ ObjectCacher::Object *ObjectCacher::get_object(sobject_t oid, uint64_t truncate_seq) { // XXX: Add handling of nspace in object_locator_t in cache - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); // have it? 
if ((uint32_t)l.pool < objects.size()) { if (objects[l.pool].count(oid)) { @@ -744,9 +744,9 @@ ObjectCacher::Object *ObjectCacher::get_object(sobject_t oid, void ObjectCacher::close_object(Object *ob) { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); ldout(cct, 10) << "close_object " << *ob << dendl; - assert(ob->can_close()); + ceph_assert(ob->can_close()); // ok! ob_lru.lru_remove(ob); @@ -758,7 +758,7 @@ void ObjectCacher::close_object(Object *ob) void ObjectCacher::bh_read(BufferHead *bh, int op_flags, const ZTracer::Trace &parent_trace) { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); ldout(cct, 7) << "bh_read on " << *bh << " outstanding reads " << reads_outstanding << dendl; @@ -790,7 +790,7 @@ void ObjectCacher::bh_read_finish(int64_t poolid, sobject_t oid, uint64_t length, bufferlist &bl, int r, bool trust_enoent) { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); ldout(cct, 7) << "bh_read_finish " << oid << " tid " << tid @@ -919,9 +919,9 @@ void ObjectCacher::bh_read_finish(int64_t poolid, sobject_t oid, continue; } - assert(opos >= bh->start()); - assert(bh->start() == opos); // we don't merge rx bh's... yet! - assert(bh->length() <= start+(loff_t)length-opos); + ceph_assert(opos >= bh->start()); + ceph_assert(bh->start() == opos); // we don't merge rx bh's... yet! + ceph_assert(bh->length() <= start+(loff_t)length-opos); if (bh->error < 0) err = bh->error; @@ -974,7 +974,7 @@ void ObjectCacher::bh_write_adjacencies(BufferHead *bh, ceph::real_time cutoff, int count = 0; int64_t total_len = 0; set::iterator it = dirty_or_tx_bh.find(bh); - assert(it != dirty_or_tx_bh.end()); + ceph_assert(it != dirty_or_tx_bh.end()); for (set::iterator p = it; p != dirty_or_tx_bh.end(); ++p) { @@ -1038,7 +1038,7 @@ public: }; void ObjectCacher::bh_write_scattered(list& blist) { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); Object *ob = blist.front()->ob; ob->get(); @@ -1055,8 +1055,8 @@ void ObjectCacher::bh_write_scattered(list& blist) for (list::iterator p = blist.begin(); p != blist.end(); ++p) { BufferHead *bh = *p; ldout(cct, 7) << "bh_write_scattered " << *bh << dendl; - assert(bh->ob == ob); - assert(bh->bl.length() == bh->length()); + ceph_assert(bh->ob == ob); + ceph_assert(bh->bl.length() == bh->length()); ranges.push_back(pair(bh->start(), bh->length())); int n = io_vec.size(); @@ -1091,7 +1091,7 @@ void ObjectCacher::bh_write_scattered(list& blist) void ObjectCacher::bh_write(BufferHead *bh, const ZTracer::Trace &parent_trace) { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); ldout(cct, 7) << "bh_write " << *bh << dendl; bh->ob->get(); @@ -1133,7 +1133,7 @@ void ObjectCacher::bh_write_commit(int64_t poolid, sobject_t oid, vector >& ranges, ceph_tid_t tid, int r) { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); ldout(cct, 7) << "bh_write_commit " << oid << " tid " << tid << " ranges " << ranges << " returned " << r << dendl; @@ -1180,14 +1180,14 @@ void ObjectCacher::bh_write_commit(int64_t poolid, sobject_t oid, // make sure bh tid matches if (bh->last_write_tid != tid) { - assert(bh->last_write_tid > tid); + ceph_assert(bh->last_write_tid > tid); ldout(cct, 10) << "bh_write_commit newer tid on " << *bh << dendl; continue; } // we don't merge tx buffers. tx buffer should be within the range - assert(bh->start() >= start); - assert(bh->end() <= start+(loff_t)length); + ceph_assert(bh->start() >= start); + ceph_assert(bh->end() <= start+(loff_t)length); if (r >= 0) { // ok! 
mark bh clean and error-free @@ -1213,7 +1213,7 @@ void ObjectCacher::bh_write_commit(int64_t poolid, sobject_t oid, } // update last_commit. - assert(ob->last_commit_tid < tid); + ceph_assert(ob->last_commit_tid < tid); ob->last_commit_tid = tid; // waiters? @@ -1239,8 +1239,8 @@ void ObjectCacher::bh_write_commit(int64_t poolid, sobject_t oid, void ObjectCacher::flush(ZTracer::Trace *trace, loff_t amount) { - assert(trace != nullptr); - assert(lock.is_locked()); + ceph_assert(trace != nullptr); + ceph_assert(lock.is_locked()); ceph::real_time cutoff = ceph::real_clock::now(); ldout(cct, 10) << "flush " << amount << dendl; @@ -1270,7 +1270,7 @@ void ObjectCacher::flush(ZTracer::Trace *trace, loff_t amount) void ObjectCacher::trim() { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); ldout(cct, 10) << "trim start: bytes: max " << max_size << " clean " << get_stat_clean() << ", objects: max " << max_objects << " current " << ob_lru.lru_get_size() << dendl; @@ -1285,7 +1285,7 @@ void ObjectCacher::trim() break; ldout(cct, 10) << "trim trimming " << *bh << dendl; - assert(bh->is_clean() || bh->is_zero() || bh->is_error()); + ceph_assert(bh->is_clean() || bh->is_zero() || bh->is_error()); Object *ob = bh->ob; bh_remove(ob, bh); @@ -1320,7 +1320,7 @@ void ObjectCacher::trim() bool ObjectCacher::is_cached(ObjectSet *oset, vector& extents, snapid_t snapid) { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); for (vector::iterator ex_it = extents.begin(); ex_it != extents.end(); ++ex_it) { @@ -1362,8 +1362,8 @@ int ObjectCacher::readx(OSDRead *rd, ObjectSet *oset, Context *onfinish, int ObjectCacher::_readx(OSDRead *rd, ObjectSet *oset, Context *onfinish, bool external_call, ZTracer::Trace *trace) { - assert(trace != nullptr); - assert(lock.is_locked()); + ceph_assert(trace != nullptr); + ceph_assert(lock.is_locked()); bool success = true; int error = 0; uint64_t bytes_in_cache = 0; @@ -1378,7 +1378,7 @@ int ObjectCacher::_readx(OSDRead *rd, ObjectSet *oset, Context *onfinish, * passed in a single ObjectExtent. Any caller who wants ENOENT instead of * zeroed buffers needs to feed single extents into readx(). 
*/ - assert(!oset->return_enoent || rd->extents.size() == 1); + ceph_assert(!oset->return_enoent || rd->extents.size() == 1); for (vector::iterator ex_it = rd->extents.begin(); ex_it != rd->extents.end(); @@ -1527,7 +1527,7 @@ int ObjectCacher::_readx(OSDRead *rd, ObjectSet *oset, Context *onfinish, touch_bh(bh_it->second); } else { - assert(!hits.empty()); + ceph_assert(!hits.empty()); // make a plain list for (map::iterator bh_it = hits.begin(); @@ -1561,14 +1561,14 @@ int ObjectCacher::_readx(OSDRead *rd, ObjectSet *oset, Context *onfinish, // - the buffer frags need not be (and almost certainly aren't) loff_t opos = ex_it->offset; map::iterator bh_it = hits.begin(); - assert(bh_it->second->start() <= opos); + ceph_assert(bh_it->second->start() <= opos); uint64_t bhoff = opos - bh_it->second->start(); vector >::iterator f_it = ex_it->buffer_extents.begin(); uint64_t foff = 0; while (1) { BufferHead *bh = bh_it->second; - assert(opos == (loff_t)(bh->start() + bhoff)); + ceph_assert(opos == (loff_t)(bh->start() + bhoff)); uint64_t len = std::min(f_it->second - foff, bh->length() - bhoff); ldout(cct, 10) << "readx rmap opos " << opos << ": " << *bh << " +" @@ -1603,8 +1603,8 @@ int ObjectCacher::_readx(OSDRead *rd, ObjectSet *oset, Context *onfinish, if (f_it == ex_it->buffer_extents.end()) break; } - assert(f_it == ex_it->buffer_extents.end()); - assert(opos == (loff_t)ex_it->offset + (loff_t)ex_it->length); + ceph_assert(f_it == ex_it->buffer_extents.end()); + ceph_assert(opos == (loff_t)ex_it->offset + (loff_t)ex_it->length); } if (dontneed && o->include_all_cached_data(ex_it->offset, ex_it->length)) @@ -1643,12 +1643,12 @@ int ObjectCacher::_readx(OSDRead *rd, ObjectSet *oset, Context *onfinish, for (map::iterator i = stripe_map.begin(); i != stripe_map.end(); ++i) { - assert(pos == i->first); + ceph_assert(pos == i->first); ldout(cct, 10) << "readx adding buffer len " << i->second.length() << " at " << pos << dendl; pos += i->second.length(); rd->bl->claim_append(i->second); - assert(rd->bl->length() == pos); + ceph_assert(rd->bl->length() == pos); } ldout(cct, 10) << "readx result is " << rd->bl->length() << dendl; } else if (!error) { @@ -1660,7 +1660,7 @@ int ObjectCacher::_readx(OSDRead *rd, ObjectSet *oset, Context *onfinish, // done with read. int ret = error ? error : pos; ldout(cct, 20) << "readx done " << rd << " " << ret << dendl; - assert(pos <= (uint64_t) INT_MAX); + ceph_assert(pos <= (uint64_t) INT_MAX); delete rd; @@ -1685,7 +1685,7 @@ void ObjectCacher::retry_waiting_reads() int ObjectCacher::writex(OSDWrite *wr, ObjectSet *oset, Context *onfreespace, ZTracer::Trace *parent_trace) { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); ceph::real_time now = ceph::real_clock::now(); uint64_t bytes_written = 0; uint64_t bytes_written_in_flush = 0; @@ -1730,7 +1730,7 @@ int ObjectCacher::writex(OSDWrite *wr, ObjectSet *oset, Context *onfreespace, << f_it->second << " into " << *bh << " at " << opos << dendl; uint64_t bhoff = opos - bh->start(); - assert(f_it->second <= bh->length() - bhoff); + ceph_assert(f_it->second <= bh->length() - bhoff); // get the frag we're mapping in bufferlist frag; @@ -1798,7 +1798,7 @@ void ObjectCacher::C_WaitForWrite::finish(int r) void ObjectCacher::maybe_wait_for_writeback(uint64_t len, ZTracer::Trace *trace) { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); ceph::mono_time start = ceph::mono_clock::now(); int blocked = 0; // wait for writeback? 
@@ -1845,8 +1845,8 @@ void ObjectCacher::maybe_wait_for_writeback(uint64_t len, int ObjectCacher::_wait_for_write(OSDWrite *wr, uint64_t len, ObjectSet *oset, ZTracer::Trace *trace, Context *onfreespace) { - assert(lock.is_locked()); - assert(trace != nullptr); + ceph_assert(lock.is_locked()); + ceph_assert(trace != nullptr); int ret = 0; if (max_dirty > 0 && !(wr->fadvise_flags & LIBRADOS_OP_FLAG_FADVISE_FUA)) { @@ -1855,7 +1855,7 @@ int ObjectCacher::_wait_for_write(OSDWrite *wr, uint64_t len, ObjectSet *oset, if (onfreespace) onfreespace->complete(0); } else { - assert(onfreespace); + ceph_assert(onfreespace); finisher.queue(new C_WaitForWrite(this, len, *trace, onfreespace)); } } else { @@ -1864,9 +1864,9 @@ int ObjectCacher::_wait_for_write(OSDWrite *wr, uint64_t len, ObjectSet *oset, bool done = false; Context *fin = block_writes_upfront ? new C_Cond(&cond, &done, &ret) : onfreespace; - assert(fin); + ceph_assert(fin); bool flushed = flush_set(oset, wr->extents, trace, fin); - assert(!flushed); // we just dirtied it, and didn't drop our lock! + ceph_assert(!flushed); // we just dirtied it, and didn't drop our lock! ldout(cct, 10) << "wait_for_write waiting on write-thru of " << len << " bytes" << dendl; if (block_writes_upfront) { @@ -1972,7 +1972,7 @@ void ObjectCacher::flusher_entry() bool ObjectCacher::set_is_empty(ObjectSet *oset) { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); if (oset->objects.empty()) return true; @@ -1985,7 +1985,7 @@ bool ObjectCacher::set_is_empty(ObjectSet *oset) bool ObjectCacher::set_is_cached(ObjectSet *oset) { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); if (oset->objects.empty()) return false; @@ -2006,7 +2006,7 @@ bool ObjectCacher::set_is_cached(ObjectSet *oset) bool ObjectCacher::set_is_dirty_or_committing(ObjectSet *oset) { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); if (oset->objects.empty()) return false; @@ -2030,7 +2030,7 @@ bool ObjectCacher::set_is_dirty_or_committing(ObjectSet *oset) // purge. non-blocking. violently removes dirty buffers from cache. 
void ObjectCacher::purge(Object *ob) { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); ldout(cct, 10) << "purge " << *ob << dendl; ob->truncate(0); @@ -2044,8 +2044,8 @@ void ObjectCacher::purge(Object *ob) bool ObjectCacher::flush(Object *ob, loff_t offset, loff_t length, ZTracer::Trace *trace) { - assert(trace != nullptr); - assert(lock.is_locked()); + ceph_assert(trace != nullptr); + ceph_assert(lock.is_locked()); list blist; bool clean = true; ldout(cct, 10) << "flush " << *ob << " " << offset << "~" << length << dendl; @@ -2080,7 +2080,7 @@ bool ObjectCacher::flush(Object *ob, loff_t offset, loff_t length, bool ObjectCacher::_flush_set_finish(C_GatherBuilder *gather, Context *onfinish) { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); if (gather->has_subs()) { gather->set_finisher(onfinish); gather->activate(); @@ -2096,8 +2096,8 @@ bool ObjectCacher::_flush_set_finish(C_GatherBuilder *gather, // returns true if already flushed bool ObjectCacher::flush_set(ObjectSet *oset, Context *onfinish) { - assert(lock.is_locked()); - assert(onfinish != NULL); + ceph_assert(lock.is_locked()); + ceph_assert(onfinish != NULL); if (oset->objects.empty()) { ldout(cct, 10) << "flush_set on " << oset << " dne" << dendl; onfinish->complete(0); @@ -2199,9 +2199,9 @@ bool ObjectCacher::flush_set(ObjectSet *oset, Context *onfinish) bool ObjectCacher::flush_set(ObjectSet *oset, vector& exv, ZTracer::Trace *trace, Context *onfinish) { - assert(lock.is_locked()); - assert(trace != nullptr); - assert(onfinish != NULL); + ceph_assert(lock.is_locked()); + ceph_assert(trace != nullptr); + ceph_assert(onfinish != NULL); if (oset->objects.empty()) { ldout(cct, 10) << "flush_set on " << oset << " dne" << dendl; onfinish->complete(0); @@ -2241,8 +2241,8 @@ bool ObjectCacher::flush_set(ObjectSet *oset, vector& exv, // returns true if already flushed bool ObjectCacher::flush_all(Context *onfinish) { - assert(lock.is_locked()); - assert(onfinish != NULL); + ceph_assert(lock.is_locked()); + ceph_assert(onfinish != NULL); ldout(cct, 10) << "flush_all " << dendl; @@ -2296,7 +2296,7 @@ bool ObjectCacher::flush_all(Context *onfinish) void ObjectCacher::purge_set(ObjectSet *oset) { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); if (oset->objects.empty()) { ldout(cct, 10) << "purge_set on " << oset << " dne" << dendl; return; @@ -2313,7 +2313,7 @@ void ObjectCacher::purge_set(ObjectSet *oset) // Although we have purged rather than flushed, caller should still // drop any resources associate with dirty data. - assert(oset->dirty_or_tx == 0); + ceph_assert(oset->dirty_or_tx == 0); if (flush_set_callback && were_dirty) { flush_set_callback(flush_set_callback_arg, oset); } @@ -2322,7 +2322,7 @@ void ObjectCacher::purge_set(ObjectSet *oset) loff_t ObjectCacher::release(Object *ob) { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); list clean; loff_t o_unclean = 0; @@ -2346,7 +2346,7 @@ loff_t ObjectCacher::release(Object *ob) if (ob->can_close()) { ldout(cct, 10) << "release trimming " << *ob << dendl; close_object(ob); - assert(o_unclean == 0); + ceph_assert(o_unclean == 0); return 0; } @@ -2364,7 +2364,7 @@ loff_t ObjectCacher::release(Object *ob) loff_t ObjectCacher::release_set(ObjectSet *oset) { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); // return # bytes not clean (and thus not released). 
loff_t unclean = 0; @@ -2403,7 +2403,7 @@ loff_t ObjectCacher::release_set(ObjectSet *oset) uint64_t ObjectCacher::release_all() { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); ldout(cct, 10) << "release_all" << dendl; uint64_t unclean = 0; @@ -2439,7 +2439,7 @@ uint64_t ObjectCacher::release_all() void ObjectCacher::clear_nonexistence(ObjectSet *oset) { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); ldout(cct, 10) << "clear_nonexistence() " << oset << dendl; for (xlist::iterator p = oset->objects.begin(); @@ -2464,7 +2464,7 @@ void ObjectCacher::clear_nonexistence(ObjectSet *oset) */ void ObjectCacher::discard_set(ObjectSet *oset, const vector& exls) { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); bool was_dirty = oset->dirty_or_tx > 0; _discard(oset, exls, nullptr); @@ -2480,7 +2480,7 @@ void ObjectCacher::discard_writeback(ObjectSet *oset, const vector& exls, Context* on_finish) { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); bool was_dirty = oset->dirty_or_tx > 0; C_GatherBuilder gather(cct); @@ -2490,7 +2490,7 @@ void ObjectCacher::discard_writeback(ObjectSet *oset, bool flushed = was_dirty && oset->dirty_or_tx == 0; gather.set_finisher(new FunctionContext( [this, oset, flushed, on_finish](int) { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); if (flushed && flush_set_callback) flush_set_callback(flush_set_callback_arg, oset); if (on_finish) @@ -2527,7 +2527,7 @@ void ObjectCacher::_discard(ObjectSet *oset, const vector& exls, void ObjectCacher::_discard_finish(ObjectSet *oset, bool was_dirty, Context* on_finish) { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); // did we truncate off dirty data? if (flush_set_callback && was_dirty && oset->dirty_or_tx == 0) { @@ -2542,7 +2542,7 @@ void ObjectCacher::_discard_finish(ObjectSet *oset, bool was_dirty, void ObjectCacher::verify_stats() const { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); ldout(cct, 10) << "verify_stats" << dendl; loff_t clean = 0, zero = 0, dirty = 0, rx = 0, tx = 0, missing = 0, @@ -2592,18 +2592,18 @@ void ObjectCacher::verify_stats() const ldout(cct, 10) << " clean " << clean << " rx " << rx << " tx " << tx << " dirty " << dirty << " missing " << missing << " error " << error << dendl; - assert(clean == stat_clean); - assert(rx == stat_rx); - assert(tx == stat_tx); - assert(dirty == stat_dirty); - assert(missing == stat_missing); - assert(zero == stat_zero); - assert(error == stat_error); + ceph_assert(clean == stat_clean); + ceph_assert(rx == stat_rx); + ceph_assert(tx == stat_tx); + ceph_assert(dirty == stat_dirty); + ceph_assert(missing == stat_missing); + ceph_assert(zero == stat_zero); + ceph_assert(error == stat_error); } void ObjectCacher::bh_stat_add(BufferHead *bh) { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); switch (bh->get_state()) { case BufferHead::STATE_MISSING: stat_missing += bh->length(); @@ -2631,7 +2631,7 @@ void ObjectCacher::bh_stat_add(BufferHead *bh) stat_error += bh->length(); break; default: - assert(0 == "bh_stat_add: invalid bufferhead state"); + ceph_assert(0 == "bh_stat_add: invalid bufferhead state"); } if (get_stat_dirty_waiting() > 0) stat_cond.Signal(); @@ -2639,7 +2639,7 @@ void ObjectCacher::bh_stat_add(BufferHead *bh) void ObjectCacher::bh_stat_sub(BufferHead *bh) { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); switch (bh->get_state()) { case BufferHead::STATE_MISSING: stat_missing -= bh->length(); @@ -2667,13 +2667,13 @@ void 
ObjectCacher::bh_stat_sub(BufferHead *bh) stat_error -= bh->length(); break; default: - assert(0 == "bh_stat_sub: invalid bufferhead state"); + ceph_assert(0 == "bh_stat_sub: invalid bufferhead state"); } } void ObjectCacher::bh_set_state(BufferHead *bh, int s) { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); int state = bh->get_state(); // move between lru lists? if (s == BufferHead::STATE_DIRTY && state != BufferHead::STATE_DIRTY) { @@ -2712,7 +2712,7 @@ void ObjectCacher::bh_set_state(BufferHead *bh, int s) void ObjectCacher::bh_add(Object *ob, BufferHead *bh) { - assert(lock.is_locked()); + ceph_assert(lock.is_locked()); ldout(cct, 30) << "bh_add " << *ob << " " << *bh << dendl; ob->add_bh(bh); if (bh->is_dirty()) { @@ -2733,8 +2733,8 @@ void ObjectCacher::bh_add(Object *ob, BufferHead *bh) void ObjectCacher::bh_remove(Object *ob, BufferHead *bh) { - assert(lock.is_locked()); - assert(bh->get_journal_tid() == 0); + ceph_assert(lock.is_locked()); + ceph_assert(bh->get_journal_tid() == 0); ldout(cct, 30) << "bh_remove " << *ob << " " << *bh << dendl; ob->remove_bh(bh); if (bh->is_dirty()) { diff --git a/src/osdc/ObjectCacher.h b/src/osdc/ObjectCacher.h index 60f049ef55d5b..81cda17aef625 100644 --- a/src/osdc/ObjectCacher.h +++ b/src/osdc/ObjectCacher.h @@ -179,12 +179,12 @@ class ObjectCacher { // reference counting int get() { - assert(ref >= 0); + ceph_assert(ref >= 0); if (ref == 0) lru_pin(); return ++ref; } int put() { - assert(ref > 0); + ceph_assert(ref > 0); if (ref == 1) lru_unpin(); --ref; return ref; @@ -271,9 +271,9 @@ class ObjectCacher { } ~Object() { reads.clear(); - assert(ref == 0); - assert(data.empty()); - assert(dirty_or_tx == 0); + ceph_assert(ref == 0); + ceph_assert(data.empty()); + ceph_assert(dirty_or_tx == 0); set_item.remove_myself(); } @@ -289,8 +289,8 @@ class ObjectCacher { bool can_close() const { if (lru_is_expireable()) { - assert(data.empty()); - assert(waitfor_commit.empty()); + ceph_assert(data.empty()); + ceph_assert(waitfor_commit.empty()); return true; } return false; @@ -326,11 +326,11 @@ class ObjectCacher { void add_bh(BufferHead *bh) { if (data.empty()) get(); - assert(data.count(bh->start()) == 0); + ceph_assert(data.count(bh->start()) == 0); data[bh->start()] = bh; } void remove_bh(BufferHead *bh) { - assert(data.count(bh->start())); + ceph_assert(data.count(bh->start())); data.erase(bh->start()); if (data.empty()) put(); @@ -359,12 +359,12 @@ class ObjectCacher { // reference counting int get() { - assert(ref >= 0); + ceph_assert(ref >= 0); if (ref == 0) lru_pin(); return ++ref; } int put() { - assert(ref > 0); + ceph_assert(ref > 0); if (ref == 1) lru_unpin(); --ref; return ref; @@ -588,7 +588,7 @@ class ObjectCacher { flusher_thread.create("flusher"); } void stop() { - assert(flusher_thread.is_started()); + ceph_assert(flusher_thread.is_started()); lock.Lock(); // hmm.. watch out for deadlock! 
flusher_stop = true; flusher_cond.Signal(); diff --git a/src/osdc/Objecter.cc b/src/osdc/Objecter.cc index c6e4e8cd5d70c..ff7db68156fcd 100644 --- a/src/osdc/Objecter.cc +++ b/src/osdc/Objecter.cc @@ -237,7 +237,7 @@ void Objecter::update_crush_location() */ void Objecter::init() { - assert(!initialized); + ceph_assert(!initialized); if (!logger) { PerfCountersBuilder pcb(cct, "objecter", l_osdc_first, l_osdc_last); @@ -407,7 +407,7 @@ void Objecter::start(const OSDMap* o) void Objecter::shutdown() { - assert(initialized); + ceph_assert(initialized); unique_lock wl(rwlock); @@ -526,7 +526,7 @@ void Objecter::shutdown() void Objecter::_send_linger(LingerOp *info, shunique_lock& sul) { - assert(sul.owns_lock() && sul.mutex() == &rwlock); + ceph_assert(sul.owns_lock() && sul.mutex() == &rwlock); vector opv; Context *oncommit = NULL; @@ -765,7 +765,7 @@ void Objecter::_linger_cancel(LingerOp *info) linger_ops.erase(info->linger_id); linger_ops_set.erase(info); - assert(linger_ops.size() == linger_ops_set.size()); + ceph_assert(linger_ops.size() == linger_ops_set.size()); info->canceled = true; info->put(); @@ -798,7 +798,7 @@ Objecter::LingerOp *Objecter::linger_register(const object_t& oid, << dendl; linger_ops[info->linger_id] = info; linger_ops_set.insert(info); - assert(linger_ops.size() == linger_ops_set.size()); + ceph_assert(linger_ops.size() == linger_ops_set.size()); info->get(); // for the caller return info; @@ -857,9 +857,9 @@ ceph_tid_t Objecter::linger_notify(LingerOp *info, void Objecter::_linger_submit(LingerOp *info, shunique_lock& sul) { - assert(sul.owns_lock() && sul.mutex() == &rwlock); - assert(info->linger_id); - assert(info->ctx_budget != -1); // caller needs to have taken budget already! + ceph_assert(sul.owns_lock() && sul.mutex() == &rwlock); + ceph_assert(info->linger_id); + ceph_assert(info->ctx_budget != -1); // caller needs to have taken budget already! // Populate Op::target OSDSession *s = NULL; @@ -867,7 +867,7 @@ void Objecter::_linger_submit(LingerOp *info, shunique_lock& sul) // Create LingerOp<->OSDSession relation int r = _get_session(info->target.osd, &s, sul); - assert(r == 0); + ceph_assert(r == 0); OSDSession::unique_lock sl(s->lock); _session_linger_op_assign(s, info); sl.unlock(); @@ -937,7 +937,7 @@ void Objecter::_do_watch_notify(LingerOp *info, MWatchNotify *m) ldout(cct, 10) << __func__ << " " << *m << dendl; shared_lock l(rwlock); - assert(initialized); + ceph_assert(initialized); if (info->canceled) { l.unlock(); @@ -945,9 +945,9 @@ void Objecter::_do_watch_notify(LingerOp *info, MWatchNotify *m) } // notify completion? 
-  assert(info->is_watch);
-  assert(info->watch_context);
-  assert(m->opcode != CEPH_WATCH_EVENT_DISCONNECT);
+  ceph_assert(info->is_watch);
+  ceph_assert(info->watch_context);
+  ceph_assert(m->opcode != CEPH_WATCH_EVENT_DISCONNECT);
 
   l.unlock();
 
@@ -1023,7 +1023,7 @@ void Objecter::_scan_requests(
   shunique_lock& sul,
   const mempool::osdmap::map *gap_removed_snaps)
 {
-  assert(sul.owns_lock() && sul.mutex() == &rwlock);
+  ceph_assert(sul.owns_lock() && sul.mutex() == &rwlock);
 
   list unregister_lingers;
 
@@ -1033,7 +1033,7 @@ void Objecter::_scan_requests(
   map::iterator lp = s->linger_ops.begin();
   while (lp != s->linger_ops.end()) {
     LingerOp *op = lp->second;
-    assert(op->session == s);
+    ceph_assert(op->session == s);
     // check_linger_pool_dne() may touch linger_ops; prevent iterator
     // invalidation
     ++lp;
@@ -1142,7 +1142,7 @@ void Objecter::handle_osd_map(MOSDMap *m)
   if (!initialized)
     return;
 
-  assert(osdmap);
+  ceph_assert(osdmap);
 
   if (m->fsid != monc->get_fsid()) {
     ldout(cct, 0) << "handle_osd_map fsid " << m->fsid
@@ -1249,7 +1249,7 @@ void Objecter::handle_osd_map(MOSDMap *m)
       }
     }
-      assert(e == osdmap->get_epoch());
+      ceph_assert(e == osdmap->get_epoch());
     }
 
   } else {
@@ -1313,7 +1313,7 @@ void Objecter::handle_osd_map(MOSDMap *m)
       bool mapped_session = false;
       if (!s) {
        int r = _map_session(&op->target, &s, sul);
-       assert(r == 0);
+       ceph_assert(r == 0);
        mapped_session = true;
       } else {
        get_session(s);
@@ -1341,8 +1341,8 @@ void Objecter::handle_osd_map(MOSDMap *m)
       _calc_target(&op->target, nullptr);
       OSDSession *s = NULL;
       const int r = _get_session(op->target.osd, &s, sul);
-      assert(r == 0);
-      assert(s != NULL);
+      ceph_assert(r == 0);
+      ceph_assert(s != NULL);
       op->session = s;
       put_session(s);
     }
@@ -1559,8 +1559,8 @@ void Objecter::_check_op_pool_dne(Op *op, unique_lock *sl)
     OSDSession *s = op->session;
     if (s) {
-      assert(s != NULL);
-      assert(sl->mutex() == &s->lock);
+      ceph_assert(s != NULL);
+      ceph_assert(sl->mutex() == &s->lock);
       bool session_locked = sl->owns_lock();
       if (!session_locked) {
        sl->lock();
@@ -1778,7 +1778,7 @@ void Objecter::_command_cancel_map_check(CommandOp *c)
  */
 int Objecter::_get_session(int osd, OSDSession **session, shunique_lock& sul)
 {
-  assert(sul && sul.mutex() == &rwlock);
+  ceph_assert(sul && sul.mutex() == &rwlock);
 
   if (osd < 0) {
     *session = homeless_session;
@@ -1823,7 +1823,7 @@ void Objecter::put_session(Objecter::OSDSession *s)
 
 void Objecter::get_session(Objecter::OSDSession *s)
 {
-  assert(s != NULL);
+  ceph_assert(s != NULL);
 
   if (!s->is_homeless()) {
     ldout(cct, 20) << __func__ << " s=" << s << " osd=" << s->osd << " "
@@ -2078,7 +2078,7 @@ void Objecter::_kick_requests(OSDSession *session,
        j != session->linger_ops.end(); ++j) {
     LingerOp *op = j->second;
     op->get();
-    assert(lresend.count(j->first) == 0);
+    ceph_assert(lresend.count(j->first) == 0);
     lresend[j->first] = op;
   }
 
@@ -2098,7 +2098,7 @@ void Objecter::_kick_requests(OSDSession *session,
 void Objecter::_linger_ops_resend(map& lresend,
                                   unique_lock& ul)
 {
-  assert(ul.owns_lock());
+  ceph_assert(ul.owns_lock());
   shunique_lock sul(std::move(ul));
   while (!lresend.empty()) {
     LingerOp *op = lresend.begin()->second;
@@ -2113,7 +2113,7 @@ void Objecter::_linger_ops_resend(map& lresend,
 
 void Objecter::start_tick()
 {
-  assert(tick_event == 0);
+  ceph_assert(tick_event == 0);
   tick_event = timer.add_event(
     ceph::make_timespan(cct->_conf->objecter_tick_interval),
     &Objecter::tick, this);
@@ -2152,7 +2152,7 @@ void Objecter::tick()
          p != s->ops.end();
          ++p) {
       Op *op = p->second;
-      assert(op->session);
+      ceph_assert(op->session);
       if (op->stamp < cutoff) {
        ldout(cct, 2) << " tid " << p->first << " on osd." << op->session->osd
                      << " is laggy" << dendl;
@@ -2165,7 +2165,7 @@ void Objecter::tick()
         ++p) {
       LingerOp *op = p->second;
       LingerOp::unique_lock wl(op->watch_lock);
-      assert(op->session);
+      ceph_assert(op->session);
       ldout(cct, 10) << " pinging osd that serves lingering tid " << p->first
                     << " (osd." << op->session->osd << ")" << dendl;
       found = true;
@@ -2176,7 +2176,7 @@ void Objecter::tick()
         p != s->command_ops.end();
         ++p) {
       CommandOp *op = p->second;
-      assert(op->session);
+      ceph_assert(op->session);
       ldout(cct, 10) << " pinging osd that serves command tid " << p->first
                     << " (osd." << op->session->osd << ")" << dendl;
       found = true;
@@ -2275,11 +2275,11 @@ void Objecter::_op_submit_with_budget(Op *op, shunique_lock& sul,
                                      ceph_tid_t *ptid,
                                      int *ctx_budget)
 {
-  assert(initialized);
+  ceph_assert(initialized);
 
-  assert(op->ops.size() == op->out_bl.size());
-  assert(op->ops.size() == op->out_rval.size());
-  assert(op->ops.size() == op->out_handler.size());
+  ceph_assert(op->ops.size() == op->out_bl.size());
+  ceph_assert(op->ops.size() == op->out_rval.size());
+  ceph_assert(op->ops.size() == op->out_handler.size());
 
   // throttle. before we look at any state, because
   // _take_op_budget() may drop our lock while it blocks.
@@ -2384,7 +2384,7 @@ void Objecter::_op_submit(Op *op, shunique_lock& sul, ceph_tid_t *ptid)
   ldout(cct, 10) << __func__ << " op " << op << dendl;
 
   // pick target
-  assert(op->session == NULL);
+  ceph_assert(op->session == NULL);
   OSDSession *s = NULL;
 
   bool check_for_latest_map = _calc_target(&op->target, nullptr)
@@ -2415,17 +2415,17 @@ void Objecter::_op_submit(Op *op, shunique_lock& sul, ceph_tid_t *ptid)
     }
   }
   if (r == -EAGAIN) {
-    assert(s == NULL);
+    ceph_assert(s == NULL);
     r = _get_session(op->target.osd, &s, sul);
   }
-  assert(r == 0);
-  assert(s); // may be homeless
+  ceph_assert(r == 0);
+  ceph_assert(s); // may be homeless
 
   _send_op_account(op);
 
   // send?
-  assert(op->target.flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE));
+  ceph_assert(op->target.flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE));
 
   if (osdmap_full_try) {
     op->target.flags |= CEPH_OSD_FLAG_FULL_TRY;
@@ -2497,7 +2497,7 @@ void Objecter::_op_submit(Op *op, shunique_lock& sul, ceph_tid_t *ptid)
 
 int Objecter::op_cancel(OSDSession *s, ceph_tid_t tid, int r)
 {
-  assert(initialized);
+  ceph_assert(initialized);
 
   OSDSession::unique_lock sl(s->lock);
 
@@ -2624,7 +2624,7 @@ epoch_t Objecter::op_cancel_writes(int r, int64_t pool)
       int cancel_result = op_cancel(s, *titer, r);
       // We hold rwlock across search and cancellation, so cancels
       // should always succeed
-      assert(cancel_result == 0);
+      ceph_assert(cancel_result == 0);
     }
     if (!found && to_cancel.size())
       found = true;
@@ -2840,9 +2840,9 @@ int Objecter::_calc_target(op_target_t *t, Connection *con, bool any_change)
 
   pg_t pgid;
   if (t->precalc_pgid) {
-    assert(t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY);
-    assert(t->base_oid.name.empty()); // make sure this is a pg op
-    assert(t->base_oloc.pool == (int64_t)t->base_pgid.pool());
+    ceph_assert(t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY);
+    ceph_assert(t->base_oid.name.empty()); // make sure this is a pg op
+    ceph_assert(t->base_oloc.pool == (int64_t)t->base_pgid.pool());
     pgid = t->base_pgid;
   } else {
     int ret = osdmap->object_locator_to_pg(t->target_oid, t->target_oloc,
@@ -2962,7 +2962,7 @@ int Objecter::_calc_target(op_target_t *t, Connection *con, bool any_change)
        t->used_replica = true;
       }
     }
-    assert(best >= 0);
+    ceph_assert(best >= 0);
     osd = acting[best];
   } else {
     osd = acting_primary;
@@ -2989,8 +2989,8 @@ int Objecter::_map_session(op_target_t *target, OSDSession **s,
 void Objecter::_session_op_assign(OSDSession *to, Op *op)
 {
   // to->lock is locked
-  assert(op->session == NULL);
-  assert(op->tid);
+  ceph_assert(op->session == NULL);
+  ceph_assert(op->tid);
 
   get_session(to);
   op->session = to;
@@ -3005,7 +3005,7 @@ void Objecter::_session_op_assign(OSDSession *to, Op *op)
 
 void Objecter::_session_op_remove(OSDSession *from, Op *op)
 {
-  assert(op->session == from);
+  ceph_assert(op->session == from);
   // from->lock is locked
 
   if (from->is_homeless()) {
@@ -3022,7 +3022,7 @@ void Objecter::_session_op_remove(OSDSession *from, Op *op)
 void Objecter::_session_linger_op_assign(OSDSession *to, LingerOp *op)
 {
   // to lock is locked unique
-  assert(op->session == NULL);
+  ceph_assert(op->session == NULL);
 
   if (to->is_homeless()) {
     num_homeless_ops++;
@@ -3038,7 +3038,7 @@ void Objecter::_session_linger_op_assign(OSDSession *to, LingerOp *op)
 
 void Objecter::_session_linger_op_remove(OSDSession *from, LingerOp *op)
 {
-  assert(from == op->session);
+  ceph_assert(from == op->session);
   // from->lock is locked unique
 
   if (from->is_homeless()) {
@@ -3055,7 +3055,7 @@ void Objecter::_session_linger_op_remove(OSDSession *from, LingerOp *op)
 
 void Objecter::_session_command_op_remove(OSDSession *from, CommandOp *op)
 {
-  assert(from == op->session);
+  ceph_assert(from == op->session);
   // from->lock is locked
 
   if (from->is_homeless()) {
@@ -3072,8 +3072,8 @@ void Objecter::_session_command_op_remove(OSDSession *from, CommandOp *op)
 void Objecter::_session_command_op_assign(OSDSession *to, CommandOp *op)
 {
   // to->lock is locked
-  assert(op->session == NULL);
-  assert(op->tid);
+  ceph_assert(op->session == NULL);
+  ceph_assert(op->tid);
 
   if (to->is_homeless()) {
     num_homeless_ops++;
@@ -3099,7 +3099,7 @@ int Objecter::_recalc_linger_op_target(LingerOp *linger_op,
     OSDSession *s = NULL;
     r = _get_session(linger_op->target.osd, &s, sul);
-    assert(r == 0);
+    ceph_assert(r == 0);
 
     if (linger_op->session != s) {
       // NB locking two sessions (s and linger_op->session) at the
@@ -3121,7 +3121,7 @@ void Objecter::_cancel_linger_op(Op *op)
 {
   ldout(cct, 15) << "cancel_op " << op->tid << dendl;
 
-  assert(!op->should_resend);
+  ceph_assert(!op->should_resend);
   if (op->onfinish) {
     delete op->onfinish;
     num_in_flight--;
@@ -3150,7 +3150,7 @@ void Objecter::_finish_op(Op *op, int r)
 
   logger->dec(l_osdc_op_active);
 
-  assert(check_latest_map_ops.find(op->tid) == check_latest_map_ops.end());
+  ceph_assert(check_latest_map_ops.find(op->tid) == check_latest_map_ops.end());
 
   inflight_ops--;
 
@@ -3240,7 +3240,7 @@ void Objecter::_send_op(Op *op)
     }
   }
 
-  assert(op->tid > 0);
+  ceph_assert(op->tid > 0);
   MOSDOp *m = _prepare_osd_op(op);
 
   if (op->target.actual_pgid != m->get_spg()) {
@@ -3256,7 +3256,7 @@ void Objecter::_send_op(Op *op)
                << dendl;
 
   ConnectionRef con = op->session->con;
-  assert(con);
+  ceph_assert(con);
 
   // preallocated rx buffer?
   if (op->con) {
@@ -3305,7 +3305,7 @@ void Objecter::_throttle_op(Op *op,
                            shunique_lock& sul,
                            int op_budget)
 {
-  assert(sul && sul.mutex() == &rwlock);
+  ceph_assert(sul && sul.mutex() == &rwlock);
   bool locked_for_write = sul.owns_lock();
 
   if (!op_budget)
@@ -3477,8 +3477,8 @@ void Objecter::handle_osd_op_reply(MOSDOpReply *m)
   vector::iterator pb = op->out_bl.begin();
   vector::iterator pr = op->out_rval.begin();
   vector::iterator ph = op->out_handler.begin();
-  assert(op->out_bl.size() == op->out_rval.size());
-  assert(op->out_bl.size() == op->out_handler.size());
+  ceph_assert(op->out_bl.size() == op->out_rval.size());
+  ceph_assert(op->out_bl.size() == op->out_handler.size());
   vector::iterator p = out_ops.begin();
   for (unsigned i = 0;
        p != out_ops.end() && pb != op->out_bl.end();
@@ -3596,7 +3596,7 @@ void Objecter::handle_osd_backoff(MOSDBackoff *m)
                   << " [" << b->begin << "," << b->end << ")" << dendl;
       auto spgp = s->backoffs.find(b->pgid);
-      assert(spgp != s->backoffs.end());
+      ceph_assert(spgp != s->backoffs.end());
       spgp->second.erase(b->begin);
       if (spgp->second.empty()) {
        s->backoffs.erase(spgp);
@@ -4087,11 +4087,11 @@ void Objecter::handle_pool_op_reply(MPoolOpReply *m)
        // map epoch changed, probably because a MOSDMap message
        // sneaked in. Do caller-specified callback now or else
        // we lose it forever.
-       assert(op->onfinish);
+       ceph_assert(op->onfinish);
        op->onfinish->complete(m->replyCode);
       }
     } else {
-      assert(op->onfinish);
+      ceph_assert(op->onfinish);
       op->onfinish->complete(m->replyCode);
     }
     op->onfinish = NULL;
@@ -4117,7 +4117,7 @@ done:
 
 int Objecter::pool_op_cancel(ceph_tid_t tid, int r)
 {
-  assert(initialized);
+  ceph_assert(initialized);
 
   unique_lock wl(rwlock);
 
@@ -4222,7 +4222,7 @@ void Objecter::handle_get_pool_stats_reply(MGetPoolStatsReply *m)
 
 int Objecter::pool_stat_op_cancel(ceph_tid_t tid, int r)
 {
-  assert(initialized);
+  ceph_assert(initialized);
 
   unique_lock wl(rwlock);
 
@@ -4322,7 +4322,7 @@ void Objecter::handle_fs_stats_reply(MStatfsReply *m)
 
 int Objecter::statfs_op_cancel(ceph_tid_t tid, int r)
 {
-  assert(initialized);
+  ceph_assert(initialized);
 
   unique_lock wl(rwlock);
 
@@ -4815,7 +4815,7 @@ void Objecter::submit_command(CommandOp *c, ceph_tid_t *ptid)
 
 int Objecter::_calc_command_target(CommandOp *c, shunique_lock& sul)
 {
-  assert(sul.owns_lock() && sul.mutex() == &rwlock);
+  ceph_assert(sul.owns_lock() && sul.mutex() == &rwlock);
 
   c->map_check_error = 0;
 
@@ -4853,7 +4853,7 @@ int Objecter::_calc_command_target(CommandOp *c, shunique_lock& sul)
 
   OSDSession *s;
   int r = _get_session(c->target.osd, &s, sul);
-  assert(r != -EAGAIN); /* shouldn't happen as we're holding the write lock */
+  ceph_assert(r != -EAGAIN); /* shouldn't happen as we're holding the write lock */
 
   if (c->session != s) {
     put_session(s);
@@ -4871,11 +4871,11 @@ int Objecter::_calc_command_target(CommandOp *c, shunique_lock& sul)
 void Objecter::_assign_command_session(CommandOp *c,
                                       shunique_lock& sul)
 {
-  assert(sul.owns_lock() && sul.mutex() == &rwlock);
+  ceph_assert(sul.owns_lock() && sul.mutex() == &rwlock);
 
   OSDSession *s;
   int r = _get_session(c->target.osd, &s, sul);
-  assert(r != -EAGAIN); /* shouldn't happen as we're holding the write lock */
+  ceph_assert(r != -EAGAIN); /* shouldn't happen as we're holding the write lock */
 
   if (c->session != s) {
     if (c->session) {
@@ -4894,8 +4894,8 @@ void Objecter::_assign_command_session(CommandOp *c,
 void Objecter::_send_command(CommandOp *c)
 {
   ldout(cct, 10) << "_send_command " << c->tid << dendl;
-  assert(c->session);
-  assert(c->session->con);
+  ceph_assert(c->session);
+  ceph_assert(c->session->con);
   MCommand *m = new MCommand(monc->monmap.fsid);
   m->cmd = c->cmd;
   m->set_data(c->inbl);
@@ -4906,7 +4906,7 @@ void Objecter::_send_command(CommandOp *c)
 
 int Objecter::command_op_cancel(OSDSession *s, ceph_tid_t tid, int r)
 {
-  assert(initialized);
+  ceph_assert(initialized);
 
   unique_lock wl(rwlock);
 
@@ -4952,31 +4952,31 @@ Objecter::OSDSession::~OSDSession()
 {
   // Caller is responsible for re-assigning or
   // destroying any ops that were assigned to us
-  assert(ops.empty());
-  assert(linger_ops.empty());
-  assert(command_ops.empty());
+  ceph_assert(ops.empty());
+  ceph_assert(linger_ops.empty());
+  ceph_assert(command_ops.empty());
 }
 
 Objecter::~Objecter()
 {
   delete osdmap;
 
-  assert(homeless_session->get_nref() == 1);
-  assert(num_homeless_ops == 0);
+  ceph_assert(homeless_session->get_nref() == 1);
+  ceph_assert(num_homeless_ops == 0);
   homeless_session->put();
 
-  assert(osd_sessions.empty());
-  assert(poolstat_ops.empty());
-  assert(statfs_ops.empty());
-  assert(pool_ops.empty());
-  assert(waiting_for_map.empty());
-  assert(linger_ops.empty());
-  assert(check_latest_map_lingers.empty());
-  assert(check_latest_map_ops.empty());
-  assert(check_latest_map_commands.empty());
+  ceph_assert(osd_sessions.empty());
+  ceph_assert(poolstat_ops.empty());
+  ceph_assert(statfs_ops.empty());
+  ceph_assert(pool_ops.empty());
+  ceph_assert(waiting_for_map.empty());
+  ceph_assert(linger_ops.empty());
+  ceph_assert(check_latest_map_lingers.empty());
+  ceph_assert(check_latest_map_ops.empty());
+  ceph_assert(check_latest_map_commands.empty());
 
-  assert(!m_request_state_hook);
-  assert(!logger);
+  ceph_assert(!m_request_state_hook);
+  ceph_assert(!logger);
 }
 
 /**
@@ -5049,7 +5049,7 @@ void Objecter::enumerate_objects(
     hobject_t *next,
     Context *on_finish)
 {
-  assert(result);
+  ceph_assert(result);
 
   if (!end.is_max() && start > end) {
     lderr(cct) << __func__ << ": start " << start << " > end " << end << dendl;
@@ -5069,7 +5069,7 @@ void Objecter::enumerate_objects(
   }
 
   shared_lock rl(rwlock);
-  assert(osdmap->get_epoch());
+  ceph_assert(osdmap->get_epoch());
   if (!osdmap->test_flag(CEPH_OSDMAP_SORTBITWISE)) {
     rl.unlock();
     lderr(cct) << __func__ << ": SORTBITWISE cluster flag not set" << dendl;
@@ -5123,7 +5123,7 @@ void Objecter::_enumerate_reply(
     return;
   }
 
-  assert(next != NULL);
+  ceph_assert(next != NULL);
 
   // Decode the results
   auto iter = bl.cbegin();
@@ -5260,7 +5260,7 @@ namespace {
   {
     OSDOp& osd_op = op->add_op(CEPH_OSD_OP_SCRUBLS);
     op->flags |= CEPH_OSD_FLAG_PGOP;
-    assert(interval);
+    ceph_assert(interval);
     arg.encode(osd_op.indata);
     unsigned p = op->ops.size() - 1;
     auto *h = new C_ObjectOperation_scrub_ls{interval, items, rval};
diff --git a/src/osdc/Objecter.h b/src/osdc/Objecter.h
index bc13542fa4d53..80673c6a77223 100644
--- a/src/osdc/Objecter.h
+++ b/src/osdc/Objecter.h
@@ -84,7 +84,7 @@ struct ObjectOperation {
   }
 
   void set_last_op_flags(int flags) {
-    assert(!ops.empty());
+    ceph_assert(!ops.empty());
     ops.rbegin()->op.flags = flags;
   }
 
@@ -1735,7 +1735,7 @@ public:
     }
     void finished_async() {
       unique_lock l(watch_lock);
-      assert(!watch_pending_async.empty());
+      ceph_assert(!watch_pending_async.empty());
       watch_pending_async.pop_front();
     }
 
@@ -1999,7 +1999,7 @@ private:
   int calc_op_budget(const vector& ops);
   void _throttle_op(Op *op, shunique_lock& sul, int op_size = 0);
   int _take_op_budget(Op *op, shunique_lock& sul) {
-    assert(sul && sul.mutex() == &rwlock);
+    ceph_assert(sul && sul.mutex() == &rwlock);
     int op_budget = calc_op_budget(op->ops);
     if (keep_balanced_budget) {
       _throttle_op(op, sul, op_budget);
@@ -2013,7 +2013,7 @@ private:
   int take_linger_budget(LingerOp *info);
   friend class WatchContext; // to invoke put_up_budget_bytes
   void put_op_budget_bytes(int op_budget) {
-    assert(op_budget >= 0);
+    ceph_assert(op_budget >= 0);
    op_throttle_bytes.put(op_budget);
    op_throttle_ops.put(1);
   }
@@ -2225,7 +2225,7 @@ public:
   void osd_command(int osd, const std::vector& cmd,
                   const bufferlist& inbl, ceph_tid_t *ptid,
                   bufferlist *poutbl, string *prs, Context *onfinish) {
-    assert(osd >= 0);
+    ceph_assert(osd >= 0);
     CommandOp *c = new CommandOp(
       osd,
       cmd,
@@ -3038,7 +3038,7 @@ public:
           bit != p->buffer_extents.end();
           ++bit)
        bl.copy(bit->first, bit->second, cur);
-      assert(cur.length() == p->length);
+      ceph_assert(cur.length() == p->length);
      write_trunc(p->oid, p->oloc, p->offset, p->length, snapc, cur,
                  mtime, flags, p->truncate_size, trunc_seq,
                  oncommit ? gcom.new_sub():0,
diff --git a/src/osdc/Striper.cc b/src/osdc/Striper.cc
index 66a4e44e35074..74350a2b01e22 100644
--- a/src/osdc/Striper.cc
+++ b/src/osdc/Striper.cc
@@ -50,7 +50,7 @@ void Striper::file_to_extents(
   ldout(cct, 10) << "file_to_extents " << offset << "~" << len
                 << " format " << object_format << dendl;
 
-  assert(len > 0);
+  ceph_assert(len > 0);
 
   /*
    * we want only one extent per object!  this means that each extent
@@ -61,7 +61,7 @@ void Striper::file_to_extents(
   __u32 object_size = layout->object_size;
   __u32 su = layout->stripe_unit;
   __u32 stripe_count = layout->stripe_count;
-  assert(object_size >= su);
+  ceph_assert(object_size >= su);
   if (stripe_count == 1) {
     ldout(cct, 20) << " sc is one, reset su to os" << dendl;
     su = object_size;
@@ -171,7 +171,7 @@ void Striper::extent_to_file(CephContext *cct, file_layout_t *layout,
   __u32 object_size = layout->object_size;
   __u32 su = layout->stripe_unit;
   __u32 stripe_count = layout->stripe_count;
-  assert(object_size >= su);
+  ceph_assert(object_size >= su);
   uint64_t stripes_per_object = object_size / su;
   ldout(cct, 20) << " stripes_per_object " << stripes_per_object << dendl;
 
@@ -209,7 +209,7 @@ uint64_t Striper::object_truncate_size(CephContext *cct,
   __u32 object_size = layout->object_size;
   __u32 su = layout->stripe_unit;
   __u32 stripe_count = layout->stripe_count;
-  assert(object_size >= su);
+  ceph_assert(object_size >= su);
   uint64_t stripes_per_object = object_size / su;
 
   uint64_t objectsetno = objectno / stripe_count;
@@ -326,7 +326,7 @@ void Striper::StripedReadResult::add_partial_sparse_result(
       }
     }
 
-    assert(s->first <= bl_off);
+    ceph_assert(s->first <= bl_off);
     size_t left = (s->first + s->second) - bl_off;
     size_t actual = std::min(left, tlen);
 
@@ -376,7 +376,7 @@ void Striper::StripedReadResult::assemble_result(CephContext *cct,
 void Striper::StripedReadResult::assemble_result(CephContext *cct, char *buffer,
                                                 size_t length)
 {
-  assert(buffer && length == total_intended_len);
+  ceph_assert(buffer && length == total_intended_len);
 
   map >::reverse_iterator p = partial.rbegin();
   if (p == partial.rend())
@@ -389,11 +389,11 @@ void Striper::StripedReadResult::assemble_result(CephContext *cct, char *buffer,
     ldout(cct, 20) << "assemble_result(" << this << ") " << p->first << "~"
                   << p->second.second << " " << p->second.first.length()
                   << " bytes" << dendl;
-    assert(p->first == end - p->second.second);
+    ceph_assert(p->first == end - p->second.second);
     end = p->first;
     size_t len = p->second.first.length();
-    assert(curr >= p->second.second);
+    ceph_assert(curr >= p->second.second);
     curr -= p->second.second;
     if (len < p->second.second) {
       if (len)
@@ -405,6 +405,6 @@ void Striper::StripedReadResult::assemble_result(CephContext *cct, char *buffer,
     ++p;
   }
   partial.clear();
-  assert(curr == 0);
+  ceph_assert(curr == 0);
 }
-- 
2.39.5
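The substitution above is mechanical, but the reason for it is behavioural: the standard assert() from <cassert> is compiled out whenever NDEBUG is defined, whereas ceph_assert() is meant to keep evaluating its condition in release builds and to report the failure through Ceph's own handler before aborting. The stand-alone sketch below only illustrates that difference under those assumptions; my_ceph_assert() and my_assert_fail() are hypothetical names for this example, not Ceph's actual ceph_assert definition.

// Illustrative sketch only -- not Ceph's real implementation.
// Shows a ceph_assert-style macro that is NOT disabled by -DNDEBUG
// and routes failures through a project-specific handler.

#include <cstdio>
#include <cstdlib>

[[noreturn]] inline void my_assert_fail(const char *expr, const char *file,
                                        int line, const char *func)
{
  // A real handler would also write to the cluster log before aborting.
  std::fprintf(stderr, "%s:%d: %s: assertion `%s' failed\n",
               file, line, func, expr);
  std::abort();
}

// Unlike <cassert>'s assert, this keeps checking in release builds.
#define my_ceph_assert(expr)                                        \
  (static_cast<bool>(expr)                                          \
       ? void(0)                                                    \
       : my_assert_fail(#expr, __FILE__, __LINE__, __func__))

int main()
{
  int op_budget = 16;
  my_ceph_assert(op_budget >= 0);  // holds: execution continues
  std::puts("budget check passed");
  my_ceph_assert(op_budget == 0);  // fails: prints the message and aborts
}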