const std::string &prefix, ///< [in] Prefix/CF ==> MUST match some established merge operator
const std::string &key, ///< [in] Key to be merged
const bufferlist &value ///< [in] value to be merged into key
- ) { assert(0 == "Not implemented"); }
+ ) { ceph_assert(0 == "Not implemented"); }
virtual ~TransactionImpl() {}
};
int KineticStore::open(ostream &out, const vector<ColumnFamily>& cfs)
{
if (!cfs.empty()) {
- assert(0 == "Not implemented");
+ ceph_assert(0 == "Not implemented");
}
return do_open(out, false);
}
int KineticStore::create_and_open(ostream &out, const vector<ColumnFamily>& cfs)
{
if (!cfs.empty()) {
- assert(0 == "Not implemented");
+ ceph_assert(0 == "Not implemented");
}
return do_open(out, true);
}
record);
dout(30) << "kinetic after put of " << it->key << dendl;
} else {
- assert(it->type == KINETIC_OP_DELETE);
+ ceph_assert(it->type == KINETIC_OP_DELETE);
dout(30) << "kinetic before delete" << dendl;
status = kinetic_conn->Delete(it->key, "",
kinetic::WriteMode::IGNORE_VERSION);
int LevelDBStore::open(ostream &out, const vector<ColumnFamily>& cfs) {
if (!cfs.empty()) {
- assert(0 == "Not implemented");
+ ceph_assert(0 == "Not implemented");
}
return do_open(out, false);
}
int LevelDBStore::create_and_open(ostream &out, const vector<ColumnFamily>& cfs) {
if (!cfs.empty()) {
- assert(0 == "Not implemented");
+ ceph_assert(0 == "Not implemented");
}
return do_open(out, true);
}
filterpolicy.reset(_filterpolicy);
ldoptions.filter_policy = filterpolicy.get();
#else
- assert(0 == "bloom size set but installed leveldb doesn't support bloom filters");
+ ceph_assert(0 == "bloom size set but installed leveldb doesn't support bloom filters");
#endif
}
if (options.compression_enabled)
const string &key,
bufferlist *out)
{
- assert(out && (out->length() == 0));
+ ceph_assert(out && (out->length() == 0));
utime_t start = ceph_clock_now();
int r = 0;
string value, k;
static void split_key(const string& raw_key, string *prefix, string *key)
{
size_t pos = raw_key.find(KEY_DELIM, 0);
- assert(pos != std::string::npos);
+ ceph_assert(pos != std::string::npos);
*prefix = raw_key.substr(0, pos);
*key = raw_key.substr(pos + 1, raw_key.length());
}
int MemDB::open(ostream &out, const vector<ColumnFamily>& cfs) {
if (!cfs.empty()) {
- assert(0 == "Not implemented");
+ ceph_assert(0 == "Not implemented");
}
return do_open(out, false);
}
int MemDB::create_and_open(ostream &out, const vector<ColumnFamily>& cfs) {
if (!cfs.empty()) {
- assert(0 == "Not implemented");
+ ceph_assert(0 == "Not implemented");
}
return do_open(out, true);
}
_merge(merge_op);
} else {
ms_op_t rm_op = op.second;
- assert(op.first == MDBTransactionImpl::DELETE);
+ ceph_assert(op.first == MDBTransactionImpl::DELETE);
_rmkey(rm_op);
}
}
/*
* delete and free existing key.
*/
- assert(m_total_bytes >= bl_old.length());
+ ceph_assert(m_total_bytes >= bl_old.length());
m_total_bytes -= bl_old.length();
m_map.erase(key);
}
bufferlist bl_old;
if (_get(op.first.first, op.first.second, &bl_old)) {
- assert(m_total_bytes >= bl_old.length());
+ ceph_assert(m_total_bytes >= bl_old.length());
m_total_bytes -= bl_old.length();
}
iterator_seq_no++;
* find the operator for this prefix
*/
std::shared_ptr<MergeOperator> mop = _find_merge_op(prefix);
- assert(mop);
+ ceph_assert(mop);
/*
* call the merge operator with value and non value
bl_old.clear();
}
- assert((int64_t)m_total_bytes + bytes_adjusted >= 0);
+ ceph_assert((int64_t)m_total_bytes + bytes_adjusted >= 0);
m_total_bytes += bytes_adjusted;
iterator_seq_no++;
return 0;
if (this_seq_no != *global_seq_no) {
auto key = m_key_value.first;
- assert(!key.empty());
+ ceph_assert(!key.empty());
bool restart_iter = false;
if (!m_using_btree) {
std::shared_ptr<KeyValueDB::MergeOperator> mop)
{
// If you fail here, it's because you can't do this on an open database
- assert(db == nullptr);
+ ceph_assert(db == nullptr);
merge_ops.push_back(std::make_pair(prefix,mop));
return 0;
}
const string &cf_name,
rocksdb::ColumnFamilyOptions *cf_opt)
{
- assert(cf_opt != nullptr);
+ ceph_assert(cf_opt != nullptr);
cf_opt->merge_operator.reset();
for (auto& i : merge_ops) {
if (i.first == cf_name) {
}
}
}
- assert(default_cf != nullptr);
+ ceph_assert(default_cf != nullptr);
PerfCountersBuilder plb(g_ceph_context, "rocksdb", l_rocksdb_first, l_rocksdb_last);
plb.add_u64_counter(l_rocksdb_gets, "get", "Gets");
const string &key,
bufferlist *out)
{
- assert(out && (out->length() == 0));
+ ceph_assert(out && (out->length() == 0));
utime_t start = ceph_clock_now();
int r = 0;
string value;
size_t keylen,
bufferlist *out)
{
- assert(out && (out->length() == 0));
+ ceph_assert(out && (out->length() == 0));
utime_t start = ceph_clock_now();
int r = 0;
string value;
int RocksDBStore::RocksDBWholeSpaceIteratorImpl::seek_to_first()
{
dbiter->SeekToFirst();
- assert(!dbiter->status().IsIOError());
+ ceph_assert(!dbiter->status().IsIOError());
return dbiter->status().ok() ? 0 : -1;
}
int RocksDBStore::RocksDBWholeSpaceIteratorImpl::seek_to_first(const string &prefix)
{
rocksdb::Slice slice_prefix(prefix);
dbiter->Seek(slice_prefix);
- assert(!dbiter->status().IsIOError());
+ ceph_assert(!dbiter->status().IsIOError());
return dbiter->status().ok() ? 0 : -1;
}
int RocksDBStore::RocksDBWholeSpaceIteratorImpl::seek_to_last()
{
dbiter->SeekToLast();
- assert(!dbiter->status().IsIOError());
+ ceph_assert(!dbiter->status().IsIOError());
return dbiter->status().ok() ? 0 : -1;
}
int RocksDBStore::RocksDBWholeSpaceIteratorImpl::seek_to_last(const string &prefix)
if (valid()) {
dbiter->Next();
}
- assert(!dbiter->status().IsIOError());
+ ceph_assert(!dbiter->status().IsIOError());
return dbiter->status().ok() ? 0 : -1;
}
int RocksDBStore::RocksDBWholeSpaceIteratorImpl::prev()
if (valid()) {
dbiter->Prev();
}
- assert(!dbiter->status().IsIOError());
+ ceph_assert(!dbiter->status().IsIOError());
return dbiter->status().ok() ? 0 : -1;
}
string RocksDBStore::RocksDBWholeSpaceIteratorImpl::key()
count++;
}
}
- assert(elems_ == count);
+ ceph_assert(elems_ == count);
delete[] list_;
list_ = new_list;
length_ = new_length;
BinnedLRUCacheShard::~BinnedLRUCacheShard() {}
bool BinnedLRUCacheShard::Unref(BinnedLRUHandle* e) {
- assert(e->refs > 0);
+ ceph_assert(e->refs > 0);
e->refs--;
return e->refs == 0;
}
std::lock_guard<std::mutex> l(mutex_);
while (lru_.next != &lru_) {
BinnedLRUHandle* old = lru_.next;
- assert(old->InCache());
- assert(old->refs ==
+ ceph_assert(old->InCache());
+ ceph_assert(old->refs ==
1); // LRU list contains elements which may be evicted
LRU_Remove(old);
table_.Remove(old->key(), old->hash);
}
void BinnedLRUCacheShard::LRU_Remove(BinnedLRUHandle* e) {
- assert(e->next != nullptr);
- assert(e->prev != nullptr);
+ ceph_assert(e->next != nullptr);
+ ceph_assert(e->prev != nullptr);
if (lru_low_pri_ == e) {
lru_low_pri_ = e->prev;
}
e->prev = e->next = nullptr;
lru_usage_ -= e->charge;
if (e->InHighPriPool()) {
- assert(high_pri_pool_usage_ >= e->charge);
+ ceph_assert(high_pri_pool_usage_ >= e->charge);
high_pri_pool_usage_ -= e->charge;
}
}
void BinnedLRUCacheShard::LRU_Insert(BinnedLRUHandle* e) {
- assert(e->next == nullptr);
- assert(e->prev == nullptr);
+ ceph_assert(e->next == nullptr);
+ ceph_assert(e->prev == nullptr);
if (high_pri_pool_ratio_ > 0 && e->IsHighPri()) {
// Insert "e" to head of LRU list.
e->next = &lru_;
while (high_pri_pool_usage_ > high_pri_pool_capacity_) {
// Overflow last entry in high-pri pool to low-pri pool.
lru_low_pri_ = lru_low_pri_->next;
- assert(lru_low_pri_ != &lru_);
+ ceph_assert(lru_low_pri_ != &lru_);
lru_low_pri_->SetInHighPriPool(false);
high_pri_pool_usage_ -= lru_low_pri_->charge;
}
ceph::autovector<BinnedLRUHandle*>* deleted) {
while (usage_ + charge > capacity_ && lru_.next != &lru_) {
BinnedLRUHandle* old = lru_.next;
- assert(old->InCache());
- assert(old->refs == 1); // LRU list contains elements which may be evicted
+ ceph_assert(old->InCache());
+ ceph_assert(old->refs == 1); // LRU list contains elements which may be evicted
LRU_Remove(old);
table_.Remove(old->key(), old->hash);
old->SetInCache(false);
std::lock_guard<std::mutex> l(mutex_);
BinnedLRUHandle* e = table_.Lookup(key, hash);
if (e != nullptr) {
- assert(e->InCache());
+ ceph_assert(e->InCache());
if (e->refs == 1) {
LRU_Remove(e);
}
if (usage_ > capacity_ || force_erase) {
// the cache is full
// The LRU list must be empty since the cache is full
- assert(!(usage_ > capacity_) || lru_.next == &lru_);
+ ceph_assert(!(usage_ > capacity_) || lru_.next == &lru_);
// take this opportunity and remove the item
table_.Remove(e->key(), e->hash);
e->SetInCache(false);
size_t BinnedLRUCacheShard::GetPinnedUsage() const {
std::lock_guard<std::mutex> l(mutex_);
- assert(usage_ >= lru_usage_);
+ ceph_assert(usage_ >= lru_usage_);
return usage_ - lru_usage_;
}
#include "ShardedCache.h"
#include "common/autovector.h"
+#include "include/assert.h"
namespace rocksdb_cache {
void SetHit() { flags |= 8; }
void Free() {
- assert((refs == 1 && InCache()) || (refs == 0 && !InCache()));
+ ceph_assert((refs == 1 && InCache()) || (refs == 0 && !InCache()));
if (deleter) {
(*deleter)(key(), value);
}
BinnedLRUHandle* h = list_[i];
while (h != nullptr) {
auto n = h->next_hash;
- assert(h->InCache());
+ ceph_assert(h->InCache());
func(h);
h = n;
}