}
struct XorMergeOperator : public KeyValueDB::MergeOperator {
- virtual void merge_nonexistent(
+ void merge_nonexistent(
const char *rdata, size_t rlen, std::string *new_value) override {
*new_value = std::string(rdata, rlen);
}
- virtual void merge(
+ void merge(
const char *ldata, size_t llen,
const char *rdata, size_t rlen,
- std::string *new_value) {
+ std::string *new_value) override {
assert(llen == rlen);
*new_value = std::string(ldata, llen);
for (size_t i = 0; i < rlen; ++i) {
}
// We use each operator name and each prefix to construct the
// overall RocksDB operator name for the consistency check at open time.
- virtual string name() const {
+ string name() const override {
return "bitwise_xor";
}
};
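Note: the mechanical `virtual` → `override` conversion above is not cosmetic. With plain `virtual`, a signature mismatch in a subclass silently declares a brand-new function instead of overriding; `override` turns the same mistake into a compile error. A minimal standalone sketch, with hypothetical names not taken from this patch:

    #include <cstddef>
    #include <string>

    struct MergeOp {
      virtual ~MergeOp() = default;
      virtual void merge(const char *ldata, size_t llen,
                         const char *rdata, size_t rlen,
                         std::string *new_value) = 0;
    };

    struct Buggy : MergeOp {
      // Typo: `int llen` instead of `size_t llen`. This compiles as an
      // unrelated overload; the base merge() stays pure and this body is
      // never reached through a MergeOp*.
      virtual void merge(const char *ldata, int llen,
                         const char *rdata, size_t rlen,
                         std::string *new_value) {}
    };

    struct Safe : MergeOp {
      // With `override`, the same typo would be rejected at compile time
      // ("marked 'override' but does not override"); the correct
      // signature compiles and actually overrides.
      void merge(const char *ldata, size_t llen,
                 const char *rdata, size_t rlen,
                 std::string *new_value) override {
        *new_value = std::string(ldata, llen);
      }
    };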
// If an error was encountered, returns a non-OK status.
//
// REQUIRES: External synchronization
- rocksdb::Status Read(size_t n, rocksdb::Slice* result, char* scratch) {
+ rocksdb::Status Read(size_t n, rocksdb::Slice* result, char* scratch) override {
int r = fs->read(h, &h->buf, h->buf.pos, n, NULL, scratch);
assert(r >= 0);
*result = rocksdb::Slice(scratch, r);
// file, and Skip will return OK.
//
// REQUIRES: External synchronization
- rocksdb::Status Skip(uint64_t n) {
+ rocksdb::Status Skip(uint64_t n) override {
h->buf.skip(n);
return rocksdb::Status::OK();
}
// Remove any kind of caching of data from the offset to offset+length
// of this file. If the length is 0, then it refers to the end of file.
// If the system is not caching the file contents, then this is a noop.
- rocksdb::Status InvalidateCache(size_t offset, size_t length) {
+ rocksdb::Status InvalidateCache(size_t offset, size_t length) override {
fs->invalidate_cache(h->file, offset, length);
return rocksdb::Status::OK();
}
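For reference, the posix Env meets this same InvalidateCache contract via fadvise; a rough sketch of that counterpart (an illustration, not part of this patch):

    #include <fcntl.h>
    #include <rocksdb/status.h>

    // Drop cached pages for [offset, offset+length); length == 0 means
    // "to end of file". On a store that does no caching this is a no-op.
    rocksdb::Status PosixStyleInvalidateCache(int fd, size_t offset,
                                              size_t length) {
      int r = posix_fadvise(fd, offset, length, POSIX_FADV_DONTNEED);
      return r == 0 ? rocksdb::Status::OK()
                    : rocksdb::Status::IOError("posix_fadvise failed");
    }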
//
// Safe for concurrent use by multiple threads.
rocksdb::Status Read(uint64_t offset, size_t n, rocksdb::Slice* result,
- char* scratch) const {
+ char* scratch) const override {
int r = fs->read_random(h, offset, n, scratch);
assert(r >= 0);
*result = rocksdb::Slice(scratch, r);
// Used by the file_reader_writer to decide if the ReadAhead wrapper
// should simply forward the call and not enact buffering or locking.
- bool ShouldForwardRawRequest() const {
+ bool ShouldForwardRawRequest() const override {
return false;
}
// For cases when read-ahead is implemented in the platform-dependent
// layer
- void EnableReadAhead() {}
+ void EnableReadAhead() override {}
// Tries to get a unique ID for this file that will be the same each time
// the file is opened (and will stay the same while the file is open).
// a single varint.
//
// Note: these IDs are only valid for the duration of the process.
- size_t GetUniqueId(char* id, size_t max_size) const {
+ size_t GetUniqueId(char* id, size_t max_size) const override {
return snprintf(id, max_size, "%016llx",
(unsigned long long)h->file->fnode.ino);
}
//enum AccessPattern { NORMAL, RANDOM, SEQUENTIAL, WILLNEED, DONTNEED };
- void Hint(AccessPattern pattern) {
+ void Hint(AccessPattern pattern) override {
if (pattern == RANDOM)
h->buf.max_prefetch = 4096;
else if (pattern == SEQUENTIAL)
// Remove any kind of caching of data from the offset to offset+length
// of this file. If the length is 0, then it refers to the end of file.
// If the system is not caching the file contents, then this is a noop.
- rocksdb::Status InvalidateCache(size_t offset, size_t length) {
+ rocksdb::Status InvalidateCache(size_t offset, size_t length) override {
fs->invalidate_cache(h->file, offset, length);
return rocksdb::Status::OK();
}
return c_DefaultPageSize;
}*/
- rocksdb::Status Append(const rocksdb::Slice& data) {
+ rocksdb::Status Append(const rocksdb::Slice& data) override {
h->append(data.data(), data.size());
return rocksdb::Status::OK();
}
// to simple append as most of the tests are buffered by default
rocksdb::Status PositionedAppend(
const rocksdb::Slice& /* data */,
- uint64_t /* offset */) {
+ uint64_t /* offset */) override {
return rocksdb::Status::NotSupported();
}
// before closing. It is not always possible to keep track of the file
// size due to whole-page writes. The behavior is undefined if called
// with other writes to follow.
- rocksdb::Status Truncate(uint64_t size) {
+ rocksdb::Status Truncate(uint64_t size) override {
// we mirror the posix env, which does nothing here; instead, it
// truncates to the final size on close. whatever!
return rocksdb::Status::OK();
// return err_to_status(r);
}
- rocksdb::Status Close() {
+ rocksdb::Status Close() override {
Flush();
// mimic posix env, here. shrug.
return rocksdb::Status::OK();
}
- rocksdb::Status Flush() {
+ rocksdb::Status Flush() override {
fs->flush(h);
return rocksdb::Status::OK();
}
- rocksdb::Status Sync() { // sync data
+ rocksdb::Status Sync() override { // sync data
fs->fsync(h);
return rocksdb::Status::OK();
}
// true if Sync() and Fsync() are safe to call concurrently with Append()
// and Flush().
- bool IsSyncThreadSafe() const {
+ bool IsSyncThreadSafe() const override {
return true;
}
/*
* Get the size of valid data in the file.
*/
- uint64_t GetFileSize() {
+ uint64_t GetFileSize() override {
return h->file->fnode.size + h->buffer.length();
}
// For documentation, refer to RandomAccessFile::GetUniqueId()
- size_t GetUniqueId(char* id, size_t max_size) const {
+ size_t GetUniqueId(char* id, size_t max_size) const override {
return snprintf(id, max_size, "%016llx",
(unsigned long long)h->file->fnode.ino);
}
// of this file. If the length is 0, then it refers to the end of file.
// If the system is not caching the file contents, then this is a noop.
// This call has no effect on dirty pages in the cache.
- rocksdb::Status InvalidateCache(size_t offset, size_t length) {
+ rocksdb::Status InvalidateCache(size_t offset, size_t length) override {
fs->invalidate_cache(h->file, offset, length);
return rocksdb::Status::OK();
}
explicit BlueRocksDirectory(BlueFS *f) : fs(f) {}
// Fsync directory. Can be called concurrently from multiple threads.
- rocksdb::Status Fsync() {
+ rocksdb::Status Fsync() override {
// it is sufficient to flush the log.
fs->sync_metadata();
return rocksdb::Status::OK();
// merge operators
struct Int64ArrayMergeOperator : public KeyValueDB::MergeOperator {
- virtual void merge_nonexistent(
+ void merge_nonexistent(
const char *rdata, size_t rlen, std::string *new_value) override {
*new_value = std::string(rdata, rlen);
}
- virtual void merge(
+ void merge(
const char *ldata, size_t llen,
const char *rdata, size_t rlen,
- std::string *new_value) {
+ std::string *new_value) override {
assert(llen == rlen);
assert((rlen % 8) == 0);
new_value->resize(rlen);
}
// We use each operator name and each prefix to construct the
// overall RocksDB operator name for the consistency check at open time.
- virtual string name() const {
+ string name() const override {
return "int64_array";
}
};
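The loop body of merge() above falls outside this hunk; the operator sums corresponding 64-bit elements of the two operands, as the `rlen % 8` assert suggests. A hedged standalone sketch of that semantic (host byte order for simplicity; Ceph encodes the elements little-endian):

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <string>

    // Element-wise sum of two equal-length arrays of 8-byte integers.
    std::string merge_int64_arrays(const std::string &l,
                                   const std::string &r) {
      assert(l.size() == r.size() && (r.size() % 8) == 0);
      std::string out(r.size(), '\0');
      for (size_t off = 0; off < r.size(); off += 8) {
        int64_t a, b;
        std::memcpy(&a, l.data() + off, 8);
        std::memcpy(&b, r.data() + off, 8);
        int64_t sum = a + b;
        std::memcpy(&out[off], &sum, 8);
      }
      return out;
    }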
C_JournaledAhead(FileStore *f, FileStore::OpSequencer *os, FileStore::Op *o, Context *ondisk):
fs(f), osr(os), o(o), ondisk(ondisk) { }
- void finish(int r) {
+ void finish(int r) override {
fs->_journaled_ahead(osr, o, ondisk);
}
};
{
}
- void finish(int r) {
+ void finish(int r) override {
BackTrace *bt = new BackTrace(1);
generic_dout(-1) << "FileStore: sync_entry timed out after "
<< m_commit_timeo << " seconds.\n";
OmapIteratorImpl(CollectionRef c, ObjectRef o)
: c(c), o(o), it(o->omap.begin()) {}
- int seek_to_first() {
+ int seek_to_first() override {
- std::lock_guard<std::mutex>(o->omap_mutex);
+ std::lock_guard<std::mutex> lock(o->omap_mutex);
it = o->omap.begin();
return 0;
}
- int upper_bound(const string &after) {
+ int upper_bound(const string &after) override {
- std::lock_guard<std::mutex>(o->omap_mutex);
+ std::lock_guard<std::mutex> lock(o->omap_mutex);
it = o->omap.upper_bound(after);
return 0;
}
- int lower_bound(const string &to) {
+ int lower_bound(const string &to) override {
- std::lock_guard<std::mutex>(o->omap_mutex);
+ std::lock_guard<std::mutex> lock(o->omap_mutex);
it = o->omap.lower_bound(to);
return 0;
}
- bool valid() {
+ bool valid() override {
- std::lock_guard<std::mutex>(o->omap_mutex);
+ std::lock_guard<std::mutex> lock(o->omap_mutex);
return it != o->omap.end();
}
- int next(bool validate=true) {
+ int next(bool validate=true) override {
- std::lock_guard<std::mutex>(o->omap_mutex);
+ std::lock_guard<std::mutex> lock(o->omap_mutex);
++it;
return 0;
}
- string key() {
+ string key() override {
- std::lock_guard<std::mutex>(o->omap_mutex);
+ std::lock_guard<std::mutex> lock(o->omap_mutex);
return it->first;
}
- bufferlist value() {
+ bufferlist value() override {
- std::lock_guard<std::mutex>(o->omap_mutex);
+ std::lock_guard<std::mutex> lock(o->omap_mutex);
return it->second;
}
- int status() {
+ int status() override {
return 0;
}
};
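The unnamed std::lock_guard temporaries fixed above are a classic pitfall: a guard with no name is destroyed at the end of the full expression, so the mutex is released before the code it was meant to protect ever runs. The named form holds the lock to the end of the scope:

    // Broken: temporary guard; the lock is dropped immediately.
    std::lock_guard<std::mutex>(o->omap_mutex);
    it = o->omap.begin();   // races with other omap users

    // Fixed: named guard; the lock is held until end of scope.
    std::lock_guard<std::mutex> lock(o->omap_mutex);
    it = o->omap.begin();   // safely under the lock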
set<int> want;
OnRecoveryReadComplete(ECBackend *pg, const hobject_t &hoid)
: pg(pg), hoid(hoid) {}
- void finish(pair<RecoveryMessages *, ECBackend::read_result_t &> &in) {
+ void finish(pair<RecoveryMessages *, ECBackend::read_result_t &> &in) override {
ECBackend::read_result_t &res = in.second;
if (!(res.r == 0 && res.errors.empty())) {
pg->_failed_push(hoid, in);
map<int, MOSDPGPushReply*> &in) : l(l), epoch(epoch) {
replies.swap(in);
}
- void finish(int) {
+ void finish(int) override {
for (map<int, MOSDPGPushReply*>::iterator i = replies.begin();
i != replies.end();
++i) {
eversion_t last_complete)
: pg(pg), msg(msg), tid(tid),
version(version), last_complete(last_complete) {}
- void finish(int) {
+ void finish(int) override {
if (msg)
msg->mark_event("sub_op_committed");
pg->sub_write_committed(tid, version, last_complete);
ceph_tid_t tid,
eversion_t version)
: pg(pg), msg(msg), tid(tid), version(version) {}
- void finish(int) {
+ void finish(int) override {
if (msg)
msg->mark_event("sub_op_applied");
pg->sub_write_applied(tid, version);
ECBackend *ec;
ceph_tid_t tid;
FinishReadOp(ECBackend *ec, ceph_tid_t tid) : ec(ec), tid(tid) {}
- void finish(ThreadPool::TPHandle &handle) {
+ void finish(ThreadPool::TPHandle &handle) override {
auto ropiter = ec->tid_to_read_map.find(tid);
assert(ropiter != ec->tid_to_read_map.end());
int priority = ropiter->second.priority;
ECBackend::ClientAsyncReadStatus *status,
const list<boost::tuple<uint64_t, uint64_t, uint32_t> > &to_read)
: hoid(hoid), ec(ec), status(status), to_read(to_read) {}
- void finish(pair<RecoveryMessages *, ECBackend::read_result_t &> &in) {
+ void finish(pair<RecoveryMessages *, ECBackend::read_result_t &> &in) override {
ECBackend::read_result_t &res = in.second;
extent_map result;
if (res.r != 0)
PGRef pg;
public:
explicit AgentTimeoutCB(PGRef _pg) : pg(_pg) {}
- void finish(int) {
+ void finish(int) override {
pg->agent_choose_mode_restart();
}
};
public:
explicit OSDSocketHook(OSD *o) : osd(o) {}
bool call(std::string command, cmdmap_t& cmdmap, std::string format,
- bufferlist& out) {
+ bufferlist& out) override {
stringstream ss;
bool r = osd->asok_command(command, cmdmap, format, ss);
out.append(ss);
OSD *osd;
uint64_t oldest, newest;
explicit C_OSD_GetVersion(OSD *o) : osd(o), oldest(0), newest(0) {}
- void finish(int r) {
+ void finish(int r) override {
if (r >= 0)
osd->_got_mon_epochs(oldest, newest);
}
MOSDMap *msg;
C_OnMapCommit(OSD *o, epoch_t f, epoch_t l, MOSDMap *m)
: osd(o), first(f), last(l), msg(m) {}
- void finish(int r) {
+ void finish(int r) override {
osd->_committed_osd_maps(first, last, msg);
}
};
const list<OSDMapRef> &pinned_maps,
epoch_t e)
: service(service), pinned_maps(pinned_maps), e(e) {}
- void finish(int r) {
+ void finish(int r) override {
service->clear_map_bl_cache_pins(e);
}
};
C_OpenPGs(set<PGRef>& p, ObjectStore *s, OSD* o) : store(s), osd(o) {
pgs.swap(p);
}
- void finish(int r) {
+ void finish(int r) override {
RWLock::RLocker l(osd->pg_map_lock);
for (auto p : pgs) {
if (osd->pg_map.count(p->info.pgid)) {
osd(osd), name(n), con(con), osdmap(osdmap), map_epoch(map_epoch) {
}
- void finish(ThreadPool::TPHandle& tp) {
+ void finish(ThreadPool::TPHandle& tp) override {
Session *session = static_cast<Session *>(
con->get_priv());
epoch_t last_sent_epoch;
set<boost::intrusive_ptr<PG> > pgs;
C_CompleteSplits(OSD *osd, const set<boost::intrusive_ptr<PG> > &in)
: osd(osd), pgs(in) {}
- void finish(int r) {
+ void finish(int r) override {
Mutex::Locker l(osd->osd_lock);
if (osd->is_stopping())
return;
}
protected:
- virtual void dump_item(const CrushTreeDumper::Item &qi, TextTable *tbl) {
+ void dump_item(const CrushTreeDumper::Item &qi, TextTable *tbl) override {
*tbl << qi.id
<< weightf_t(qi.weight);
}
protected:
- virtual void dump_item_fields(const CrushTreeDumper::Item &qi, Formatter *f) {
+ void dump_item_fields(const CrushTreeDumper::Item &qi, Formatter *f) override {
Parent::dump_item_fields(qi, f);
if (!qi.is_bucket())
{
epoch_t activation_epoch;
C_PG_ActivateCommitted(PG *p, epoch_t e, epoch_t ae)
: pg(p), epoch(e), activation_epoch(ae) {}
- void finish(int r) {
+ void finish(int r) override {
pg->_activate_committed(epoch, activation_epoch);
}
};
struct C_PG_FinishRecovery : public Context {
PGRef pg;
explicit C_PG_FinishRecovery(PG *p) : pg(p) {}
- void finish(int r) {
+ void finish(int r) override {
pg->_finish_recovery(this);
}
};
PGBackend *pg,
ObjectStore::Transaction *t)
: soid(soid), pg(pg), t(t) {}
- void rmobject(version_t old_version) {
+ void rmobject(version_t old_version) override {
pg->trim_rollback_object(
soid,
old_version,
* results.get<1>() is a pointer to a CopyResults object, which you are
* responsible for deleting.
*/
- virtual void finish(CopyCallbackResults results_) = 0;
+ void finish(CopyCallbackResults results_) override = 0;
public:
/// Provide the final size of the copied object to the CopyCallback
- virtual ~CopyCallback() {}
+ ~CopyCallback() override {}
};
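Note the `override = 0` spelling above: the two specifiers compose, so finish() both asserts that it overrides a base-class virtual and remains pure, keeping CopyCallback abstract. A minimal sketch:

    struct Base { virtual ~Base() = default; virtual void finish(int) = 0; };
    struct Mid  : Base { void finish(int) override = 0; };   // still abstract
    struct Leaf : Mid  { void finish(int r) override {} };   // first concrete type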
template <typename T>
public:
BlessedGenContext(PrimaryLogPG *pg, GenContext<T> *c, epoch_t e)
: pg(pg), c(c), e(e) {}
- void finish(T t) {
+ void finish(T t) override {
pg->lock();
if (pg->pg_has_reset_since(e))
c.reset();
public:
BlessedContext(PrimaryLogPG *pg, Context *c, epoch_t e)
: pg(pg), c(c), e(e) {}
- void finish(int r) {
+ void finish(int r) override {
pg->lock();
if (pg->pg_has_reset_since(e))
c.reset();
public:
C_PG_ObjectContext(PrimaryLogPG *p, ObjectContext *o) :
pg(p), obc(o) {}
- void finish(int r) {
+ void finish(int r) override {
pg->object_context_destructor_callback(obc);
}
};
ObjectContextRef o,
ObjectContextRef o2 = ObjectContextRef(),
ObjectContextRef o3 = ObjectContextRef()) : obc(o), obc2(o2), obc3(o3) {}
- void finish(int r) {
+ void finish(int r) override {
obc->ondisk_write_unlock();
if (obc2)
obc2->ondisk_write_unlock();
OnReadComplete(
PrimaryLogPG *pg,
PrimaryLogPG::OpContext *ctx) : pg(pg), opcontext(ctx) {}
- void finish(int r) {
+ void finish(int r) override {
if (r < 0)
opcontext->async_read_result = r;
opcontext->finish_read(pg);
public:
C_OSD_AppliedRecoveredObject(PrimaryLogPG *p, ObjectContextRef o) :
pg(p), obc(o) {}
- void finish(int r) {
+ void finish(int r) override {
pg->_applied_recovered_object(obc);
}
};
PrimaryLogPG *p, epoch_t epoch, eversion_t lc) :
pg(p), epoch(epoch), last_complete(lc) {
}
- void finish(int r) {
+ void finish(int r) override {
pg->_committed_pushed_object(epoch, last_complete);
}
};
public:
explicit C_OSD_AppliedRecoveredObjectReplica(PrimaryLogPG *p) :
pg(p) {}
- void finish(int r) {
+ void finish(int r) override {
pg->_applied_recovered_object_replica();
}
};
ctx(ctx_) {}
~CopyFromCallback() {}
- virtual void finish(PrimaryLogPG::CopyCallbackResults results_) {
+ void finish(PrimaryLogPG::CopyCallbackResults results_) override {
results = results_.get<1>();
int r = results_.get<0>();
retval = r;
class PGLSPlainFilter : public PGLSFilter {
string val;
public:
- virtual int init(bufferlist::iterator &params)
+ int init(bufferlist::iterator &params) override
{
try {
::decode(xattr, params);
return 0;
}
- virtual ~PGLSPlainFilter() {}
- virtual bool filter(const hobject_t &obj, bufferlist& xattr_data,
- bufferlist& outdata);
+ ~PGLSPlainFilter() override {}
+ bool filter(const hobject_t &obj, bufferlist& xattr_data,
+ bufferlist& outdata) override;
};
class PGLSParentFilter : public PGLSFilter {
PGLSParentFilter(CephContext* cct) : cct(cct) {
xattr = "_parent";
}
- virtual int init(bufferlist::iterator &params)
+ int init(bufferlist::iterator &params) override
{
try {
::decode(parent_ino, params);
return 0;
}
- virtual ~PGLSParentFilter() {}
- virtual bool filter(const hobject_t &obj, bufferlist& xattr_data,
- bufferlist& outdata);
+ ~PGLSParentFilter() override {}
+ bool filter(const hobject_t &obj, bufferlist& xattr_data,
+ bufferlist& outdata) override;
};
bool PGLSParentFilter::filter(const hobject_t &obj,
: pg(p), oid(o), last_peering_reset(lpr),
tid(0), prdop(prd), start(ceph_clock_now())
{}
- void finish(int r) {
+ void finish(int r) override {
if (prdop->canceled)
return;
pg->lock();
: pg(p), oid(o), last_peering_reset(lpr),
tid(0), pwop(pw)
{}
- void finish(int r) {
+ void finish(int r) override {
if (pwop->canceled)
return;
pg->lock();
pg(pg_),
start(ceph_clock_now()) {}
- virtual void finish(PrimaryLogPG::CopyCallbackResults results) {
+ void finish(PrimaryLogPG::CopyCallbackResults results) override {
PrimaryLogPG::CopyResults *results_data = results.get<1>();
int r = results.get<0>();
pg->finish_promote(r, results_data, obc);
OSDService *osd, hobject_t soid, __le32 flags) :
r(r), rval(rv), outdatap(blp), maybe_crc(mc),
size(size), osd(osd), soid(soid), flags(flags) {}
- void finish(int len) {
+ void finish(int len) override {
*rval = len;
*r = len;
if (len < 0)
ceph_le64& len;
ToSparseReadResult(bufferlist& bl, uint64_t offset, ceph_le64& len):
data_bl(bl), data_offset(offset), len(len) {}
- void finish(int r) {
+ void finish(int r) override {
if (r < 0) return;
len = r;
bufferlist outdata;
: pg(p), oid(o), last_peering_reset(lpr),
tid(0), cop(c)
{}
- void finish(int r) {
+ void finish(int r) override {
if (r == -ECANCELED)
return;
pg->lock();
size_t len;
C_CopyFrom_AsyncReadCb(OSDOp *osd_op, uint64_t features, bool classic) :
osd_op(osd_op), features(features), classic(classic), len(0) {}
- void finish(int r) {
+ void finish(int r) override {
assert(len > 0);
assert(len <= reply_obj.data.length());
bufferlist bl;
: pg(p), oid(o), last_peering_reset(lpr),
tid(0), start(ceph_clock_now())
{}
- void finish(int r) {
+ void finish(int r) override {
if (r == -ECANCELED)
return;
pg->lock();
public:
C_OSD_RepopApplied(PrimaryLogPG *pg, PrimaryLogPG::RepGather *repop)
: pg(pg), repop(repop) {}
- void finish(int) {
+ void finish(int) override {
pg->repop_all_applied(repop.get());
}
};
public:
C_OSD_RepopCommit(PrimaryLogPG *pg, PrimaryLogPG::RepGather *repop)
: pg(pg), repop(repop) {}
- void finish(int) {
+ void finish(int) override {
pg->repop_all_committed(repop.get());
}
};
PGBackend::Listener *pg,
Message *reply,
ConnectionRef conn) : pg(pg), reply(reply), conn(conn) {}
- void finish(int) {
+ void finish(int) override {
pg->send_message_osd_cluster(reply, conn.get());
}
};
PG_RecoveryQueueAsync(
PGBackend::Listener *pg,
GenContext<ThreadPool::TPHandle&> *c) : pg(pg), c(c) {}
- void finish(int) {
+ void finish(int) override {
pg->schedule_recovery_work(c.release());
}
};
RepModifyRef rm;
C_OSD_RepModifyApply(ReplicatedBackend *pg, RepModifyRef r)
: pg(pg), rm(r) {}
- void finish(int r) {
+ void finish(int r) override {
pg->sub_op_modify_applied(rm);
}
};
RepModifyRef rm;
C_OSD_RepModifyCommit(ReplicatedBackend *pg, RepModifyRef r)
: pg(pg), rm(r) {}
- void finish(int r) {
+ void finish(int r) override {
pg->sub_op_modify_commit(rm);
}
};
int r;
Context *c;
AsyncReadCallback(int r, Context *c) : r(r), c(c) {}
- void finish(ThreadPool::TPHandle&) {
+ void finish(ThreadPool::TPHandle&) override {
c->complete(r);
c = NULL;
}
public:
C_OSD_OnOpCommit(ReplicatedBackend *pg, ReplicatedBackend::InProgressOp *op)
: pg(pg), op(op) {}
- void finish(int) {
+ void finish(int) override {
pg->op_commit(op);
}
};
public:
C_OSD_OnOpApplied(ReplicatedBackend *pg, ReplicatedBackend::InProgressOp *op)
: pg(pg), op(op) {}
- void finish(int) {
+ void finish(int) override {
pg->op_applied(op);
}
};
C_ReplicatedBackend_OnPullComplete(ReplicatedBackend *bc, int priority)
: bc(bc), priority(priority) {}
- void finish(ThreadPool::TPHandle &handle) {
+ void finish(ThreadPool::TPHandle &handle) override {
ReplicatedBackend::RPGHandle *h = bc->_open_recovery_op();
for (auto &&i: to_continue) {
if (!bc->start_pushes(i.hoid, i.obc, h)) {
bool canceled; // protected by notif lock
public:
explicit NotifyTimeoutCB(NotifyRef notif) : notif(notif), canceled(false) {}
- void finish(int) {
+ void finish(int) override {
notif->osd->watch_lock.Unlock();
notif->lock.Lock();
if (!canceled)
notif->lock.Unlock();
notif->osd->watch_lock.Lock();
}
- void cancel() {
+ void cancel() override {
assert(notif->lock.is_locked_by_me());
canceled = true;
}
public:
bool canceled; // protected by watch->pg->lock
explicit HandleWatchTimeout(WatchRef watch) : watch(watch), canceled(false) {}
- void cancel() {
+ void cancel() override {
canceled = true;
}
- void finish(int) { ceph_abort(); /* not used */ }
- void complete(int) {
+ void finish(int) override { ceph_abort(); /* not used */ }
+ void complete(int) override {
OSDService *osd(watch->osd);
ldout(osd->cct, 10) << "HandleWatchTimeout" << dendl;
boost::intrusive_ptr<PrimaryLogPG> pg(watch->pg);
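HandleWatchTimeout is unusual in overriding complete() rather than finish(): in Ceph's base Context, complete() invokes finish() and then deletes the callback, so overriding complete() lets this timeout manage locking and its own lifetime, leaving finish() as an unreachable abort. The base contract, slightly simplified:

    // Slightly simplified sketch of Ceph's Context base class.
    class Context {
     public:
      virtual ~Context() {}
      virtual void finish(int r) = 0;
      virtual void complete(int r) {
        finish(r);
        delete this;  // default: callbacks are one-shot and self-deleting
      }
    };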
public:
bool canceled;
explicit HandleDelayedWatchTimeout(WatchRef watch) : watch(watch), canceled(false) {}
- void cancel() {
+ void cancel() override {
canceled = true;
}
- void finish(int) {
+ void finish(int) override {
OSDService *osd(watch->osd);
dout(10) << "HandleDelayedWatchTimeout" << dendl;
assert(watch->pg->is_locked());
ceph::real_time mtime;
C_Probe(Filer *f, Probe *p, object_t o) : filer(f), probe(p), oid(o),
size(0) {}
- void finish(int r) {
+ void finish(int r) override {
if (r == -ENOENT) {
r = 0;
assert(size == 0);
Filer *filer;
PurgeRange *pr;
C_PurgeRange(Filer *f, PurgeRange *p) : filer(f), pr(p) {}
- void finish(int r) {
+ void finish(int r) override {
filer->_do_purge_range(pr, 1);
}
};
Journaler *journaler;
public:
C_DelayFlush(Journaler *j) : journaler(j) {}
- void finish(int r) {
+ void finish(int r) override {
journaler->_do_delayed_flush();
}
};
public:
bufferlist bl;
explicit C_ReadHead(Journaler *l) : ls(l) {}
- void finish(int r) {
+ void finish(int r) override {
ls->_finish_read_head(r, bl);
}
};
bufferlist bl;
C_RereadHead(Journaler *l, Context *onfinish_) : ls (l),
onfinish(onfinish_) {}
- void finish(int r) {
+ void finish(int r) override {
ls->_finish_reread_head(r, bl, onfinish);
}
};
public:
uint64_t end;
explicit C_ProbeEnd(Journaler *l) : ls(l), end(-1) {}
- void finish(int r) {
+ void finish(int r) override {
ls->_finish_probe_end(r, end);
}
};
uint64_t end;
C_ReProbe(Journaler *l, C_OnFinisher *onfinish_) :
ls(l), onfinish(onfinish_), end(0) {}
- void finish(int r) {
+ void finish(int r) override {
ls->_finish_reprobe(r, end, onfinish);
}
};
public:
C_RereadHeadProbe(Journaler *l, C_OnFinisher *finish) :
ls(l), final_finish(finish) {}
- void finish(int r) {
+ void finish(int r) override {
ls->_finish_reread_head_and_probe(r, final_finish);
}
};
C_OnFinisher *oncommit;
C_WriteHead(Journaler *l, Header& h_, C_OnFinisher *c) : ls(l), h(h_),
oncommit(c) {}
- void finish(int r) {
+ void finish(int r) override {
ls->_finish_write_head(r, h, oncommit);
}
};
public:
C_Flush(Journaler *l, int64_t s, ceph::real_time st)
: ls(l), start(s), stamp(st) {}
- void finish(int r) {
+ void finish(int r) override {
ls->_finish_flush(r, start, stamp);
}
};
uint64_t from, len;
C_Journaler_Prezero(Journaler *j, uint64_t f, uint64_t l)
: journaler(j), from(f), len(l) {}
- void finish(int r) {
+ void finish(int r) override {
journaler->_finish_prezero(r, from, len);
}
};
public:
bufferlist bl;
C_Read(Journaler *j, uint64_t o, uint64_t l) : ls(j), offset(o), length(l) {}
- void finish(int r) {
+ void finish(int r) override {
ls->_finish_read(r, offset, length, bl);
}
};
public:
explicit C_RetryRead(Journaler *l) : ls(l) {}
- void finish(int r) {
+ void finish(int r) override {
// Should only be called from waitfor_safe i.e. already inside lock
// (ls->lock is locked)
ls->_prefetch();
C_OnFinisher *completion;
public:
C_EraseFinish(Journaler *j, C_OnFinisher *c) : journaler(j), completion(c) {}
- void finish(int r) {
+ void finish(int r) override {
journaler->_finish_erase(r, completion);
}
};
uint64_t to;
public:
C_Trim(Journaler *l, int64_t t) : ls(l), to(t) {}
- void finish(int r) {
+ void finish(int r) override {
ls->_finish_trim(r, to);
}
};
ob->reads.push_back(&set_item);
}
- void finish(int r) {
+ void finish(int r) override {
oc->bh_read_finish(poolid, oid, tid, start, length, bl, r, trust_enoent);
// object destructor clears the list
public:
C_RetryRead(ObjectCacher *_oc, OSDRead *r, ObjectSet *os, Context *c)
: oc(_oc), rd(r), oset(os), onfinish(c) {}
- void finish(int r) {
+ void finish(int r) override {
if (r < 0) {
if (onfinish)
onfinish->complete(r);
oc(c), poolid(_poolid), oid(o), tid(0) {
ranges.swap(_ranges);
}
- void finish(int r) {
+ void finish(int r) override {
oc->bh_write_commit(poolid, oid, ranges, tid, r);
}
};
public:
C_WaitForWrite(ObjectCacher *oc, uint64_t len, Context *onfinish) :
m_oc(oc), m_len(len), m_onfinish(onfinish) {}
- void finish(int r);
+ void finish(int r) override;
private:
ObjectCacher *m_oc;
uint64_t m_len;
public:
explicit RequestStateHook(Objecter *objecter);
bool call(std::string command, cmdmap_t& cmdmap, std::string format,
- bufferlist& out);
+ bufferlist& out) override;
};
/**
public:
C_TwoContexts(Context *first, Context *second) :
first(first), second(second) {}
- void finish(int r) {
+ void finish(int r) override {
first->complete(r);
second->complete(r);
first = NULL;
second = NULL;
}
- virtual ~C_TwoContexts() {
+ ~C_TwoContexts() override {
delete first;
delete second;
}
info->get();
info->_queued_async();
}
- void finish(int r) {
+ void finish(int r) override {
Objecter::unique_lock wl(objecter->rwlock);
bool canceled = info->canceled;
wl.unlock();
info->_queued_async();
msg->get();
}
- void finish(int r) {
+ void finish(int r) override {
objecter->_do_watch_notify(info, msg);
}
};
Context *fin;
C_Objecter_GetVersion(Objecter *o, Context *c)
: objecter(o), oldest(0), newest(0), fin(c) {}
- void finish(int r) {
+ void finish(int r) override {
if (r >= 0) {
objecter->get_latest_version(oldest, newest, fin);
} else if (r == -EAGAIN) { // try again as instructed
snapid_t *psnapid;
Context *fin;
C_SelfmanagedSnap(snapid_t *ps, Context *f) : psnapid(ps), fin(f) {}
- void finish(int r) {
+ void finish(int r) override {
if (r == 0) {
bufferlist::iterator p = bl.begin();
::decode(*psnapid, p);
epoch(0), budget(0)
{}
- void finish(int r) {
+ void finish(int r) override {
objecter->_enumerate_reply(
bl, r, end, pool_id, budget, epoch, result, next, on_finish);
}
list<pair<pair<coll_t, string>, uint64_t> > in_flight)
: tracker(tracker), in_flight(in_flight) {}
- void finish(int r) {
+ void finish(int r) override {
for (list<pair<pair<coll_t, string>, uint64_t> >::iterator i =
in_flight.begin();
i != in_flight.end();
list<pair<pair<coll_t, string>, uint64_t> > in_flight)
: tracker(tracker), in_flight(in_flight) {}
- void finish(int r) {
+ void finish(int r) override {
for (list<pair<pair<coll_t, string>, uint64_t> >::iterator i =
in_flight.begin();
i != in_flight.end();
unsigned seq;
int64_t poolid;
explicit MixedGenerator(int64_t p) : seq(0), poolid(p) {}
- ghobject_t create_object(gen_type *gen) {
+ ghobject_t create_object(gen_type *gen) override {
char buf[100];
snprintf(buf, sizeof(buf), "OBJ_%u", seq);
string name(buf);
C_SyntheticOnReadable(SyntheticWorkloadState *state, ghobject_t hoid)
: state(state), hoid(hoid) {}
- void finish(int r) {
+ void finish(int r) override {
Mutex::Locker locker(state->lock);
EnterExit ee("onreadable finish");
ASSERT_TRUE(state->in_flight_objects.count(hoid));
ghobject_t oid, ghobject_t noid)
: state(state), oid(oid), noid(noid) {}
- void finish(int r) {
+ void finish(int r) override {
Mutex::Locker locker(state->lock);
EnterExit ee("stash finish");
ASSERT_TRUE(state->in_flight_objects.count(oid));
ghobject_t oid, ghobject_t noid)
: state(state), oid(oid), noid(noid) {}
- void finish(int r) {
+ void finish(int r) override {
Mutex::Locker locker(state->lock);
EnterExit ee("clone finish");
ASSERT_TRUE(state->in_flight_objects.count(oid));
db.reset(NULL);
}
- virtual void SetUp() {
+ void SetUp() override {
int r = ::mkdir("kv_test_temp_dir", 0777);
if (r < 0 && errno != EEXIST) {
r = -errno;
}
init();
}
- virtual void TearDown() {
+ void TearDown() override {
fini();
rm_r("kv_test_temp_dir");
}
}
struct AppendMOP : public KeyValueDB::MergeOperator {
- virtual void merge_nonexistent(
+ void merge_nonexistent(
const char *rdata, size_t rlen, std::string *new_value) override {
*new_value = "?" + std::string(rdata, rlen);
}
- virtual void merge(
+ void merge(
const char *ldata, size_t llen,
const char *rdata, size_t rlen,
- std::string *new_value) {
+ std::string *new_value) override {
*new_value = std::string(ldata, llen) + std::string(rdata, rlen);
}
// We use each operator name and each prefix to construct the
// overall RocksDB operator name for the consistency check at open time.
- virtual string name() const {
+ string name() const override {
return "Append";
}
};
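A usage sketch for the operator above, modeled on the surrounding kv test (the exact calls are assumptions taken from that fixture): register the operator for a key prefix, then merge; a merge against a missing key routes through merge_nonexistent, so "abc" becomes "?abc", and a following merge of "def" yields "?abcdef".

    // Hedged sketch; set_merge_operator/get_transaction/merge as used
    // elsewhere in this test.
    std::shared_ptr<KeyValueDB::MergeOperator> p(new AppendMOP);
    db->set_merge_operator("A", p);      // prefix "A" gets Append semantics
    KeyValueDB::Transaction t = db->get_transaction();
    bufferlist v;
    v.append("abc");
    t->merge("A", "key", v);             // no existing value -> "?abc"
    db->submit_transaction_sync(t);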
uint32_t index_version)
: LFNIndex(cct, collection, base_path, index_version) {}
- virtual uint32_t collection_version() {
+ uint32_t collection_version() override {
return index_version;
}
- int cleanup() { return 0; }
+ int cleanup() override { return 0; }
- virtual int _split(
+ int _split(
uint32_t match,
uint32_t bits,
CollectionIndex* dest
- ) { return 0; }
+ ) override { return 0; }
void test_generate_and_parse(const ghobject_t &hoid, const std::string &mangled_expected) {
const std::string mangled_name = lfn_generate_object_name(hoid);
}
protected:
- virtual int _init() { return 0; }
+ int _init() override { return 0; }
- virtual int _created(
+ int _created(
const vector<string> &path,
const ghobject_t &hoid,
const string &mangled_name
- ) { return 0; }
+ ) override { return 0; }
- virtual int _remove(
+ int _remove(
const vector<string> &path,
const ghobject_t &hoid,
const string &mangled_name
- ) { return 0; }
+ ) override { return 0; }
- virtual int _lookup(
+ int _lookup(
const ghobject_t &hoid,
vector<string> *path,
string *mangled_name,
int *exists
- ) { return 0; }
+ ) override { return 0; }
- virtual int _collection_list_partial(
+ int _collection_list_partial(
const ghobject_t &start,
const ghobject_t &end,
bool sort_bitwise,
int max_count,
vector<ghobject_t> *ls,
ghobject_t *next
- ) { return 0; }
- virtual int _pre_hash_collection(
+ ) override { return 0; }
+ int _pre_hash_collection(
uint32_t pg_num,
uint64_t expected_num_objs
- ) { return 0; }
+ ) override { return 0; }
};
CollectionIndex::HOBJECT_WITH_POOL) {
}
- virtual void SetUp() {
+ void SetUp() override {
::chmod("PATH_1", 0700);
ASSERT_EQ(0, ::system("rm -fr PATH_1"));
ASSERT_EQ(0, ::mkdir("PATH_1", 0700));
}
- virtual void TearDown() {
+ void TearDown() override {
ASSERT_EQ(0, ::system("rm -fr PATH_1"));
}
};
class PGLogTest : public ::testing::Test, protected PGLog {
public:
PGLogTest() : PGLog(g_ceph_context) {}
- virtual void SetUp() { }
+ void SetUp() override { }
- virtual void TearDown() {
+ void TearDown() override {
clear();
}
list<pg_log_entry_t> rolledback;
void rollback(
- const pg_log_entry_t &entry) {
+ const pg_log_entry_t &entry) override {
rolledback.push_back(entry);
}
void rollforward(
- const pg_log_entry_t &entry) {}
+ const pg_log_entry_t &entry) override {}
void remove(
- const hobject_t &hoid) {
+ const hobject_t &hoid) override {
removed.insert(hoid);
}
void try_stash(const hobject_t &, version_t) override {
// lost/unfound cases are not tested yet
}
void trim(
- const pg_log_entry_t &entry) {}
+ const pg_log_entry_t &entry) override {}
};
template <typename missing_t>
explicit TestHandler(list<hobject_t> &removed) : removed(removed) {}
void rollback(
- const pg_log_entry_t &entry) {}
+ const pg_log_entry_t &entry) override {}
void rollforward(
- const pg_log_entry_t &entry) {}
+ const pg_log_entry_t &entry) override {}
void remove(
- const hobject_t &hoid) {
+ const hobject_t &hoid) override {
removed.push_back(hoid);
}
void cant_rollback(const pg_log_entry_t &entry) {}
// lost/unfound cases are not tested yet
}
void trim(
- const pg_log_entry_t &entry) {}
+ const pg_log_entry_t &entry) override {}
};
TEST_F(PGLogTest, rewind_divergent_log) {
}
}
- TestOp *next(RadosTestContext &context)
+ TestOp *next(RadosTestContext &context) override
{
TestOp *retval = NULL;
{
}
- virtual void *entry() {
+ void *entry() override {
obc.ondisk_read_lock();
return NULL;
}
{
}
- virtual void *entry() {
+ void *entry() override {
obc.ondisk_write_lock();
return NULL;
}
bufferlist *pbl, uint64_t delay_ns=0)
: m_cct(cct), m_con(c), m_delay(delay_ns * std::chrono::nanoseconds(1)),
m_lock(lock), m_bl(pbl), m_off(off) {}
- void finish(int r) {
+ void finish(int r) override {
std::this_thread::sleep_for(m_delay);
if (m_bl) {
buffer::ptr bp(r);
: wb(mwb), m_cct(cct), m_con(c),
m_delay(delay_ns * std::chrono::nanoseconds(1)),
m_lock(lock), m_oid(oid), m_off(off), m_len(len), m_bl(pbl) {}
- void finish(int r) {
+ void finish(int r) override {
std::this_thread::sleep_for(m_delay);
m_lock->Lock();
r = wb->read_object_data(m_oid, m_off, m_len, m_bl);
: wb(mwb), m_cct(cct), m_con(c),
m_delay(delay_ns * std::chrono::nanoseconds(1)),
m_lock(lock), m_oid(oid), m_off(off), m_len(len), m_bl(bl) {}
- void finish(int r) {
+ void finish(int r) override {
std::this_thread::sleep_for(m_delay);
m_lock->Lock();
wb->write_object_data(m_oid, m_off, m_len, m_bl);
public:
C_Count(op_data *op, atomic_t *outstanding)
: m_op(op), m_outstanding(outstanding) {}
- void finish(int r) {
+ void finish(int r) override {
m_op->done.inc();
assert(m_outstanding->read() > 0);
m_outstanding->dec();