#define MAX_FLUSH_UNDER_LOCK 20 ///< max bh's we start writeback on
#define BUFFER_MEMORY_WEIGHT CEPH_PAGE_SHIFT // memory usage of BufferHead, count in (1<<n)
+                                 /// while holding the lock
using std::chrono::seconds;
-                                 /// while holding the lock
+using std::list;
+using std::map;
+using std::make_pair;
+using std::pair;
+using std::set;
+using std::string;
+using std::vector;
+
+using ceph::bufferlist;
+
+using namespace std::literals;
/*** ObjectCacher::BufferHead ***/
// move read waiters
if (!left->waitfor_read.empty()) {
- map<loff_t, list<Context*> >::iterator start_remove
- = left->waitfor_read.begin();
+ auto start_remove = left->waitfor_read.begin();
while (start_remove != left->waitfor_read.end() &&
start_remove->first < right->start())
++start_remove;
- for (map<loff_t, list<Context*> >::iterator p = start_remove;
- p != left->waitfor_read.end(); ++p) {
+ for (auto p = start_remove; p != left->waitfor_read.end(); ++p) {
ldout(oc->cct, 20) << "split moving waiters at byte " << p->first
<< " to right bh" << dendl;
right->waitfor_read[p->first].swap( p->second );
left->set_nocache(right->get_nocache() ? left->get_nocache() : false);
// waiters
- for (map<loff_t, list<Context*> >::iterator p = right->waitfor_read.begin();
+ for (auto p = right->waitfor_read.begin();
p != right->waitfor_read.end();
++p)
left->waitfor_read[p->first].splice(left->waitfor_read[p->first].begin(),
return;
// to the left?
- map<loff_t,BufferHead*>::iterator p = data.find(bh->start());
+ auto p = data.find(bh->start());
ceph_assert(p->second == bh);
if (p != data.begin()) {
--p;
bool ObjectCacher::Object::is_cached(loff_t cur, loff_t left) const
{
ceph_assert(ceph_mutex_is_locked(oc->lock));
- map<loff_t, BufferHead*>::const_iterator p = data_lower_bound(cur);
+ auto p = data_lower_bound(cur);
while (left > 0) {
if (p == data.end())
return false;
ceph_assert(ceph_mutex_is_locked(oc->lock));
if (data.empty())
return true;
- map<loff_t, BufferHead*>::iterator first = data.begin();
- map<loff_t, BufferHead*>::reverse_iterator last = data.rbegin();
+ auto first = data.begin();
+ auto last = data.rbegin();
if (first->second->start() >= off && last->second->end() <= (off + len))
return true;
else
loff_t cur = ex.offset;
loff_t left = ex.length;
- map<loff_t, BufferHead*>::const_iterator p = data_lower_bound(ex.offset);
+ auto p = data_lower_bound(ex.offset);
while (left > 0) {
// at end?
if (p == data.end()) {
void ObjectCacher::Object::audit_buffers()
{
loff_t offset = 0;
- for (map<loff_t, BufferHead*>::const_iterator it = data.begin();
- it != data.end(); ++it) {
+ for (auto it = data.begin(); it != data.end(); ++it) {
if (it->first != it->second->start()) {
lderr(oc->cct) << "AUDIT FAILURE: map position " << it->first
<< " does not match bh start position: "
ceph_assert(it->first >= offset);
}
BufferHead *bh = it->second;
- map<loff_t, list<Context*> >::const_iterator w_it;
- for (w_it = bh->waitfor_read.begin();
+ for (auto w_it = bh->waitfor_read.begin();
w_it != bh->waitfor_read.end(); ++w_it) {
if (w_it->first < bh->start() ||
w_it->first >= bh->start() + bh->length()) {
loff_t cur = ex.offset;
loff_t left = ex.length;
- map<loff_t, BufferHead*>::const_iterator p = data_lower_bound(ex.offset);
+ auto p = data_lower_bound(ex.offset);
while (left > 0) {
loff_t max = left;
complete = false;
}
- map<loff_t, BufferHead*>::const_iterator p = data_lower_bound(off);
+ auto p = data_lower_bound(off);
while (p != data.end()) {
BufferHead *bh = p->second;
if (bh->start() >= off + len)
finisher.stop();
perf_stop();
// we should be empty.
- for (vector<ceph::unordered_map<sobject_t, Object *> >::iterator i
- = objects.begin();
- i != objects.end();
- ++i)
+ for (auto i = objects.begin(); i != objects.end(); ++i)
ceph_assert(i->empty());
ceph_assert(bh_lru_rest.lru_get_size() == 0);
ceph_assert(bh_lru_dirty.lru_get_size() == 0);
// read 1~1 -> immediate ENOENT
// reply to first 1~1 -> ooo ENOENT
bool allzero = true;
- for (map<loff_t, BufferHead*>::iterator p = ob->data.begin();
- p != ob->data.end(); ++p) {
+ for (auto p = ob->data.begin(); p != ob->data.end(); ++p) {
BufferHead *bh = p->second;
- for (map<loff_t, list<Context*> >::iterator p
- = bh->waitfor_read.begin();
+ for (auto p = bh->waitfor_read.begin();
p != bh->waitfor_read.end();
++p)
ls.splice(ls.end(), p->second);
ldout(cct, 10)
<< "bh_read_finish ENOENT and allzero, getting rid of "
<< "bhs for " << *ob << dendl;
- map<loff_t, BufferHead*>::iterator p = ob->data.begin();
+ auto p = ob->data.begin();
while (p != ob->data.end()) {
BufferHead *bh = p->second;
// current iterator will be invalidated by bh_remove()
// apply to bh's!
loff_t opos = start;
while (true) {
- map<loff_t, BufferHead*>::const_iterator p = ob->data_lower_bound(opos);
+ auto p = ob->data_lower_bound(opos);
if (p == ob->data.end())
break;
if (opos >= start+(loff_t)length) {
ldout(cct, 20) << "checking bh " << *bh << dendl;
// finishers?
- for (map<loff_t, list<Context*> >::iterator it
- = bh->waitfor_read.begin();
+ for (auto it = bh->waitfor_read.begin();
it != bh->waitfor_read.end();
++it)
ls.splice(ls.end(), it->second);
// read scatter/gather
struct OSDRead {
- vector<ObjectExtent> extents;
+ std::vector<ObjectExtent> extents;
snapid_t snap;
- bufferlist *bl;
+ ceph::buffer::list *bl;
int fadvise_flags;
- OSDRead(snapid_t s, bufferlist *b, int f)
+ OSDRead(snapid_t s, ceph::buffer::list *b, int f)
: snap(s), bl(b), fadvise_flags(f) {}
};
- OSDRead *prepare_read(snapid_t snap, bufferlist *b, int f) const {
+ OSDRead *prepare_read(snapid_t snap, ceph::buffer::list *b, int f) const {
return new OSDRead(snap, b, f);
}
// write scatter/gather
struct OSDWrite {
- vector<ObjectExtent> extents;
+ std::vector<ObjectExtent> extents;
SnapContext snapc;
- bufferlist bl;
+ ceph::buffer::list bl;
ceph::real_time mtime;
int fadvise_flags;
ceph_tid_t journal_tid;
- OSDWrite(const SnapContext& sc, const bufferlist& b, ceph::real_time mt,
+ OSDWrite(const SnapContext& sc, const ceph::buffer::list& b, ceph::real_time mt,
int f, ceph_tid_t _journal_tid)
: snapc(sc), bl(b), mtime(mt), fadvise_flags(f),
journal_tid(_journal_tid) {}
};
OSDWrite *prepare_write(const SnapContext& sc,
- const bufferlist &b,
+ const ceph::buffer::list &b,
ceph::real_time mt,
int f,
ceph_tid_t journal_tid) const {
public:
Object *ob;
- bufferlist bl;
+ ceph::buffer::list bl;
ceph_tid_t last_write_tid; // version of bh (if non-zero)
ceph_tid_t last_read_tid; // tid of last read op (if any)
ceph::real_time last_write;
ceph_tid_t journal_tid;
int error; // holds return value for failed reads
- map<loff_t, list<Context*> > waitfor_read;
+ std::map<loff_t, std::list<Context*> > waitfor_read;
// cons
explicit BufferHead(Object *o) :
bool complete;
bool exists;
- map<loff_t, BufferHead*> data;
+ std::map<loff_t, BufferHead*> data;
ceph_tid_t last_write_tid; // version of bh (if non-zero)
ceph_tid_t last_commit_tid; // last update committed.
int dirty_or_tx;
- map< ceph_tid_t, list<Context*> > waitfor_commit;
+ std::map< ceph_tid_t, std::list<Context*> > waitfor_commit;
xlist<C_ReadFinish*> reads;
Object(const Object&) = delete;
object_t get_oid() { return oid.oid; }
snapid_t get_snap() { return oid.snap; }
ObjectSet *get_object_set() const { return oset; }
- string get_namespace() { return oloc.nspace; }
+ std::string get_namespace() { return oloc.nspace; }
uint64_t get_object_number() const { return object_no; }
const object_locator_t& get_oloc() const { return oloc; }
* @param offset object byte offset
* @return iterator pointing to buffer, or data.end()
*/
- map<loff_t,BufferHead*>::const_iterator data_lower_bound(loff_t offset) const {
- map<loff_t,BufferHead*>::const_iterator p = data.lower_bound(offset);
+ std::map<loff_t,BufferHead*>::const_iterator data_lower_bound(loff_t offset) const {
+ auto p = data.lower_bound(offset);
if (p != data.begin() &&
(p == data.end() || p->first > offset)) {
--p; // might overlap!
bool is_cached(loff_t off, loff_t len) const;
bool include_all_cached_data(loff_t off, loff_t len);
int map_read(ObjectExtent &ex,
- map<loff_t, BufferHead*>& hits,
- map<loff_t, BufferHead*>& missing,
- map<loff_t, BufferHead*>& rx,
- map<loff_t, BufferHead*>& errors);
+ std::map<loff_t, BufferHead*>& hits,
+ std::map<loff_t, BufferHead*>& missing,
+ std::map<loff_t, BufferHead*>& rx,
+ std::map<loff_t, BufferHead*>& errors);
BufferHead *map_write(ObjectExtent &ex, ceph_tid_t tid);
void replace_journal_tid(BufferHead *bh, ceph_tid_t tid);
WritebackHandler& writeback_handler;
bool scattered_write;
- string name;
+ std::string name;
ceph::mutex& lock;
uint64_t max_dirty, target_dirty, max_size, max_objects;
void *flush_set_callback_arg;
// indexed by pool_id
- vector<ceph::unordered_map<sobject_t, Object*> > objects;
+ std::vector<ceph::unordered_map<sobject_t, Object*> > objects;
- list<Context*> waitfor_read;
+ std::list<Context*> waitfor_read;
ceph_tid_t last_read_tid;
- set<BufferHead*, BufferHead::ptr_lt> dirty_or_tx_bh;
+ std::set<BufferHead*, BufferHead::ptr_lt> dirty_or_tx_bh;
LRU bh_lru_dirty, bh_lru_rest;
LRU ob_lru;
void bh_read(BufferHead *bh, int op_flags,
const ZTracer::Trace &parent_trace);
void bh_write(BufferHead *bh, const ZTracer::Trace &parent_trace);
- void bh_write_scattered(list<BufferHead*>& blist);
+ void bh_write_scattered(std::list<BufferHead*>& blist);
void bh_write_adjacencies(BufferHead *bh, ceph::real_time cutoff,
int64_t *amount, int *max_count);
public:
void bh_read_finish(int64_t poolid, sobject_t oid, ceph_tid_t tid,
loff_t offset, uint64_t length,
- bufferlist &bl, int r,
+ ceph::buffer::list &bl, int r,
bool trust_enoent);
void bh_write_commit(int64_t poolid, sobject_t oid,
- vector<pair<loff_t, uint64_t> >& ranges,
+ std::vector<std::pair<loff_t, uint64_t> >& ranges,
ceph_tid_t t, int r);
class C_WriteCommit;
- ObjectCacher(CephContext *cct_, string name, WritebackHandler& wb, ceph::mutex& l,
+ ObjectCacher(CephContext *cct_, std::string name, WritebackHandler& wb, ceph::mutex& l,
flush_set_callback_t flush_callback,
void *flush_callback_arg,
uint64_t max_bytes, uint64_t max_objects,
ZTracer::Trace *parent_trace = nullptr);
int writex(OSDWrite *wr, ObjectSet *oset, Context *onfreespace,
ZTracer::Trace *parent_trace = nullptr);
- bool is_cached(ObjectSet *oset, vector<ObjectExtent>& extents,
+ bool is_cached(ObjectSet *oset, std::vector<ObjectExtent>& extents,
snapid_t snapid);
private:
void _maybe_wait_for_writeback(uint64_t len, ZTracer::Trace *trace);
bool _flush_set_finish(C_GatherBuilder *gather, Context *onfinish);
- void _discard(ObjectSet *oset, const vector<ObjectExtent>& exls,
+ void _discard(ObjectSet *oset, const std::vector<ObjectExtent>& exls,
C_GatherBuilder* gather);
void _discard_finish(ObjectSet *oset, bool was_dirty, Context* on_finish);
bool set_is_dirty_or_committing(ObjectSet *oset);
bool flush_set(ObjectSet *oset, Context *onfinish=0);
- bool flush_set(ObjectSet *oset, vector<ObjectExtent>& ex,
+ bool flush_set(ObjectSet *oset, std::vector<ObjectExtent>& ex,
ZTracer::Trace *trace, Context *onfinish = 0);
bool flush_all(Context *onfinish = 0);
loff_t release_set(ObjectSet *oset);
uint64_t release_all();
- void discard_set(ObjectSet *oset, const vector<ObjectExtent>& ex);
- void discard_writeback(ObjectSet *oset, const vector<ObjectExtent>& ex,
+ void discard_set(ObjectSet *oset, const std::vector<ObjectExtent>& ex);
+ void discard_writeback(ObjectSet *oset, const std::vector<ObjectExtent>& ex,
Context* on_finish);
/**
max_size = v;
}
void set_max_dirty_age(double a) {
- max_dirty_age = make_timespan(a);
+ max_dirty_age = ceph::make_timespan(a);
}
void set_max_objects(int64_t v) {
max_objects = v;
/*** async+caching (non-blocking) file interface ***/
int file_is_cached(ObjectSet *oset, file_layout_t *layout,
snapid_t snapid, loff_t offset, uint64_t len) {
- vector<ObjectExtent> extents;
+ std::vector<ObjectExtent> extents;
Striper::file_to_extents(cct, oset->ino, layout, offset, len,
oset->truncate_size, extents);
return is_cached(oset, extents, snapid);
}
int file_read(ObjectSet *oset, file_layout_t *layout, snapid_t snapid,
- loff_t offset, uint64_t len, bufferlist *bl, int flags,
+ loff_t offset, uint64_t len, ceph::buffer::list *bl, int flags,
Context *onfinish) {
OSDRead *rd = prepare_read(snapid, bl, flags);
Striper::file_to_extents(cct, oset->ino, layout, offset, len,
int file_write(ObjectSet *oset, file_layout_t *layout,
const SnapContext& snapc, loff_t offset, uint64_t len,
- bufferlist& bl, ceph::real_time mtime, int flags) {
+ ceph::buffer::list& bl, ceph::real_time mtime, int flags) {
OSDWrite *wr = prepare_write(snapc, bl, mtime, flags, 0);
Striper::file_to_extents(cct, oset->ino, layout, offset, len,
oset->truncate_size, wr->extents);
bool file_flush(ObjectSet *oset, file_layout_t *layout,
const SnapContext& snapc, loff_t offset, uint64_t len,
Context *onfinish) {
- vector<ObjectExtent> extents;
+ std::vector<ObjectExtent> extents;
Striper::file_to_extents(cct, oset->ino, layout, offset, len,
oset->truncate_size, extents);
ZTracer::Trace trace;
};
-inline ostream& operator<<(ostream &out, const ObjectCacher::BufferHead &bh)
+inline std::ostream& operator<<(std::ostream &out,
+ const ObjectCacher::BufferHead &bh)
{
out << "bh[ " << &bh << " "
<< bh.start() << "~" << bh.length()
if (bh.error) out << " error=" << bh.error;
out << "]";
out << " waiters = {";
- for (map<loff_t, list<Context*> >::const_iterator it
- = bh.waitfor_read.begin();
- it != bh.waitfor_read.end(); ++it) {
+ for (auto it = bh.waitfor_read.begin(); it != bh.waitfor_read.end(); ++it) {
out << " " << it->first << "->[";
- for (list<Context*>::const_iterator lit = it->second.begin();
+ for (auto lit = it->second.begin();
lit != it->second.end(); ++lit) {
out << *lit << ", ";
}
return out;
}
-inline ostream& operator<<(ostream &out, const ObjectCacher::ObjectSet &os)
+inline std::ostream& operator<<(std::ostream &out,
+ const ObjectCacher::ObjectSet &os)
{
return out << "objectset[" << os.ino
<< " ts " << os.truncate_seq << "/" << os.truncate_size
<< "]";
}
-inline ostream& operator<<(ostream &out, const ObjectCacher::Object &ob)
+inline std::ostream& operator<<(std::ostream &out,
+ const ObjectCacher::Object &ob)
{
out << "object["
- << ob.get_soid() << " oset " << ob.oset << dec
+ << ob.get_soid() << " oset " << ob.oset << std::dec
<< " wr " << ob.last_write_tid << "/" << ob.last_commit_tid;
if (ob.complete)