// True when len is an exact multiple of the page size: ~CEPH_PAGE_MASK
// presumably selects the sub-page bits of len — TODO confirm against the
// CEPH_PAGE_MASK definition.
bool is_n_page_sized() {
return (len & ~CEPH_PAGE_MASK) == 0;
}
+ // Whether copies of this raw may reference (share) the same memory
+ // rather than deep-copying it; subclasses backing scarce/registered
+ // memory override this to return false.
+ virtual bool is_shareable() {
+ // true if safe to reference/share the existing buffer copy
+ // false if it is not safe to share the buffer, e.g., due to special
+ // and/or registered memory that is scarce
+ return true;
+ }
// NOTE(review): this body looks garbled by extraction — a bool-returning
// get_crc() cannot return _raw->clone() (a raw*); the real cached-CRC
// lookup is presumably missing here. Verify against the full source.
bool get_crc(const pair<size_t, size_t> &fromto,
pair<uint32_t, uint32_t> *crc) const {
Mutex::Locker l(crc_lock); // crc cache is guarded by crc_lock
return _raw->clone();
}
+ // Ensure this ptr references shareable memory: if the current raw
+ // reports !is_shareable(), replace it with a deep copy and drop our
+ // reference to the original, deleting it if we held the last ref.
+ // Returns *this to allow chaining.
+ buffer::ptr& buffer::ptr::make_shareable() {
+ if (_raw && !_raw->is_shareable()) {
+ buffer::raw *tr = _raw;
+ _raw = tr->clone();
+ _raw->nref.set(1); // the fresh clone is referenced only by us
+ if (unlikely(tr->nref.dec() == 0)) {
+ delete tr; // we were the last holder of the old raw
+ }
+ }
+ return *this;
+ }
+
// NOTE(review): body appears truncated in this view — only the save of
// _raw is visible; the exchange of fields with `other` is presumably
// elided by chunking. Verify against the full source.
void buffer::ptr::swap(ptr& other)
{
raw *r = _raw;
}
// sort-of-like-assignment-op
- void buffer::list::claim(list& bl)
+ // Move bl's buffers into this list, first freeing our current
+ // contents. flags is forwarded to claim_append(); pass
+ // CLAIM_ALLOW_NONSHAREABLE to steal non-shareable raws as-is.
+ void buffer::list::claim(list& bl, unsigned int flags)
{
// free my buffers
clear();
- claim_append(bl);
+ claim_append(bl, flags);
}
- void buffer::list::claim_append(list& bl)
+ // Splice bl's buffers onto our tail, leaving bl empty. Unless
+ // CLAIM_ALLOW_NONSHAREABLE is set, bl's non-shareable raws are
+ // deep-copied (made shareable) before the splice.
+ void buffer::list::claim_append(list& bl, unsigned int flags)
{
// steal the other guy's buffers
_len += bl._len;
- _buffers.splice( _buffers.end(), bl._buffers );
+ if (!(flags & CLAIM_ALLOW_NONSHAREABLE))
+ bl.make_shareable();
+ _buffers.splice(_buffers.end(), bl._buffers );
bl._len = 0;
bl.last_p = bl.begin(); // bl's cached iterator must not dangle
}
- void buffer::list::claim_prepend(list& bl)
+ // Splice bl's buffers onto our head, leaving bl empty. Unless
+ // CLAIM_ALLOW_NONSHAREABLE is set, bl's non-shareable raws are
+ // deep-copied (made shareable) before the splice.
+ void buffer::list::claim_prepend(list& bl, unsigned int flags)
{
// steal the other guy's buffers
_len += bl._len;
- _buffers.splice( _buffers.begin(), bl._buffers );
+ if (!(flags & CLAIM_ALLOW_NONSHAREABLE))
+ bl.make_shareable();
+ _buffers.splice(_buffers.begin(), bl._buffers );
bl._len = 0;
bl.last_p = bl.begin(); // bl's cached iterator must not dangle
}
raw *clone();
void swap(ptr& other);
+ // Replace a non-shareable raw with a shareable deep copy; returns *this.
+ ptr& make_shareable();
// misc
bool at_buffer_head() const { return _off == 0; }
append_buffer.set_length(0); // unused, so far.
}
~list() {} // empty: member ptrs release their raws themselves
-
- list(const list& other) : _buffers(other._buffers), _len(other._len), _memcopy_count(other._memcopy_count),last_p(this) { }
+ // Copy ctor: shares the raw buffers, then deep-copies any
+ // non-shareable ones so a copy never aliases volatile memory.
+ list(const list& other) : _buffers(other._buffers), _len(other._len),
+ _memcopy_count(other._memcopy_count), last_p(this) {
+ make_shareable();
+ }
list& operator= (const list& other) {
if (this != &other) {
_buffers = other._buffers;
_len = other._len;
+ // as in the copy ctor, never keep references to non-shareable raws
+ make_shareable();
}
return *this;
}
unsigned align_memory);
void rebuild_page_aligned();
- // sort-of-like-assignment-op
- void claim(list& bl);
- void claim_append(list& bl);
- void claim_prepend(list& bl);
+ // assignment-op with move semantics
+ const static unsigned int CLAIM_DEFAULT = 0;
+ // allow the claimed buffers to remain non-shareable (skip the clone)
+ const static unsigned int CLAIM_ALLOW_NONSHAREABLE = 1;
+
+ void claim(list& bl, unsigned int flags = CLAIM_DEFAULT);
+ void claim_append(list& bl, unsigned int flags = CLAIM_DEFAULT);
+ void claim_prepend(list& bl, unsigned int flags = CLAIM_DEFAULT);
+
+ // clone non-shareable buffers (make shareable)
+ void make_shareable() {
+ std::list<buffer::ptr>::iterator pb;
+ for (pb = _buffers.begin(); pb != _buffers.end(); ++pb) {
+ (void) pb->make_shareable(); // per-ptr clone only when needed
+ }
+ }
+ // copy with explicit volatile-sharing semantics
+ // Unlike operator=, share() deliberately references bl's buffers
+ // without cloning non-shareable ones; the caller accepts the aliasing.
+ void share(const list& bl)
+ {
+ if (this != &bl) { // self-share would clear() our own buffers first
+ clear();
+ std::list<buffer::ptr>::const_iterator pb;
+ for (pb = bl._buffers.begin(); pb != bl._buffers.end(); ++pb) {
+ push_back(*pb); // ptr copy references the same raw
+ }
+ }
+ }
// iterator positioned at offset 0 of this list
iterator begin() {
return iterator(this, 0);
}
bufferlist& get_data() { return data; }
- void claim_data(bufferlist& bl) {
+ // Move the payload into bl, crediting byte_throttler (if any) for the
+ // released bytes; flags is forwarded to bufferlist::claim().
+ void claim_data(bufferlist& bl,
+ unsigned int flags = buffer::list::CLAIM_DEFAULT) {
if (byte_throttler)
byte_throttler->put(data.length());
- bl.claim(data);
+ bl.claim(data, flags);
}
off_t get_data_len() { return data.length(); }