int ebofs_commit_ms;
int ebofs_oc_size;
int ebofs_cc_size;
- off_t ebofs_bc_size;
- off_t ebofs_bc_max_dirty;
+ __u64 ebofs_bc_size;
+ __u64 ebofs_bc_max_dirty;
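+ // bc sizes are byte counts; an explicit unsigned 64-bit type replaces the
+ // signed, platform-dependent off_t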
unsigned ebofs_max_prefetch;
bool ebofs_realloc;
bool ebofs_verify_csum_on_read;
assert(fd > 0);
- off_t offset = bno * EBOFS_BLOCK_SIZE;
- off_t actual = lseek(fd, offset, SEEK_SET);
+ __u64 offset = bno * EBOFS_BLOCK_SIZE;
+ __u64 actual = lseek(fd, offset, SEEK_SET);
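+ // lseek() returns (off_t)-1 on error; as a __u64 that is ~0ULL, which is never
+ // a multiple of the block size, so the assert below still catches a failed seek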
assert(actual == offset);
size_t len = num*EBOFS_BLOCK_SIZE;
assert(fd > 0);
while (1) {
- off_t offset = (off_t)bno << EBOFS_BLOCK_BITS;
- assert((off_t)bno * (off_t)EBOFS_BLOCK_SIZE == offset);
- off_t actual = lseek(fd, offset, SEEK_SET);
+ __u64 offset = bno << EBOFS_BLOCK_BITS;
+ assert((offset >> EBOFS_BLOCK_BITS) == bno); // in unsigned math bno<<BITS always equals bno*SIZE, so round-trip the shift to catch overflow
+ __u64 actual = lseek(fd, offset, SEEK_SET);
assert(actual == offset);
// write buffers
#include "Onode.h"
-void do_apply_partial(bufferlist& bl, map<off_t, bufferlist>& pm)
+void do_apply_partial(bufferlist& bl, map<__u64, bufferlist>& pm)
{
assert(bl.length() == (unsigned)EBOFS_BLOCK_SIZE);
//assert(partial_is_complete());
//cout << "apply_partial" << std::endl;
- for (map<off_t, bufferlist>::iterator i = pm.begin();
+ for (map<__u64, bufferlist>::iterator i = pm.begin();
i != pm.end();
i++) {
//cout << "do_apply_partial at " << i->first << "~" << i->second.length() << std::endl;
#define derr(x) if (x <= g_conf.debug_ebofs) *_derr << dbeginl << g_clock.now() << " ebofs." << *this << "."
-void BufferHead::add_partial(off_t off, bufferlist& p)
+void BufferHead::add_partial(__u64 off, bufferlist& p)
{
unsigned len = p.length();
assert(len <= (unsigned)EBOFS_BLOCK_SIZE);
assert(off + len <= EBOFS_BLOCK_SIZE);
// trim any existing that overlaps
- map<off_t, bufferlist>::iterator i = partial.begin();
+ map<__u64, bufferlist>::iterator i = partial.begin();
while (i != partial.end()) {
// is [off,off+len)...
// past i?
// overlap head of i?
if (off <= i->first && off+len < i->first + i->second.length()) {
// move i (make new tail).
- off_t tailoff = off+len;
+ __u64 tailoff = off+len;
unsigned trim = tailoff - i->first;
partial[tailoff].substr_of(i->second, trim, i->second.length()-trim);
partial.erase(i++); // should now be at tailoff
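+ // e.g. an existing fragment at offset 0, length 8, overlapped by a new write [0,4):
+ // tailoff=4, trim=4, and the surviving bytes [4,8) are re-keyed at offset 4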
assert(exv[0].start != 0);
block_t cur_block = exv[0].start;
- off_t off_in_bl = (bh->start() - start) * EBOFS_BLOCK_SIZE;
+ __u64 off_in_bl = (bh->start() - start) * EBOFS_BLOCK_SIZE;
- assert(off_in_bl >= 0);
+ assert(bh->start() >= start); // off_in_bl is unsigned, so ">= 0" is vacuous; check the subtraction that could wrap instead
- off_t len_in_bl = bh->length() * EBOFS_BLOCK_SIZE;
+ __u64 len_in_bl = bh->length() * EBOFS_BLOCK_SIZE;
// verify csum
csum_t want = *bh->oc->on->get_extent_csum_ptr(bh->start(), 1);
*bh->oc->on->get_extent_csum_ptr(bh->start(), 1) = got;
bh->oc->on->data_csum += got - want;
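+ // data_csum sums the per-extent csums, so replacing want with got means adding the (wrapping) difference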
- interval_set<off_t> bad;
+ interval_set<__u64> bad;
bad.insert(bh->start()*EBOFS_BLOCK_SIZE, EBOFS_BLOCK_SIZE);
bh->oc->on->bad_byte_extents.union_of(bad);
- interval_set<off_t> over;
- for (map<off_t,bufferlist>::iterator q = bh->partial.begin();
+ interval_set<__u64> over;
+ for (map<__u64,bufferlist>::iterator q = bh->partial.begin();
q != bh->partial.end();
q++)
over.insert(bh->start()*EBOFS_BLOCK_SIZE+q->first, q->second.length());
- interval_set<off_t> new_over;
+ interval_set<__u64> new_over;
new_over.intersection_of(over, bh->oc->on->bad_byte_extents);
bh->oc->on->bad_byte_extents.subtract(new_over);
}
ioh_t tx_ioh; //
block_t tx_block;
- map<off_t, bufferlist> partial; // partial dirty content overlayed onto incoming data
+ map<__u64, bufferlist> partial; // partial dirty content overlayed onto incoming data
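+ // (keys are byte offsets within this buffer head)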
map<block_t, list<Context*> > waitfor_read;
shadows.clear();
}
- void copy_partial_substr(off_t start, off_t end, bufferlist& bl) {
- map<off_t, bufferlist>::iterator i = partial.begin();
+ void copy_partial_substr(__u64 start, __u64 end, bufferlist& bl) {
+ map<__u64, bufferlist>::iterator i = partial.begin();
// skip first bits (fully to left)
while ((i->first + i->second.length() < start) &&
unsigned bhlen = MIN(end-start, i->second.length());
bl.substr_of( i->second, bhoff, bhlen );
- off_t pos = i->first + i->second.length();
+ __u64 pos = i->first + i->second.length();
// have continuous to end?
for (i++; i != partial.end(); i++) {
assert(bl.length() == (unsigned)(end-start));
}
- bool have_partial_range(off_t start, off_t end) {
- map<off_t, bufferlist>::iterator i = partial.begin();
+ bool have_partial_range(__u64 start, __u64 end) {
+ map<__u64, bufferlist>::iterator i = partial.begin();
// skip first bits (fully to left)
while ((i->first + i->second.length() < start) &&
// have start?
if (i->first > start) return false;
- off_t pos = i->first + i->second.length();
+ __u64 pos = i->first + i->second.length();
// have continuous to end?
for (i++; i != partial.end(); i++) {
return false;
}
- bool partial_is_complete(off_t size) {
+ bool partial_is_complete(__u64 size) {
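+ // complete == the fragments cover every byte of this block that exists in the object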
return have_partial_range( 0, MIN(size, EBOFS_BLOCK_SIZE) );
}
void apply_partial();
- void add_partial(off_t off, bufferlist& p);
+ void add_partial(__u64 off, bufferlist& p);
void take_read_waiters(list<Context*>& finished) {
for (map<block_t,list<Context*> >::iterator p = waitfor_read.begin();
Cond flush_cond;
int stat_waiter;
- off_t stat_all;
- off_t stat_clean, stat_corrupt;
- off_t stat_dirty;
- off_t stat_rx;
- off_t stat_tx;
- off_t stat_partial;
- off_t stat_missing;
+ __u64 stat_all;
+ __u64 stat_clean, stat_corrupt;
+ __u64 stat_dirty;
+ __u64 stat_rx;
+ __u64 stat_tx;
+ __u64 stat_partial;
+ __u64 stat_missing;
int partial_reads;
{}
- off_t get_size() {
+ __u64 get_size() {
assert(stat_clean+stat_dirty+stat_rx+stat_tx+stat_partial+stat_corrupt+stat_missing == stat_all);
return stat_all;
}
- off_t get_trimmable() {
+ __u64 get_trimmable() {
return stat_clean+stat_corrupt;
}
}
stat_all -= bh->length();
}
- off_t get_stat_tx() { return stat_tx; }
- off_t get_stat_rx() { return stat_rx; }
- off_t get_stat_dirty() { return stat_dirty; }
- off_t get_stat_clean() { return stat_clean; }
- off_t get_stat_partial() { return stat_partial; }
+ __u64 get_stat_tx() { return stat_tx; }
+ __u64 get_stat_rx() { return stat_rx; }
+ __u64 get_stat_dirty() { return stat_dirty; }
+ __u64 get_stat_clean() { return stat_clean; }
+ __u64 get_stat_partial() { return stat_partial; }
map<version_t, int> &get_unflushed(int what) {
}
// bad byte extents
- for (map<off_t,off_t>::iterator p = on->bad_byte_extents.m.begin();
+ for (map<__u64,__u64>::iterator p = on->bad_byte_extents.m.begin();
p != on->bad_byte_extents.m.end();
p++) {
extent_t o = {p->first, p->second};
ebofs_lock.Unlock();
}
-void Ebofs::trim_bc(off_t max)
+void Ebofs::trim_bc(__u64 max)
{
- if (max < 0)
+ if (max == (__u64)-1) // max is unsigned now, so "max < 0" can never be true; test the -1 sentinel explicitly
max = g_conf.ebofs_bc_size;
}
-int Ebofs::check_partial_edges(Onode *on, off_t off, off_t len,
+int Ebofs::check_partial_edges(Onode *on, __u64 off, __u64 len,
bool &partial_head, bool &partial_tail)
{
// partial block overwrite at head or tail?
- off_t last_block_byte = on->last_block * EBOFS_BLOCK_SIZE;
+ __u64 last_block_byte = on->last_block * EBOFS_BLOCK_SIZE;
partial_head = (off < last_block_byte) && (off & EBOFS_BLOCK_MASK);
partial_tail = ((off+len) < on->object_size) && ((off+len) & EBOFS_BLOCK_MASK);
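+ // e.g. with 4096-byte blocks and a 10000-byte object, a write at off=100 len=8000
+ // has a partial head (100 is mid-block) and a partial tail (8100 is mid-block, short of EOF)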
dout(10) << "check_partial_edges on " << *on << " " << off << "~" << len
return -1;
}
if (bh->is_partial()) {
- int off_in_bh = off & EBOFS_BLOCK_MASK;
- int end_in_bh = MAX(EBOFS_BLOCK_SIZE, off_in_bh+len);
+ unsigned off_in_bh = off & EBOFS_BLOCK_MASK;
+ unsigned end_in_bh = MIN((__u64)EBOFS_BLOCK_SIZE, off_in_bh+len); // MIN clamps the write's end to this block; MAX made the tail-coverage check below vacuous
if (!(off_in_bh == 0 || bh->have_partial_range(0, off_in_bh)) ||
!(end_in_bh == EBOFS_BLOCK_SIZE || bh->have_partial_range(end_in_bh, EBOFS_BLOCK_SIZE-end_in_bh))) {
dout(10) << "check_partial_edges can't complete partial head " << *bh << ", deferring" << dendl;
return -1;
}
if (bh->is_partial()) {
- off_t off_in_bh = off & EBOFS_BLOCK_MASK;
- off_t end_in_bh = MAX(EBOFS_BLOCK_SIZE, off_in_bh+len);
- off_t end = EBOFS_BLOCK_SIZE;
- if ((off_t)bh->end()*EBOFS_BLOCK_SIZE > last_block_byte)
+ __u64 off_in_bh = off & EBOFS_BLOCK_MASK;
+ __u64 end_in_bh = MIN((__u64)EBOFS_BLOCK_SIZE, off_in_bh+len); // clamp to the block, as above
+ __u64 end = EBOFS_BLOCK_SIZE;
+ if (bh->end()*EBOFS_BLOCK_SIZE > last_block_byte)
end = last_block_byte & EBOFS_BLOCK_MASK;
if (!(off_in_bh == 0 || bh->have_partial_range(0, off_in_bh)) ||
!(end_in_bh >= end || bh->have_partial_range(end_in_bh, end-end_in_bh))) {
return 0;
}
-int Ebofs::apply_write(Onode *on, off_t off, off_t len, const bufferlist& bl)
+int Ebofs::apply_write(Onode *on, __u64 off, __u64 len, const bufferlist& bl)
{
ObjectCache *oc = on->get_oc(&bc);
//oc->scrub_csums();
assert(bl.length() == len);
// map into blocks
- off_t opos = off; // byte pos in object
- off_t left = len; // bytes left
+ __u64 opos = off; // byte pos in object
+ __u64 left = len; // bytes left
block_t bstart = off / EBOFS_BLOCK_SIZE;
block_t blast = (len+off-1) / EBOFS_BLOCK_SIZE;
block_t blen = blast-bstart+1;
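+ // e.g. off=4000 len=200: bstart=0, blast=4199/4096=1, blen=2 (the write straddles a block boundary)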
// -- starting changing stuff --
// extending object?
- off_t old_object_size = on->object_size;
+ __u64 old_object_size = on->object_size;
if (off+len > on->object_size) {
dout(10) << "apply_write extending size on " << *on << ": " << on->object_size
<< " -> " << off+len << dendl;
if ((bh->start() == bstart && partial_head) ||
(bh->last() == blast && partial_tail)) {
unsigned len_in_bh = MIN( left,
- ((off_t)bh->end()*EBOFS_BLOCK_SIZE)-opos );
+ (bh->end()*EBOFS_BLOCK_SIZE)-opos );
if (bh->is_partial() || bh->is_rx() || bh->is_missing() || bh->is_corrupt()) {
assert(bh->length() == 1);
if (bh->is_corrupt()) {
dout(10) << "apply_write marking non-overwritten bytes bad on corrupt " << *bh << dendl;
- interval_set<off_t> bad;
+ interval_set<__u64> bad;
- off_t bs = bh->start() * EBOFS_BLOCK_SIZE;
+ __u64 bs = bh->start() * EBOFS_BLOCK_SIZE;
- if (off_in_bh) bad.insert(bs, bs+off_in_bh);
+ if (off_in_bh) bad.insert(bs, off_in_bh); // interval_set::insert takes (start, length), as used elsewhere; the old second arg was an end offset
if (off_in_bh+len_in_bh < (unsigned)EBOFS_BLOCK_SIZE)
- bad.insert(bs+off_in_bh+len_in_bh, bs+EBOFS_BLOCK_SIZE-off_in_bh-len_in_bh);
+ bad.insert(bs+off_in_bh+len_in_bh, EBOFS_BLOCK_SIZE-off_in_bh-len_in_bh);
// ok
// we're now writing up to a block boundary, or EOF.
- assert(off_in_bh+left >= (off_t)(EBOFS_BLOCK_SIZE*bh->length()) ||
+ assert(off_in_bh+left >= (__u64)(EBOFS_BLOCK_SIZE*bh->length()) ||
(opos+left) >= on->object_size);
- unsigned len_in_bh = MIN((off_t)bh->length()*EBOFS_BLOCK_SIZE - off_in_bh,
+ unsigned len_in_bh = MIN((__u64)bh->length()*EBOFS_BLOCK_SIZE - off_in_bh,
left);
assert(len_in_bh <= left);
// zero leader?
if (off_in_bh &&
opos > old_object_size) {
- off_t zstart = MAX(0, old_object_size-(off_t)bh->start()*EBOFS_BLOCK_SIZE);
- off_t zlen = off_in_bh - zstart;
+ // MAX(0, a-b) no longer guards anything in unsigned math: a-b wraps instead of going negative
+ __u64 bh_byte_start = (__u64)bh->start()*EBOFS_BLOCK_SIZE;
+ __u64 zstart = old_object_size > bh_byte_start ? old_object_size - bh_byte_start : 0;
+ __u64 zlen = off_in_bh - zstart;
dout(15) << "apply_write zeroing bh lead over " << zstart << "~" << zlen << dendl;
bh->data.zero(zstart, zlen);
}
// zero the past-eof tail, too, to be tidy.
if (len_in_bh < bh->data.length()) {
- off_t zstart = off_in_bh+len_in_bh;
- off_t zlen = bh->data.length()-(off_in_bh+len_in_bh);
+ __u64 zstart = off_in_bh+len_in_bh;
+ __u64 zlen = bh->data.length()-(off_in_bh+len_in_bh);
bh->data.zero(zstart, zlen);
dout(15) << "apply_write zeroing bh tail over " << zstart << "~" << zlen << dendl;
}
}
-int Ebofs::apply_zero(Onode *on, off_t off, size_t len)
+int Ebofs::apply_zero(Onode *on, __u64 off, size_t len)
{
dout(10) << "apply_zero " << off << "~" << len << " on " << *on << dendl;
// *** file i/o ***
-int Ebofs::attempt_read(Onode *on, off_t off, size_t len, bufferlist& bl,
+int Ebofs::attempt_read(Onode *on, __u64 off, size_t len, bufferlist& bl,
Cond *will_wait_on, bool *will_wait_on_bool)
{
dout(10) << "attempt_read " << *on << " " << off << "~" << len << dendl;
return -EIO;
}
if (on->bad_byte_extents.end() > off) {
- off_t bad = on->bad_byte_extents.start_after(off);
- if (bad < off+(off_t)len) {
+ __u64 bad = on->bad_byte_extents.start_after(off);
+ if (bad < off+(__u64)len) {
len = bad-off;
dout(10) << "attempt_read corrupt (bad byte extent) at " << bad << ", shortening read to " << len << dendl;
}
i != partials.end();
i++) {
BufferHead *bh = i->second;
- off_t bhstart = (off_t)(bh->start()*EBOFS_BLOCK_SIZE);
- off_t bhend = (off_t)(bh->end()*EBOFS_BLOCK_SIZE);
- off_t start = MAX( off, bhstart );
- off_t end = MIN( off+(off_t)len, bhend );
+ __u64 bhstart = (__u64)(bh->start()*EBOFS_BLOCK_SIZE);
+ __u64 bhend = (__u64)(bh->end()*EBOFS_BLOCK_SIZE);
+ __u64 start = MAX( off, bhstart );
+ __u64 end = MIN( off+(__u64)len, bhend );
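+ // intersect the requested byte range with this bh before testing partial coverage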
if (!i->second->have_partial_range(start-bhstart, end-bhstart)) {
// wait on this one
map<block_t,BufferHead*>::iterator p = partials.begin();
bl.clear();
- off_t pos = off;
+ __u64 pos = off;
block_t curblock = bstart;
while (curblock <= blast) {
BufferHead *bh = 0;
p++;
} else assert(0);
- off_t bhstart = (off_t)(bh->start()*EBOFS_BLOCK_SIZE);
- off_t bhend = (off_t)(bh->end()*EBOFS_BLOCK_SIZE);
- off_t start = MAX( pos, bhstart );
- off_t end = MIN( off+(off_t)len, bhend );
+ __u64 bhstart = (__u64)(bh->start()*EBOFS_BLOCK_SIZE);
+ __u64 bhend = (__u64)(bh->end()*EBOFS_BLOCK_SIZE);
+ __u64 start = MAX( pos, bhstart );
+ __u64 end = MIN( off+(__u64)len, bhend );
if (bh->is_corrupt()) {
if (bl.length()) {
* returns -1 if the onode isn't loaded; otherwise, the number
* of extents that need to be read (i.e. the # of seeks)
*/
-int Ebofs::is_cached(coll_t cid, pobject_t oid, off_t off, size_t len)
+int Ebofs::is_cached(coll_t cid, pobject_t oid, __u64 off, size_t len)
{
ebofs_lock.Lock();
int r = _is_cached(oid, off, len);
return r;
}
-int Ebofs::_is_cached(pobject_t oid, off_t off, size_t len)
+int Ebofs::_is_cached(pobject_t oid, __u64 off, size_t len)
{
if (!have_onode(oid)) {
dout(7) << "_is_cached " << oid << " " << off << "~" << len << " ... onode " << dendl;
*/
}
-void Ebofs::trim_from_cache(coll_t cid, pobject_t oid, off_t off, size_t len)
+void Ebofs::trim_from_cache(coll_t cid, pobject_t oid, __u64 off, size_t len)
{
ebofs_lock.Lock();
_trim_from_cache(oid, off, len);
ebofs_lock.Unlock();
}
-void Ebofs::_trim_from_cache(pobject_t oid, off_t off, size_t len)
+void Ebofs::_trim_from_cache(pobject_t oid, __u64 off, size_t len)
{
// be careful not to load it if we don't have it
if (!have_onode(oid)) {
int Ebofs::read(coll_t cid, pobject_t oid,
- off_t off, size_t len,
+ __u64 off, size_t len,
bufferlist& bl)
{
ebofs_lock.Lock();
return r;
}
-int Ebofs::_read(pobject_t oid, off_t off, size_t len, bufferlist& bl)
+int Ebofs::_read(pobject_t oid, __u64 off, size_t len, bufferlist& bl)
{
dout(7) << "_read " << oid << " " << off << "~" << len << dendl;
}
size_t try_len = len ? len : on->object_size;
- size_t will_read = MIN(off+(off_t)try_len, on->object_size) - off;
+ size_t will_read = MIN(off+(__u64)try_len, on->object_size) - off;
bool done;
r = attempt_read(on, off, will_read, bl, &cond, &done);
{
pobject_t oid;
t.get_oid(oid);
- off_t offset, len;
+ __u64 offset, len;
t.get_length(offset);
t.get_length(len);
bufferlist *pbl;
{
pobject_t oid;
t.get_oid(oid);
- off_t offset, len;
+ __u64 offset, len;
t.get_length(offset);
t.get_length(len);
bufferlist bl;
{
pobject_t oid;
t.get_oid(oid);
- off_t offset, len;
+ __u64 offset, len;
t.get_length(offset);
t.get_length(len);
if (_zero(oid, offset, len) < 0) {
{
pobject_t oid;
t.get_oid(oid);
- off_t offset, len;
+ __u64 offset, len;
t.get_length(offset);
t.get_length(len);
_trim_from_cache(oid, offset, len);
{
pobject_t oid;
t.get_oid(oid);
- off_t len;
+ __u64 len;
t.get_length(len);
if (_truncate(oid, len) < 0) {
dout(7) << "apply_transaction fail on _truncate" << dendl;
-int Ebofs::_write(pobject_t oid, off_t offset, size_t length, const bufferlist& bl)
+int Ebofs::_write(pobject_t oid, __u64 offset, size_t length, const bufferlist& bl)
{
dout(7) << "_write " << oid << " " << offset << "~" << length << dendl;
assert(bl.length() == length);
return length;
}
-int Ebofs::_zero(pobject_t oid, off_t offset, size_t length)
+int Ebofs::_zero(pobject_t oid, __u64 offset, size_t length)
{
dout(7) << "_zero " << oid << " " << offset << "~" << length << dendl;
if (length > 0 &&
offset < on->object_size) {
- if (offset + (off_t)length >= on->object_size) {
+ if (offset + (__u64)length >= on->object_size) {
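+ // zeroing through EOF is equivalent to truncating at the start of the range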
_truncate(oid, offset);
} else {
while (1) {
int Ebofs::write(coll_t cid, pobject_t oid,
- off_t off, size_t len,
+ __u64 off, size_t len,
const bufferlist& bl, Context *onsafe)
{
ebofs_lock.Lock();
return r;
}
-int Ebofs::zero(coll_t cid, pobject_t oid, off_t off, size_t len, Context *onsafe)
+int Ebofs::zero(coll_t cid, pobject_t oid, __u64 off, size_t len, Context *onsafe)
{
ebofs_lock.Lock();
return r;
}
-int Ebofs::_truncate(pobject_t oid, off_t size)
+int Ebofs::_truncate(pobject_t oid, __u64 size)
{
dout(7) << "_truncate " << oid << " size " << size << dendl;
}
-int Ebofs::truncate(coll_t cid, pobject_t oid, off_t size, Context *onsafe)
+int Ebofs::truncate(coll_t cid, pobject_t oid, __u64 size, Context *onsafe)
{
ebofs_lock.Lock();
version_t trigger_commit();
void commit_bc_wait(version_t epoch);
- void trim_bc(off_t max = -1);
+ void trim_bc(__u64 max = -1); // (__u64)-1 means "trim to the configured limit"
public:
void kick_idle();
protected:
- int check_partial_edges(Onode *on, off_t off, off_t len,
+ int check_partial_edges(Onode *on, __u64 off, __u64 len,
bool &partial_head, bool &partial_tail);
void alloc_write(Onode *on,
interval_set<block_t>& alloc,
block_t& old_bfirst, block_t& old_blast,
csum_t& old_csum_first, csum_t& old_csum_last);
- int apply_write(Onode *on, off_t off, off_t len, const bufferlist& bl);
- int apply_zero(Onode *on, off_t off, size_t len);
- int attempt_read(Onode *on, off_t off, size_t len, bufferlist& bl,
+ int apply_write(Onode *on, __u64 off, __u64 len, const bufferlist& bl);
+ int apply_zero(Onode *on, __u64 off, size_t len);
+ int attempt_read(Onode *on, __u64 off, size_t len, bufferlist& bl,
Cond *will_wait_on, bool *will_wait_on_bool);
Finisher finisher;
// object interface
bool exists(coll_t cid, pobject_t);
int stat(coll_t cid, pobject_t, struct stat*);
- int read(coll_t cid, pobject_t, off_t off, size_t len, bufferlist& bl);
- int is_cached(coll_t cid, pobject_t oid, off_t off, size_t len);
+ int read(coll_t cid, pobject_t, __u64 off, size_t len, bufferlist& bl);
+ int is_cached(coll_t cid, pobject_t oid, __u64 off, size_t len);
- int write(coll_t cid, pobject_t oid, off_t off, size_t len, const bufferlist& bl, Context *onsafe);
- int zero(coll_t cid, pobject_t oid, off_t off, size_t len, Context *onsafe);
- int truncate(coll_t cid, pobject_t oid, off_t size, Context *onsafe=0);
+ int write(coll_t cid, pobject_t oid, __u64 off, size_t len, const bufferlist& bl, Context *onsafe);
+ int zero(coll_t cid, pobject_t oid, __u64 off, size_t len, Context *onsafe);
+ int truncate(coll_t cid, pobject_t oid, __u64 size, Context *onsafe=0);
int remove(coll_t cid, pobject_t oid, Context *onsafe=0);
bool write_will_block();
- void trim_from_cache(coll_t cid, pobject_t oid, off_t off, size_t len);
+ void trim_from_cache(coll_t cid, pobject_t oid, __u64 off, size_t len);
int rename(pobject_t from, pobject_t to);
int clone(coll_t cid, pobject_t from, pobject_t to, Context *onsafe);
// private interface -- use if caller already holds lock
unsigned _apply_transaction(Transaction& t);
- int _read(pobject_t oid, off_t off, size_t len, bufferlist& bl);
- int _is_cached(pobject_t oid, off_t off, size_t len);
+ int _read(pobject_t oid, __u64 off, size_t len, bufferlist& bl);
+ int _is_cached(pobject_t oid, __u64 off, size_t len);
int _stat(pobject_t oid, struct stat *st);
int _getattr(pobject_t oid, const char *name, void *value, size_t size);
int _getattrs(pobject_t oid, map<string,bufferptr> &aset);
int _get_object_collections(pobject_t oid, set<coll_t>& ls);
bool _write_will_block();
- int _write(pobject_t oid, off_t off, size_t len, const bufferlist& bl);
- void _trim_from_cache(pobject_t oid, off_t off, size_t len);
- int _truncate(pobject_t oid, off_t size);
- int _zero(pobject_t oid, off_t offset, size_t length);
+ int _write(pobject_t oid, __u64 off, size_t len, const bufferlist& bl);
+ void _trim_from_cache(pobject_t oid, __u64 off, size_t len);
+ int _truncate(pobject_t oid, __u64 size);
+ int _zero(pobject_t oid, __u64 offset, size_t length);
int _remove(pobject_t oid);
int _clone(pobject_t from, pobject_t to);
int _setattr(pobject_t oid, const char *name, const void *value, size_t size);
extent_t onode_loc;
epoch_t last_alloc_epoch; // epoch i last allocated for
- __s64 object_size;
+ __u64 object_size;
__u64 alloc_blocks, last_block;
csum_t data_csum;
bool readonly;
map<string, bufferptr> attr;
map<block_t, ExtentCsum> extent_map;
- interval_set<off_t> bad_byte_extents;
+ interval_set<__u64> bad_byte_extents;
interval_set<block_t> uncommitted;
pobject_t oid(0, 0, object_t(1,2));
- off_t pos = 0;
- off_t sz = 16;
+ __u64 pos = 0;
+ __u64 sz = 16;
bufferlist bl;
bl.append(crap, sz);
char *p = bl.c_str();
- off_t o = 0;
+ __u64 o = 0;
for (int i=0; i<n; i++) {
cout << "write at " << o << std::endl;
for (int j=0;j<l;j++)
utime_t start = g_clock.now();
- for (off_t m=0; m<megs; m++) {
+ for (__u64 m=0; m<megs; m++) {
//if (m%100 == 0)
cout << m << " / " << megs << std::endl;
fs.write(10, bl.length(), 1024LL*1024LL*m, bl, (Context*)0);
if (1) {
srand(0);
for (int i=0; i<10000; i++) {
- off_t off = rand() % 1000000;
+ __u64 off = rand() % 1000000;
size_t len = 1+rand() % 10000;
cout << std::endl << i << " writing bit at " << off << " len " << len << std::endl;
fs.write(10, len, off, bl, (Context*)0);
}
fs.remove(10);
for (int i=0; i<100; i++) {
- off_t off = rand() % 1000000;
+ __u64 off = rand() % 1000000;
size_t len = 1+rand() % 10000;
cout << std::endl << i << " writing bit at " << off << " len " << len << std::endl;
fs.write(10, len, off, bl, (Context*)0);
if (0) {
// sequential write
srand(0);
- off_t off = 0;
+ __u64 off = 0;
for (int i=0; i<10000; i++) {
size_t len = 1024*1024;//1+rand() % 10000;
cout << std::endl << i << " writing bit at " << off << " len " << len << std::endl;
srand(0);
for (int i=0; i<100; i++) {
bufferlist bl;
- off_t off = rand() % 1000000;
+ __u64 off = rand() % 1000000;
size_t len = rand() % 1000;
cout << std::endl << "read bit at " << off << " len " << len << std::endl;
int r = fs.read(10, len, off, bl);
srand(0);
for (int i=0; i<100; i++) {
bufferlist bl;
- off_t off = rand() % 1000000;
+ __u64 off = rand() % 1000000;
size_t len = 100;
cout << std::endl << "read bit at " << off << " len " << len << std::endl;
int r = fs.read(10, len, off, bl);
// write on empty cache
srand(0);
for (int i=0; i<100; i++) {
- off_t off = rand() % 1000000;
+ __u64 off = rand() % 1000000;
size_t len = 100;
cout << std::endl << "writing bit at " << off << " len " << len << std::endl;
fs.write(10, len, off, bl, (Context*)0);
pobject_t oid;
oid.oid.ino = (rand() % 1000) + 0x10000000;
coll_t cid = rand() % 50;
- off_t off = rand() % 10000;//0;//rand() % 1000000;
- off_t len = 1+rand() % 100000;
+ __u64 off = rand() % 10000;//0;//rand() % 1000000;
+ __u64 len = 1+rand() % 100000;
const char *a = "one";
if (rand() % 2) a = "two";
int l = 3;//rand() % 10;
{
cout << t << " write " << hex << oid << dec << " at " << off << " len " << len << std::endl;
char b[len];
- for (int j=0;j<len;j++)
+ for (unsigned j=0;j<len;j++)
b[j] = fingerprint_byte_at(off+j, oid.oid.ino);
bufferlist w;
w.append(b, len);
// disk
typedef uint64_t block_t; // disk location/sector/block
-static const int EBOFS_BLOCK_SIZE = 4096;
-static const int EBOFS_BLOCK_MASK = 4095;
-static const int EBOFS_BLOCK_BITS = 12; // 1<<12 == 4096
+static const unsigned EBOFS_BLOCK_SIZE = 4096;
+static const unsigned EBOFS_BLOCK_MASK = 4095;
+static const unsigned EBOFS_BLOCK_BITS = 12; // 1<<12 == 4096
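+// EBOFS_BLOCK_MASK == EBOFS_BLOCK_SIZE-1: (off & EBOFS_BLOCK_MASK) is the offset
+// within a block, (off >> EBOFS_BLOCK_BITS) the block number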
struct extent_t {
block_t start, length;
return r < 0 ? -errno:r;
}
-int FileStore::truncate(coll_t cid, pobject_t oid, off_t size, Context *onsafe)
+int FileStore::truncate(coll_t cid, pobject_t oid, __u64 size, Context *onsafe)
{
dout(20) << "truncate " << cid << " " << oid << " size " << size << dendl;
}
int FileStore::read(coll_t cid, pobject_t oid,
- off_t offset, size_t len,
+ __u64 offset, size_t len,
bufferlist& bl) {
dout(20) << "read " << cid << " " << oid << " len " << len << " off " << offset << dendl;
}
::flock(fd, LOCK_EX); // lock for safety
- off_t actual = lseek(fd, offset, SEEK_SET);
+ __u64 actual = lseek(fd, offset, SEEK_SET);
size_t got = 0;
if (len == 0) {
int FileStore::write(coll_t cid, pobject_t oid,
- off_t offset, size_t len,
+ __u64 offset, size_t len,
const bufferlist& bl,
Context *onsafe)
{
::flock(fd, LOCK_EX); // lock for safety
// seek
- off_t actual = ::lseek(fd, offset, SEEK_SET);
+ __u64 actual = ::lseek(fd, offset, SEEK_SET);
int did = 0;
assert(actual == offset);
bool exists(coll_t cid, pobject_t oid);
int stat(coll_t cid, pobject_t oid, struct stat *st);
int remove(coll_t cid, pobject_t oid, Context *onsafe);
- int truncate(coll_t cid, pobject_t oid, off_t size, Context *onsafe);
- int read(coll_t cid, pobject_t oid, off_t offset, size_t len, bufferlist& bl);
- int write(coll_t cid, pobject_t oid, off_t offset, size_t len, const bufferlist& bl, Context *onsafe);
+ int truncate(coll_t cid, pobject_t oid, __u64 size, Context *onsafe);
+ int read(coll_t cid, pobject_t oid, __u64 offset, size_t len, bufferlist& bl);
+ int write(coll_t cid, pobject_t oid, __u64 offset, size_t len, const bufferlist& bl, Context *onsafe);
int clone(coll_t cid, pobject_t oldoid, pobject_t newoid);
void sync();
int collection_list(coll_t c, list<pobject_t>& o);
int pick_object_revision_lt(coll_t cid, pobject_t& oid) { return -1; }
- void trim_from_cache(coll_t cid, pobject_t oid, off_t offset, size_t len) {}
- int is_cached(coll_t cid, pobject_t oid, off_t offset, size_t len) { return -1; }
+ void trim_from_cache(coll_t cid, pobject_t oid, __u64 offset, size_t len) {}
+ int is_cached(coll_t cid, pobject_t oid, __u64 offset, size_t len) { return -1; }
};
cid = cids.front();
cids.pop_front();
}
- void get_length(off_t& len) {
+ void get_length(__u64& len) {
len = lengths.front();
lengths.pop_front();
}
}
- void read(coll_t cid, pobject_t oid, off_t off, size_t len, bufferlist *pbl) {
+ void read(coll_t cid, pobject_t oid, __u64 off, size_t len, bufferlist *pbl) {
int op = OP_READ;
ops.push_back(op);
cids.push_back(cid);
pattrsets.push_back(&aset);
}
- void write(coll_t cid, pobject_t oid, off_t off, size_t len, const bufferlist& bl) {
+ void write(coll_t cid, pobject_t oid, __u64 off, size_t len, const bufferlist& bl) {
int op = OP_WRITE;
ops.push_back(op);
cids.push_back(cid);
lengths.push_back(len);
bls.push_back(bl);
}
- void zero(coll_t cid, pobject_t oid, off_t off, size_t len) {
+ void zero(coll_t cid, pobject_t oid, __u64 off, size_t len) {
int op = OP_ZERO;
ops.push_back(op);
cids.push_back(cid);
lengths.push_back(off);
lengths.push_back(len);
}
- void trim_from_cache(coll_t cid, pobject_t oid, off_t off, size_t len) {
+ void trim_from_cache(coll_t cid, pobject_t oid, __u64 off, size_t len) {
int op = OP_TRIMCACHE;
ops.push_back(op);
cids.push_back(cid);
lengths.push_back(off);
lengths.push_back(len);
}
- void truncate(coll_t cid, pobject_t oid, off_t off) {
+ void truncate(coll_t cid, pobject_t oid, __u64 off) {
int op = OP_TRUNCATE;
ops.push_back(op);
cids.push_back(cid);
{
coll_t cid;
pobject_t oid;
- off_t offset, len;
+ __u64 offset, len;
t.get_cid(cid);
t.get_oid(oid);
t.get_length(offset);
t.get_cid(cid);
pobject_t oid;
t.get_oid(oid);
- off_t offset, len;
+ __u64 offset, len;
t.get_length(offset);
t.get_length(len);
bufferlist bl;
t.get_cid(cid);
pobject_t oid;
t.get_oid(oid);
- off_t offset, len;
+ __u64 offset, len;
t.get_length(offset);
t.get_length(len);
zero(cid, oid, offset, len, 0);
t.get_cid(cid);
pobject_t oid;
t.get_oid(oid);
- off_t offset, len;
+ __u64 offset, len;
t.get_length(offset);
t.get_length(len);
trim_from_cache(cid, oid, offset, len);
t.get_cid(cid);
pobject_t oid;
t.get_oid(oid);
- off_t len;
+ __u64 len;
t.get_length(len);
truncate(cid, oid, len, 0);
}
virtual bool exists(coll_t cid, pobject_t oid) = 0; // useful?
virtual int stat(coll_t cid, pobject_t oid, struct stat *st) = 0; // struct stat?
virtual int remove(coll_t cid, pobject_t oid, Context *onsafe=0) = 0;
- virtual int truncate(coll_t cid, pobject_t oid, off_t size, Context *onsafe=0) = 0;
+ virtual int truncate(coll_t cid, pobject_t oid, __u64 size, Context *onsafe=0) = 0;
- virtual int read(coll_t cid, pobject_t oid, off_t offset, size_t len, bufferlist& bl) = 0;
- virtual int write(coll_t cid, pobject_t oid, off_t offset, size_t len, const bufferlist& bl, Context *onsafe) = 0;
- virtual int zero(coll_t cid, pobject_t oid, off_t offset, size_t len, Context *onsafe) {
+ virtual int read(coll_t cid, pobject_t oid, __u64 offset, size_t len, bufferlist& bl) = 0;
+ virtual int write(coll_t cid, pobject_t oid, __u64 offset, size_t len, const bufferlist& bl, Context *onsafe) = 0;
+ virtual int zero(coll_t cid, pobject_t oid, __u64 offset, size_t len, Context *onsafe) {
// write zeros.. yuck!
bufferptr bp(len);
+ bp.zero(); // bufferptr(len) allocates uninitialized memory; zero it so we actually write zeros
bufferlist bl;
bl.push_back(bp);
return write(cid, oid, offset, len, bl, onsafe);
}
- virtual void trim_from_cache(coll_t cid, pobject_t oid, off_t offset, size_t len) = 0; //{ }
- virtual int is_cached(coll_t cid, pobject_t oid, off_t offset, size_t len) = 0; //{ return -1; }
+ virtual void trim_from_cache(coll_t cid, pobject_t oid, __u64 offset, size_t len) = 0; //{ }
+ virtual int is_cached(coll_t cid, pobject_t oid, __u64 offset, size_t len) = 0; //{ return -1; }
virtual int setattr(coll_t cid, pobject_t oid, const char *name,
const void *value, size_t size,