OPTION(osd_bench_max_block_size, OPT_U64, 64 << 20) // cap the block size at 64MB
OPTION(osd_bench_duration, OPT_U32, 30) // duration of 'osd bench', capped at 30s to avoid triggering timeouts
+// Simulated size of the memstore backing device. Declared OPT_U64 (not
+// OPT_U32) so devices of 4GB and larger can be configured; the in-memory
+// accounting (MemStore::used_bytes) is 64-bit and statfs() divides this
+// value into 64-bit block counts.
+OPTION(memstore_device_bytes, OPT_U64, 1024*1024*1024)
+
OPTION(filestore_omap_backend, OPT_STR, "leveldb")
OPTION(filestore_debug_disable_sharded_check, OPT_BOOL, false)
bufferlist::iterator p = cbl.begin();
c->decode(p);
coll_map[*q] = c;
+ used_bytes += c->used_bytes();
}
fn = path + "/sharded";
int MemStore::statfs(struct statfs *st)
{
  dout(10) << __func__ << dendl;
- // make some shit up. these are the only fields that matter.
  st->f_bsize = 1024;
- st->f_blocks = 1000000;
- st->f_bfree = 1000000;
- st->f_bavail = 1000000;
+
+ // Device size is a configured constant
+ st->f_blocks = g_conf->memstore_device_bytes / st->f_bsize;
+
+ dout(10) << __func__ << ": used_bytes: " << used_bytes << "/" << g_conf->memstore_device_bytes << dendl;
+
+ // Round used bytes UP to whole blocks so free space is never overstated,
+ // and guard the subtraction explicitly: used_bytes is uint64_t, so
+ // (f_blocks - used_bytes / f_bsize) is evaluated in unsigned arithmetic
+ // and wraps on underflow -- a MAX(..., 0) clamp after the fact would
+ // never fire. Clamp to zero when usage exceeds the configured device.
+ uint64_t used_blocks = (used_bytes + st->f_bsize - 1) / st->f_bsize;
+ st->f_bfree = st->f_bavail =
+   (used_blocks < (uint64_t)st->f_blocks) ? st->f_blocks - used_blocks : 0;
+
  return 0;
}
c->object_hash[oid] = o;
}
+ int old_size = o->data.length();
_write_into_bl(bl, offset, &o->data);
+ used_bytes += (o->data.length() - old_size);
+
return 0;
}
if (o->data.length() > size) {
bufferlist bl;
bl.substr_of(o->data, 0, size);
+ used_bytes -= o->data.length() - size;
o->data.claim(bl);
} else if (o->data.length() == size) {
// do nothing
} else {
bufferptr bp(size - o->data.length());
bp.zero();
+ used_bytes += bp.length();
o->data.append(bp);
}
return 0;
return -ENOENT;
c->object_map.erase(oid);
c->object_hash.erase(oid);
+
+ used_bytes -= o->data.length();
+
return 0;
}
c->object_map[newoid] = no;
c->object_hash[newoid] = no;
}
+ used_bytes += oo->data.length() - no->data.length();
no->data = oo->data;
no->omap_header = oo->omap_header;
no->omap = oo->omap;
len = oo->data.length() - srcoff;
bufferlist bl;
bl.substr_of(oo->data, srcoff, len);
+
+ int old_size = no->data.length();
_write_into_bl(bl, dstoff, &no->data);
+ used_bytes += (no->data.length() - old_size);
+
return len;
}
if (!cp->second->object_map.empty())
return -ENOTEMPTY;
}
+ used_bytes -= cp->second->used_bytes();
coll_map.erase(cp);
return 0;
}
DECODE_FINISH(p);
}
+ /// Total in-memory data payload of every object in this collection.
+ /// Used to adjust MemStore::used_bytes in bulk when a collection is
+ /// loaded from disk or destroyed (see coll_map load / _destroy_collection).
+ uint64_t used_bytes() const {
+ uint64_t result = 0;
+ for (map<ghobject_t, ObjectRef>::const_iterator p = object_map.begin();
+ p != object_map.end();
+ ++p) {
+ result += p->second->data.length();
+ }
+
+ return result;
+ }
+
// Default-construct with a named per-collection mutex (name aids lockdep/debugging).
Collection() : lock("MemStore::Collection::lock") {}
};
typedef ceph::shared_ptr<Collection> CollectionRef;
Finisher finisher;
+ uint64_t used_bytes;
+
void _do_transaction(Transaction& t);
void _write_into_bl(const bufferlist& src, unsigned offset, bufferlist *dst);
coll_lock("MemStore::coll_lock"),
apply_lock("MemStore::apply_lock"),
finisher(cct),
- sharded(false) { }
+ used_bytes(0),
+ sharded(false) {}
~MemStore() { }
bool need_journal() { return false; };