unsigned in_flight;
map<ghobject_t, Object> contents;
set<ghobject_t> available_objects;
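+ // cursor into available_objects; get_next_object() below advances it and
+ // wraps back to begin(), so objects are handed out in a stable round-robin order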
+ set<ghobject_t>::iterator next_available_object;
set<ghobject_t> in_flight_objects;
ObjectGenerator *object_gen;
gen_type *rng;
unsigned max_write,
unsigned alignment)
: cid(cid), write_alignment(alignment), max_object_len(max_size),
- max_write_len(max_write), in_flight(0), object_gen(gen),
- rng(rng), store(store) {}
+ max_write_len(max_write), in_flight(0),
+ next_available_object(available_objects.end()),
+ object_gen(gen), rng(rng), store(store) {}
int init() {
ObjectStore::Transaction t;
t.create_collection(cid, 0);
return queue_transaction(store, ch, std::move(t));
}
void shutdown() {
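+ // resume each collection_list() call from 'next' so the loop pages through
+ // the whole collection, 10 objects at a time, until nothing is left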
+ ghobject_t next;
while (1) {
vector<ghobject_t> objects;
- int r = store->collection_list(ch, ghobject_t(), ghobject_t::get_max(),
- 10, &objects, 0);
+ int r = store->collection_list(ch, next, ghobject_t::get_max(),
+ 10, &objects, &next);
ceph_assert(r >= 0);
- if (objects.empty())
- break;
+ if (objects.size() == 0)
+ break;
ObjectStore::Transaction t;
+ std::map<std::string, ceph::buffer::list> attrset;
for (vector<ghobject_t>::iterator p = objects.begin();
- p != objects.end(); ++p) {
- t.remove(cid, *p);
+ p != objects.end(); ++p) {
+ t.remove(cid, *p);
}
queue_transaction(store, ch, std::move(t));
}
return ret;
}
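+ // wait until an object is available, then return the one the cursor points
+ // at and advance it, wrapping to the start, so repeated calls cycle through
+ // every available object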
+ ghobject_t get_next_object(std::unique_lock<ceph::mutex>& locker) {
+ cond.wait(locker, [this] {
+ return in_flight < max_in_flight && !available_objects.empty();
+ });
+
+ if (next_available_object == available_objects.end()) {
+ next_available_object = available_objects.begin();
+ }
+
+ ghobject_t ret = *next_available_object;
+ ++next_available_object;
+ return ret;
+ }
+
void wait_for_ready(std::unique_lock<ceph::mutex>& locker) {
cond.wait(locker, [this] { return in_flight < max_in_flight; });
}
return status;
}
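+ // like setattrs(), but writes 'entries' attributes with key_size-byte names
+ // and val_size-byte values to the next object in sequence, so each call
+ // produces a predictable amount of attribute metadata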
+ int set_fixed_attrs(size_t entries, size_t key_size, size_t val_size) {
+ std::unique_lock locker{ lock };
+ EnterExit ee("setattrs");
+ if (!can_unlink())
+ return -ENOENT;
+ wait_for_ready(locker);
+
+ ghobject_t obj = get_next_object(locker);
+ available_objects.erase(obj);
+ ObjectStore::Transaction t;
+
+ map<string, bufferlist> attrs;
+ set<string> keys;
+
+ while (entries--) {
+ bufferlist name, value;
+ filled_byte_array(value, val_size);
+ filled_byte_array(name, key_size);
+ attrs[name.c_str()] = value;
+ contents[obj].attrs[name.c_str()] = value;
+ }
+ t.setattrs(cid, obj, attrs);
+ ++in_flight;
+ in_flight_objects.insert(obj);
+ t.register_on_applied(new C_SyntheticOnReadable(this, obj));
+ int status = store->queue_transaction(ch, std::move(t));
+ return status;
+ }
+
void getattrs() {
EnterExit ee("getattrs");
ghobject_t obj;
gen_type rng(time(NULL));
coll_t cid(spg_t(pg_t(0, 447), shard_id_t::NO_SHARD));
- SyntheticWorkloadState test_obj(store, &gen, &rng, cid, 40 * 1024, 4 * 1024, 0);
+ SyntheticWorkloadState test_obj(store, &gen, &rng, cid, 0, 0, 0);
test_obj.init();
- for (int i = 0; i < 1500; ++i) {
+ size_t object_count = 256;
+ for (size_t i = 0; i < object_count; ++i) {
if (!(i % 10)) cerr << "seeding object " << i << std::endl;
test_obj.touch();
}
- for (int i = 0; i < 10000; ++i) {
+ for (size_t i = 0; i < object_count; ++i) {
if (!(i % 100)) {
cerr << "Op " << i << std::endl;
test_obj.print_internal_state();
}
- boost::uniform_int<> true_false(0, 99);
- test_obj.setattrs();
+ test_obj.set_fixed_attrs(1024, 64, 4096); // 1024 attributes per object, 64-byte names, 4 KiB values
}
test_obj.wait_for_done();
+ std::cout << "done" << std::endl;
AdminSocket* admin_socket = g_ceph_context->get_admin_socket();
ceph_assert(admin_socket);
const PerfCounters* logger = bstore->get_bluefs_perf_counters();
- //experimentally it was discovered that this case results in 400+MB spillover
- //using lower 300MB threshold just to be safe enough
+ //the fixed-attr workload above is expected to spill at least 16MB onto the
+ //slow device, so assert that conservative lower bound instead of the old 300MB one
- ASSERT_GE(logger->get(l_bluefs_slow_used_bytes), 300 * 1024 * 1024);
+ std::cout << "db_used:" << logger->get(l_bluefs_db_used_bytes) << std::endl;
+ std::cout << "slow_used:" << logger->get(l_bluefs_slow_used_bytes) << std::endl;
+ ASSERT_GE(logger->get(l_bluefs_slow_used_bytes), 16 * 1024 * 1024);
}
);