ostream& ObjBencher::out(ostream& os)
{
- utime_t cur_time = ceph_clock_now(g_ceph_context);
+ utime_t cur_time = ceph_clock_now(cct);
return out(os, cur_time);
}
ONE_SECOND.set_from_double(1.0);
bencher->lock.Lock();
while(!data.done) {
- utime_t cur_time = ceph_clock_now(g_ceph_context);
+ utime_t cur_time = ceph_clock_now(bencher->cct);
if (i % 20 == 0) {
if (i > 0)
}
++i;
++cycleSinceChange;
- cond.WaitInterval(g_ceph_context, bencher->lock, ONE_SECOND);
+ cond.WaitInterval(bencher->cct, bencher->lock, ONE_SECOND);
}
bencher->lock.Unlock();
return NULL;
pthread_create(&print_thread, NULL, ObjBencher::status_printer, (void *)this);
lock.Lock();
- data.start_time = ceph_clock_now(g_ceph_context);
+ data.start_time = ceph_clock_now(cct);
lock.Unlock();
for (int i = 0; i<concurrentios; ++i) {
- start_times[i] = ceph_clock_now(g_ceph_context);
+ start_times[i] = ceph_clock_now(cct);
r = create_completion(i, _aio_cb, (void *)&lc);
if (r < 0)
goto ERR;
stopTime = data.start_time + runtime;
slot = 0;
lock.Lock();
- while( ceph_clock_now(g_ceph_context) < stopTime &&
+ while( ceph_clock_now(cct) < stopTime &&
(!maxObjectsToCreate || data.started < maxObjectsToCreate)) {
bool found = false;
while (1) {
lock.Unlock();
goto ERR;
}
- data.cur_latency = ceph_clock_now(g_ceph_context) - start_times[slot];
+ data.cur_latency = ceph_clock_now(cct) - start_times[slot];
data.history.latency.push_back(data.cur_latency);
total_latency += data.cur_latency;
if( data.cur_latency > data.max_latency) data.max_latency = data.cur_latency;
--data.in_flight;
lock.Unlock();
release_completion(slot);
- timePassed = ceph_clock_now(g_ceph_context) - data.start_time;
+ timePassed = ceph_clock_now(cct) - data.start_time;
//write new stuff to backend, then delete old stuff
//and save locations of new stuff for later deletion
- start_times[slot] = ceph_clock_now(g_ceph_context);
+ start_times[slot] = ceph_clock_now(cct);
r = create_completion(slot, _aio_cb, &lc);
if (r < 0)
goto ERR;
lock.Unlock();
goto ERR;
}
- data.cur_latency = ceph_clock_now(g_ceph_context) - start_times[slot];
+ data.cur_latency = ceph_clock_now(cct) - start_times[slot];
data.history.latency.push_back(data.cur_latency);
total_latency += data.cur_latency;
if (data.cur_latency > data.max_latency) data.max_latency = data.cur_latency;
delete contents[slot];
}
- timePassed = ceph_clock_now(g_ceph_context) - data.start_time;
+ timePassed = ceph_clock_now(cct) - data.start_time;
lock.Lock();
data.done = true;
lock.Unlock();
lock.Lock();
data.finished = 0;
- data.start_time = ceph_clock_now(g_ceph_context);
+ data.start_time = ceph_clock_now(cct);
lock.Unlock();
pthread_t print_thread;
//start initial reads
for (int i = 0; i < concurrentios; ++i) {
index[i] = i;
- start_times[i] = ceph_clock_now(g_ceph_context);
+ start_times[i] = ceph_clock_now(cct);
create_completion(i, _aio_cb, (void *)&lc);
r = aio_read(name[i], i, contents[i], data.object_size);
if (r < 0) { //naughty, doesn't clean up heap -- oh, or handle the print thread!
bufferlist *cur_contents;
slot = 0;
- while (seconds_to_run && (ceph_clock_now(g_ceph_context) < finish_time) &&
+ while (seconds_to_run && (ceph_clock_now(cct) < finish_time) &&
num_objects > data.started) {
lock.Lock();
int old_slot = slot;
lock.Unlock();
goto ERR;
}
- data.cur_latency = ceph_clock_now(g_ceph_context) - start_times[slot];
+ data.cur_latency = ceph_clock_now(cct) - start_times[slot];
total_latency += data.cur_latency;
if( data.cur_latency > data.max_latency) data.max_latency = data.cur_latency;
if (data.cur_latency < data.min_latency) data.min_latency = data.cur_latency;
cur_contents = contents[slot];
//start new read and check data if requested
- start_times[slot] = ceph_clock_now(g_ceph_context);
+ start_times[slot] = ceph_clock_now(cct);
contents[slot] = new bufferlist();
create_completion(slot, _aio_cb, (void *)&lc);
r = aio_read(newName, slot, contents[slot], data.object_size);
lock.Unlock();
goto ERR;
}
- data.cur_latency = ceph_clock_now(g_ceph_context) - start_times[slot];
+ data.cur_latency = ceph_clock_now(cct) - start_times[slot];
total_latency += data.cur_latency;
if (data.cur_latency > data.max_latency) data.max_latency = data.cur_latency;
if (data.cur_latency < data.min_latency) data.min_latency = data.cur_latency;
delete contents[slot];
}
- runtime = ceph_clock_now(g_ceph_context) - data.start_time;
+ runtime = ceph_clock_now(cct) - data.start_time;
lock.Lock();
data.done = true;
lock.Unlock();
}
public:
- RadosBencher(librados::Rados& _r, librados::IoCtx& _i) : completions(NULL), rados(_r), io_ctx(_i), iterator_valid(false) {}
+ RadosBencher(CephContext *cct_, librados::Rados& _r, librados::IoCtx& _i)
+ : ObjBencher(cct_), completions(NULL), rados(_r), io_ctx(_i), iterator_valid(false) {}
~RadosBencher() { }
};
operation = OP_RAND_READ;
else
usage_exit();
- RadosBencher bencher(rados, io_ctx);
+ RadosBencher bencher(g_ceph_context, rados, io_ctx);
bencher.set_show_time(show_time);
ret = bencher.aio_bench(operation, seconds, num_objs,
concurrent_ios, op_size, cleanup);
if (!pool_name || nargs.size() < 2)
usage_exit();
const char *prefix = nargs[1];
- RadosBencher bencher(rados, io_ctx);
+ RadosBencher bencher(g_ceph_context, rados, io_ctx);
ret = bencher.clean_up(prefix, concurrent_ios);
if (ret != 0)
cerr << "error during cleanup: " << ret << std::endl;
} req_wq;
public:
- RESTDispatcher(CephContext *cct, int num_threads)
- : m_tp(cct, "RESTDispatcher::m_tp", num_threads),
- req_wq(this, g_conf->rgw_op_thread_timeout,
- g_conf->rgw_op_thread_suicide_timeout, &m_tp) {
+ CephContext *cct;
+ RESTDispatcher(CephContext *cct_, int num_threads)
+ : m_tp(cct_, "RESTDispatcher::m_tp", num_threads),
+ req_wq(this, cct_->_conf->rgw_op_thread_timeout,
+ cct_->_conf->rgw_op_thread_suicide_timeout, &m_tp),
+ cct(cct_) {
response_handler.propertiesCallback = properties_callback;
public:
RESTBencher(RESTDispatcher *_dispatcher) :
+ ObjBencher(_dispatcher->cct),
dispatcher(_dispatcher),
completions(NULL),
list_start(NULL),