roles:
-- [mon.a, mon.b, mon.c, osd.0, osd.1]
+- [mon.a, mon.c, osd.0]
+- [mon.b, osd.1]
openstack:
- volumes: # attached to each instance
count: 2
-overrides:
- ceph:
- conf:
- mon.b:
- clock offset: 10
tasks:
- install:
+- exec:
+ mon.b:
+ - date -u -s @$(expr $(date -u +%s) + 10)
- ceph:
wait-for-healthy: false
log-whitelist:
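Note on the test change above: the removed overrides block worked only because the old ceph_clock_now(cct) added cct->_conf->clock_offset to every reading (see the Clock.cc hunk below), so "clock offset: 10" skewed mon.b entirely in-process. With that parameter gone, the test induces a real 10-second skew instead: date -u -s @$(expr $(date -u +%s) + 10) reads the current UTC epoch and sets the clock ten seconds ahead before the ceph task starts. mon.b also moves to its own instance in the roles list, so stepping its host clock does not skew mon.a/mon.c, and wait-for-healthy is false, presumably because the skew is expected to trip clock-skew health warnings. A standalone sketch of the semantics being deleted (plain doubles, not Ceph's utime_t):

    #include <sys/time.h>
    #include <cstdio>

    static double clock_offset = 10.0;   // what the removed "clock offset: 10" set

    double old_style_now() {
      struct timeval tv;
      gettimeofday(&tv, nullptr);                          // the non-Linux path in Clock.cc
      return tv.tv_sec + tv.tv_usec / 1e6 + clock_offset;  // skew applied on every read
    }

    int main() {
      std::printf("skewed now: %.6f\n", old_style_now());
      return 0;
    }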
r = _set_secret(t, s);
if (r < 0)
return r;
- created = ceph_clock_now(cct);
+ created = ceph_clock_now();
return r;
}
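Everything from here down to the Clock.cc hunk is the same mechanical call-site update: ceph_clock_now(cct), ceph_clock_now(NULL), and ceph_clock_now(g_ceph_context) all become ceph_clock_now(), and ceph::real_clock::now(cct) becomes ceph::real_clock::now(). Behaviour is unchanged except that the removed clock_offset option can no longer shift the result.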
const RotatingSecrets& secrets = iter->second;
// second-oldest, unless it's expired
- map<uint64_t, ExpiringCryptoKey>::const_iterator riter =
+ map<uint64_t, ExpiringCryptoKey>::const_iterator riter =
secrets.secrets.begin();
if (secrets.secrets.size() > 1)
++riter;
- if (riter->second.expiration < ceph_clock_now(cct))
+ if (riter->second.expiration < ceph_clock_now())
++riter; // "current" key has expired, use "next" key instead
secret_id = riter->first;
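The selection logic above picks the second-oldest rotating secret, advancing once more if that one has already expired. The same rule, runnable with a plain std::map standing in for Ceph's RotatingSecrets:

    #include <map>
    #include <cstdio>

    int main() {
      // secret id -> expiration, ordered oldest id first like RotatingSecrets
      std::map<unsigned long long, long> secrets = {{1, 100}, {2, 200}, {3, 300}};
      long now = 250;   // pretend ceph_clock_now(), in seconds

      auto riter = secrets.begin();
      if (secrets.size() > 1)
        ++riter;          // second-oldest
      if (riter->second < now)
        ++riter;          // "current" key has expired, use "next" key instead
      std::printf("chose secret id %llu\n", riter->first);
      return 0;
    }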
{
RotatingSecrets& r = data.rotating_secrets[service_id];
int added = 0;
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
double ttl = service_id == CEPH_ENTITY_TYPE_AUTH ? cct->_conf->auth_mon_ticket_ttl : cct->_conf->auth_service_ticket_ttl;
while (r.need_new_secrets(now)) {
if (crypto->create(bp) < 0)
return false;
- secret.set_secret(CEPH_CRYPTO_AES, bp, ceph_clock_now(NULL));
+ secret.set_secret(CEPH_CRYPTO_AES, bp, ceph_clock_now());
return true;
}
{
info.service_id = service_id;
info.ticket = auth_ticket_info.ticket;
- info.ticket.init_timestamps(ceph_clock_now(cct), cct->_conf->auth_service_ticket_ttl);
+ info.ticket.init_timestamps(ceph_clock_now(), cct->_conf->auth_service_ticket_ttl);
generate_secret(info.session_key);
<< " validity=" << msg_a.validity << dendl;
session_key = msg_a.session_key;
if (!msg_a.validity.is_zero()) {
- expires = ceph_clock_now(cct);
+ expires = ceph_clock_now();
expires += msg_a.validity;
renew_after = expires;
renew_after -= ((double)msg_a.validity.sec() / 4);
bool CephXTicketHandler::have_key()
{
if (have_key_flag) {
- have_key_flag = ceph_clock_now(cct) < expires;
+ have_key_flag = ceph_clock_now() < expires;
}
return have_key_flag;
bool CephXTicketHandler::need_key() const
{
if (have_key_flag) {
- return (!expires.is_zero()) && (ceph_clock_now(cct) >= renew_after);
+ return (!expires.is_zero()) && (ceph_clock_now() >= renew_after);
}
return true;
should_enc_ticket = true;
}
- info.ticket.init_timestamps(ceph_clock_now(cct), cct->_conf->auth_mon_ticket_ttl);
+ info.ticket.init_timestamps(ceph_clock_now(), cct->_conf->auth_mon_ticket_ttl);
info.ticket.name = entity_name;
info.ticket.global_id = global_id;
info.ticket.auid = eauth.auid;
request->set_tid(tid);
// and timestamp
- request->op_stamp = ceph_clock_now(NULL);
+ request->op_stamp = ceph_clock_now();
// make note
mds_requests[tid] = request->get();
pdirbl->claim(reply->get_extra_bl());
// -- log times --
- utime_t lat = ceph_clock_now(cct);
+ utime_t lat = ceph_clock_now();
lat -= request->sent_stamp;
ldout(cct, 20) << "lat " << lat << dendl;
logger->tinc(l_c_lat, lat);
}
if (request->mds == -1) {
- request->sent_stamp = ceph_clock_now(cct);
+ request->sent_stamp = ceph_clock_now();
ldout(cct, 20) << "send_request set sent_stamp to " << request->sent_stamp << dendl;
}
request->mds = mds;
void Client::cap_delay_requeue(Inode *in)
{
ldout(cct, 10) << "cap_delay_requeue on " << *in << dendl;
- in->hold_caps_until = ceph_clock_now(cct);
+ in->hold_caps_until = ceph_clock_now();
in->hold_caps_until += cct->_conf->client_caps_release_delay;
delayed_caps.push_back(&in->cap_item);
}
else
cap_delay_requeue(in);
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
map<mds_rank_t, Cap*>::iterator it = in->caps.begin();
while (it != in->caps.end()) {
lru.lru_set_max(0);
trim_cache();
- while (lru.lru_get_size() > 0 ||
+ while (lru.lru_get_size() > 0 ||
!inode_map.empty()) {
- ldout(cct, 2) << "cache still has " << lru.lru_get_size()
- << "+" << inode_map.size() << " items"
+ ldout(cct, 2) << "cache still has " << lru.lru_get_size()
+ << "+" << inode_map.size() << " items"
<< ", waiting (for caps to release?)"
<< dendl;
- utime_t until = ceph_clock_now(cct) + utime_t(5, 0);
+ utime_t until = ceph_clock_now() + utime_t(5, 0);
int r = mount_cond.WaitUntil(client_lock, until);
if (r == ETIMEDOUT) {
dump_cache(NULL);
tick_event = new C_C_Tick(this);
timer.add_event_after(cct->_conf->client_tick_interval, tick_event);
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
if (!mounted && !mds_requests.empty()) {
MetaRequest *req = mds_requests.begin()->second;
void Client::renew_caps()
{
ldout(cct, 10) << "renew_caps()" << dendl;
- last_cap_renew = ceph_clock_now(cct);
-
+ last_cap_renew = ceph_clock_now();
+
for (map<mds_rank_t,MetaSession*>::iterator p = mds_sessions.begin();
p != mds_sessions.end();
++p) {
void Client::renew_caps(MetaSession *session)
{
ldout(cct, 10) << "renew_caps mds." << session->mds_num << dendl;
- session->last_cap_renew_request = ceph_clock_now(cct);
+ session->last_cap_renew_request = ceph_clock_now();
uint64_t seq = ++session->cap_renew_seq;
session->con->send_message(new MClientSession(CEPH_SESSION_REQUEST_RENEWCAPS, seq));
}
if (!dn->inode || dn->inode->caps_issued_mask(mask)) {
// is dn lease valid?
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
if (dn->lease_mds >= 0 &&
dn->lease_ttl > now &&
mds_sessions.count(dn->lease_mds)) {
Dentry *dn = dir->dir->dentries[name];
// is dn lease valid?
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
if (dn->inode &&
- dn->lease_mds >= 0 &&
+ dn->lease_mds >= 0 &&
dn->lease_ttl > now &&
mds_sessions.count(dn->lease_mds)) {
MetaSession *s = mds_sessions[dn->lease_mds];
if (!mask) {
// caller just needs us to bump the ctime
- in->ctime = ceph_clock_now(cct);
+ in->ctime = ceph_clock_now();
in->cap_dirtier_uid = perms.uid();
in->cap_dirtier_gid = perms.gid();
if (issued & CEPH_CAP_AUTH_EXCL)
mask &= ~CEPH_SETATTR_KILL_SGUID;
if (mask & CEPH_SETATTR_UID) {
- in->ctime = ceph_clock_now(cct);
+ in->ctime = ceph_clock_now();
in->cap_dirtier_uid = perms.uid();
in->cap_dirtier_gid = perms.gid();
in->uid = stx->stx_uid;
ldout(cct,10) << "changing uid to " << stx->stx_uid << dendl;
}
if (mask & CEPH_SETATTR_GID) {
- in->ctime = ceph_clock_now(cct);
+ in->ctime = ceph_clock_now();
in->cap_dirtier_uid = perms.uid();
in->cap_dirtier_gid = perms.gid();
in->gid = stx->stx_gid;
}
if (mask & CEPH_SETATTR_MODE) {
- in->ctime = ceph_clock_now(cct);
+ in->ctime = ceph_clock_now();
in->cap_dirtier_uid = perms.uid();
in->cap_dirtier_gid = perms.gid();
in->mode = (in->mode & ~07777) | (stx->stx_mode & 07777);
}
if (mask & CEPH_SETATTR_BTIME) {
- in->ctime = ceph_clock_now(cct);
+ in->ctime = ceph_clock_now();
in->cap_dirtier_uid = perms.uid();
in->cap_dirtier_gid = perms.gid();
in->btime = utime_t(stx->stx_btime);
in->mtime = utime_t(stx->stx_mtime);
if (mask & CEPH_SETATTR_ATIME)
in->atime = utime_t(stx->stx_atime);
- in->ctime = ceph_clock_now(cct);
+ in->ctime = ceph_clock_now();
in->cap_dirtier_uid = perms.uid();
in->cap_dirtier_gid = perms.gid();
in->time_warp_seq++;
OSDMap::file_to_object_locator(in->layout),
create_ops,
in->snaprealm->get_snap_context(),
- ceph::real_clock::now(cct),
+ ceph::real_clock::now(),
0,
NULL,
NULL);
OSDMap::file_to_object_locator(in->layout),
uninline_ops,
in->snaprealm->get_snap_context(),
- ceph::real_clock::now(cct),
+ ceph::real_clock::now(),
0,
NULL,
onfinish);
ldout(cct, 10) << "cur file size is " << in->size << dendl;
// time it.
- utime_t start = ceph_clock_now(cct);
+ utime_t start = ceph_clock_now();
if (in->inline_version == 0) {
int r = _getattr(in, CEPH_STAT_CAP_INLINE_DATA, f->actor_perms, true);
// async, caching, non-blocking.
r = objectcacher->file_write(&in->oset, &in->layout,
in->snaprealm->get_snap_context(),
- offset, size, bl, ceph::real_clock::now(cct),
+ offset, size, bl, ceph::real_clock::now(),
0);
put_cap_ref(in, CEPH_CAP_FILE_BUFFER);
get_cap_ref(in, CEPH_CAP_FILE_BUFFER); // released by onsafe callback
filer->write_trunc(in->ino, &in->layout, in->snaprealm->get_snap_context(),
- offset, size, bl, ceph::real_clock::now(cct), 0,
+ offset, size, bl, ceph::real_clock::now(), 0,
in->truncate_size, in->truncate_seq,
onfinish, new C_OnFinisher(onsafe, &objecter_finisher));
client_lock.Unlock();
// if we get here, write was successful, update client metadata
success:
// time
- lat = ceph_clock_now(cct);
+ lat = ceph_clock_now();
lat -= start;
logger->tinc(l_c_wrlat, lat);
}
// mtime
- in->mtime = ceph_clock_now(cct);
+ in->mtime = ceph_clock_now();
in->change_attr++;
mark_caps_dirty(in, CEPH_CAP_FILE_WR);
length,
fakesnap,
bl,
- ceph::real_clock::now(cct),
+ ceph::real_clock::now(),
0,
onack,
onsafe);
in->inline_data = bl;
in->inline_version++;
}
- in->mtime = ceph_clock_now(cct);
+ in->mtime = ceph_clock_now();
in->change_attr++;
mark_caps_dirty(in, CEPH_CAP_FILE_WR);
} else {
filer->zero(in->ino, &in->layout,
in->snaprealm->get_snap_context(),
offset, length,
- ceph::real_clock::now(cct),
+ ceph::real_clock::now(),
0, true, onfinish,
new C_OnFinisher(onsafe, &objecter_finisher));
- in->mtime = ceph_clock_now(cct);
+ in->mtime = ceph_clock_now();
in->change_attr++;
mark_caps_dirty(in, CEPH_CAP_FILE_WR);
uint64_t size = offset + length;
if (size > in->size) {
in->size = size;
- in->mtime = ceph_clock_now(cct);
+ in->mtime = ceph_clock_now();
in->change_attr++;
mark_caps_dirty(in, CEPH_CAP_FILE_WR);
return NULL;
Inode *cur = in;
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
while (cur) {
if (cur != in && cur->quota.is_enable())
break;
}
- now = ceph_clock_now(cct);
+ now = ceph_clock_now();
if (cur == in)
cur = parent_ref.get();
else
rd_op.stat(NULL, (ceph::real_time*)nullptr, NULL);
objecter->mutate(oid, OSDMap::file_to_object_locator(in->layout), rd_op,
- nullsnapc, ceph::real_clock::now(cct), 0, &rd_cond, NULL);
+ nullsnapc, ceph::real_clock::now(), 0, &rd_cond, NULL);
C_SaferCond wr_cond;
ObjectOperation wr_op;
wr_op.create(true);
objecter->mutate(oid, OSDMap::file_to_object_locator(in->layout), wr_op,
- nullsnapc, ceph::real_clock::now(cct), 0, &wr_cond, NULL);
+ nullsnapc, ceph::real_clock::now(), 0, &wr_cond, NULL);
client_lock.Unlock();
int rd_ret = rd_cond.wait();
<< "cap expire " << cap->session->cap_ttl << std::endl
<< "cur time " << ceph_clock_now(cct) << std::endl;*/
if ((cap->session->cap_gen <= cap->gen)
- && (ceph_clock_now(client->cct) < cap->session->cap_ttl)) {
+ && (ceph_clock_now() < cap->session->cap_ttl)) {
return true;
}
return false;
this->iargs = syn_iargs;
this->sargs = syn_sargs;
- run_start = ceph_clock_now(client->cct);
+ run_start = ceph_clock_now();
}
iargs.pop_front();
if (iarg1 && run_me()) {
dout(2) << "sleepuntil " << iarg1 << dendl;
- utime_t at = ceph_clock_now(client->cct) - run_start;
- if (at.sec() < iarg1)
+ utime_t at = ceph_clock_now() - run_start;
+ if (at.sec() < iarg1)
sleep(iarg1 - at.sec());
}
did_run_me();
if (iarg1 == 0) iarg1 = 1; // play trace at least once!
for (int i=0; i<iarg1; i++) {
- utime_t start = ceph_clock_now(client->cct);
+ utime_t start = ceph_clock_now();
if (time_to_stop()) break;
play_trace(t, prefix, !playdata);
if (time_to_stop()) break;
if (iarg1 > 1) clean_dir(prefix); // clean only if repeat
- utime_t lat = ceph_clock_now(client->cct);
+ utime_t lat = ceph_clock_now();
lat -= start;
dout(0) << " trace " << tfile << " loop " << (i+1) << "/" << iarg1 << " done in " << (double)lat << " seconds" << dendl;
char buf[1024];
char buf2[1024];
- utime_t start = ceph_clock_now(client->cct);
+ utime_t start = ceph_clock_now();
ceph::unordered_map<int64_t, int64_t> open_files;
ceph::unordered_map<int64_t, dir_result_t*> open_dirs;
bl.push_back(bp);
SnapContext snapc;
client->objecter->write(oid, oloc, off, len, snapc, bl,
- ceph::real_clock::now(client->cct), 0,
+ ceph::real_clock::now(), 0,
new C_SafeCond(&lock, &cond, &ack),
safeg.new_sub());
safeg.activate();
lock.Lock();
SnapContext snapc;
client->objecter->zero(oid, oloc, off, len, snapc,
- ceph::real_clock::now(client->cct), 0,
+ ceph::real_clock::now(), 0,
new C_SafeCond(&lock, &cond, &ack),
safeg.new_sub());
safeg.activate();
list<string> contents;
UserPerm perms = client->pick_my_perms();
- utime_t s = ceph_clock_now(client->cct);
+ utime_t s = ceph_clock_now();
int r = client->getdir(basedir, contents, perms);
- utime_t e = ceph_clock_now(client->cct);
+ utime_t e = ceph_clock_now();
e -= s;
if (r < 0) {
dout(0) << "getdir couldn't readdir " << basedir << ", stopping" << dendl;
for (int i=0; i<files; i++) {
snprintf(d, sizeof(d), "%s/file.%d", basedir, i);
- utime_t s = ceph_clock_now(client->cct);
+ utime_t s = ceph_clock_now();
if (client->lstat(d, &st, perms) < 0) {
dout(2) << "read_dirs failed stat on " << d << ", stopping" << dendl;
return -1;
}
- utime_t e = ceph_clock_now(client->cct);
+ utime_t e = ceph_clock_now();
e -= s;
}
// files
struct stat st;
- utime_t start = ceph_clock_now(client->cct);
+ utime_t start = ceph_clock_now();
for (int c=0; c<count; c++) {
for (int n=0; n<num; n++) {
snprintf(d, sizeof(d), "dir.%d.run%d/file.client%d.%d", priv ? whoami:0, c, whoami, n);
if (time_to_stop()) return 0;
}
}
- utime_t end = ceph_clock_now(client->cct);
+ utime_t end = ceph_clock_now();
end -= start;
dout(0) << "makefiles time is " << end << " or " << ((double)end / (double)num) <<" per file" << dendl;
-
+
return 0;
}
client->mkdir("orig", 0755, perms);
client->mkdir("copy", 0755, perms);
- utime_t start = ceph_clock_now(client->cct);
+ utime_t start = ceph_clock_now();
for (int i=0; i<num; i++) {
snprintf(d, sizeof(d), "orig/file.%d", i);
client->mknod(d, 0755, perms);
}
- utime_t end = ceph_clock_now(client->cct);
+ utime_t end = ceph_clock_now();
end -= start;
dout(0) << "orig " << end << dendl;
// link
- start = ceph_clock_now(client->cct);
+ start = ceph_clock_now();
for (int i=0; i<num; i++) {
snprintf(d, sizeof(d), "orig/file.%d", i);
snprintf(e, sizeof(e), "copy/file.%d", i);
client->link(d, e, perms);
}
- end = ceph_clock_now(client->cct);
+ end = ceph_clock_now();
end -= start;
dout(0) << "copy " << end << dendl;
delete[] buf;
return fd;
}
-
- utime_t from = ceph_clock_now(client->cct);
+
+ utime_t from = ceph_clock_now();
utime_t start = from;
uint64_t bytes = 0, total = 0;
bytes += wrsize;
total += wrsize;
- utime_t now = ceph_clock_now(client->cct);
+ utime_t now = ceph_clock_now();
if (now - from >= 1.0) {
double el = now - from;
dout(0) << "write " << (bytes / el / 1048576.0) << " MB/sec" << dendl;
}
client->fsync(fd, true);
-
- utime_t stop = ceph_clock_now(client->cct);
+
+ utime_t stop = ceph_clock_now();
double el = stop - start;
dout(0) << "write total " << (total / el / 1048576.0) << " MB/sec ("
<< total << " bytes in " << el << " seconds)" << dendl;
return fd;
}
- utime_t from = ceph_clock_now(client->cct);
+ utime_t from = ceph_clock_now();
utime_t start = from;
uint64_t bytes = 0, total = 0;
bytes += rdsize;
total += rdsize;
- utime_t now = ceph_clock_now(client->cct);
+ utime_t now = ceph_clock_now();
if (now - from >= 1.0) {
double el = now - from;
dout(0) << "read " << (bytes / el / 1048576.0) << " MB/sec" << dendl;
dout(0) << " + " << (bad-1) << " other bad 16-byte bits in this block" << dendl;
}
- utime_t stop = ceph_clock_now(client->cct);
+ utime_t stop = ceph_clock_now();
double el = stop - start;
dout(0) << "read total " << (total / el / 1048576.0) << " MB/sec ("
<< total << " bytes in " << el << " seconds)" << dendl;
}
dout(10) << "writing " << oid << dendl;
- starts.push_back(ceph_clock_now(client->cct));
+ starts.push_back(ceph_clock_now());
client->client_lock.Lock();
client->objecter->write(oid, oloc, 0, osize, snapc, bl,
- ceph::real_clock::now(client->cct), 0,
+ ceph::real_clock::now(), 0,
new C_Ref(lock, cond, &unack),
new C_Ref(lock, cond, &unsafe));
client->client_lock.Unlock();
cond.Wait(lock);
}
lock.Unlock();
-
- utime_t lat = ceph_clock_now(client->cct);
+
+ utime_t lat = ceph_clock_now();
lat -= starts.front();
starts.pop_front();
}
SnapContext snapc;
client->client_lock.Lock();
- utime_t start = ceph_clock_now(client->cct);
+ utime_t start = ceph_clock_now();
if (write) {
dout(10) << "write to " << oid << dendl;
op.indata = bl;
m.ops.push_back(op);
client->objecter->mutate(oid, oloc, m, snapc,
- ceph::real_clock::now(client->cct), 0,
+ ceph::real_clock::now(), 0,
NULL, new C_Ref(lock, cond, &unack));
} else {
dout(10) << "read from " << oid << dendl;
}
lock.Unlock();
- utime_t lat = ceph_clock_now(client->cct);
+ utime_t lat = ceph_clock_now();
lat -= start;
}
}
bool time_to_stop() {
- utime_t now = ceph_clock_now(client->cct);
- if (0) cout << "time_to_stop .. now " << now
- << " until " << run_until
- << " start " << run_start
+ utime_t now = ceph_clock_now();
+ if (0) cout << "time_to_stop .. now " << now
+ << " until " << run_until
+ << " start " << run_start
<< std::endl;
- if (run_until.sec() && now > run_until)
+ if (run_until.sec() && now > run_until)
return true;
else
return false;
/* now trim expired locks */
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
map<locker_id_t, locker_info_t>::iterator iter = lock->lockers.begin();
linfo.tag = tag;
utime_t expiration;
if (!duration.is_zero()) {
- expiration = ceph_clock_now(g_ceph_context);
+ expiration = ceph_clock_now();
expiration += duration;
}
const cls::rbd::MirrorImageStatus &status) {
MirrorImageStatusOnDisk ondisk_status(status);
ondisk_status.up = false;
- ondisk_status.last_update = ceph_clock_now(g_ceph_context);
+ ondisk_status.last_update = ceph_clock_now();
int r = cls_get_request_origin(hctx, &ondisk_status.origin);
assert(r == 0);
-// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
- * License version 2.1, as published by the Free Software
+ * License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
- *
+ *
*/
#include <time.h>
-utime_t ceph_clock_now(CephContext *cct)
+utime_t ceph_clock_now()
{
#if defined(__linux__)
struct timespec tp;
gettimeofday(&tv, NULL);
utime_t n(&tv);
#endif
- if (cct)
- n += cct->_conf->clock_offset;
return n;
}
-time_t ceph_clock_gettime(CephContext *cct)
+time_t ceph_clock_gettime()
{
- time_t ret = time(NULL);
- if (cct)
- ret += ((time_t)cct->_conf->clock_offset);
- return ret;
+ return time(NULL);
}
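This is the heart of the commit: ceph_clock_now() and ceph_clock_gettime() lose their CephContext argument, and with it the clock_offset adjustment. What remains is just the raw realtime clock. A minimal standalone illustration of the new behaviour (assuming the elided Linux branch reads CLOCK_REALTIME into the timespec shown above):

    #include <ctime>
    #include <cstdio>

    int main() {
      timespec tp;
      clock_gettime(CLOCK_REALTIME, &tp);   // no context, no offset added
      std::printf("now: %lld.%09ld\n", (long long)tp.tv_sec, tp.tv_nsec);
      return 0;
    }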
-// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
- * License version 2.1, as published by the Free Software
+ * License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
- *
+ *
*/
#ifndef CEPH_CLOCK_H
#include <time.h>
-class CephContext;
-
-extern utime_t ceph_clock_now(CephContext *cct);
-extern time_t ceph_clock_gettime(CephContext *cct);
+extern utime_t ceph_clock_now();
+extern time_t ceph_clock_gettime();
#endif
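With the parameter gone, Clock.h also drops its forward declaration of CephContext: nothing in the header refers to it anymore, and callers no longer need a context object just to read the time. ceph_clock_gettime() likewise reduces to a plain time(NULL).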
return r;
}
- int WaitInterval(CephContext *cct, Mutex &mutex, utime_t interval) {
- utime_t when = ceph_clock_now(cct);
+ int WaitInterval(Mutex &mutex, utime_t interval) {
+ utime_t when = ceph_clock_now();
when += interval;
return WaitUntil(mutex, when);
}
template<typename Duration>
- int WaitInterval(CephContext *cct, Mutex &mutex, Duration interval) {
- ceph::real_time when(ceph::real_clock::now(cct));
+ int WaitInterval(Mutex &mutex, Duration interval) {
+ ceph::real_time when(ceph::real_clock::now());
when += interval;
struct timespec ts = ceph::real_clock::to_timespec(when);
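Cond::WaitInterval likewise drops the context: the deadline is now computed from the unadjusted clock. The contract for callers is unchanged, wait until now + interval and return ETIMEDOUT on expiry. A rough standard-C++ analogy (std::condition_variable, not Ceph's Cond):

    #include <condition_variable>
    #include <mutex>
    #include <chrono>
    #include <cstdio>

    int main() {
      std::mutex m;
      std::condition_variable cv;
      std::unique_lock<std::mutex> l(m);
      // nothing ever notifies, so this normally returns after ~50 ms
      auto st = cv.wait_for(l, std::chrono::milliseconds(50));
      std::printf("%s\n", st == std::cv_status::timeout ? "timeout" : "signaled");
      return 0;
    }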
ldout(cct, 10) << "finisher_thread doing " << ls << dendl;
if (logger)
- start = ceph_clock_now(cct);
+ start = ceph_clock_now();
// Now actually process the contexts.
for (vector<Context*>::iterator p = ls.begin();
}
if (logger) {
logger->dec(l_finisher_queue_len);
- end = ceph_clock_now(cct);
+ end = ceph_clock_now();
logger->tinc(l_finisher_complete_lat, end - start);
start = end;
}
int lvl = (prio == CLOG_ERROR ? -1 : 0);
ldout(cct,lvl) << "log " << prio << " : " << s << dendl;
LogEntry e;
- e.stamp = ceph_clock_now(cct);
+ e.stamp = ceph_clock_now();
// seq and who should be set for syslog/graylog/log_to_mon
e.who = parent->get_myinst();
e.seq = parent->get_next_seq();
if (logger && cct && cct->_conf->mutex_perf_counter) {
utime_t start;
// instrumented mutex enabled
- start = ceph_clock_now(cct);
+ start = ceph_clock_now();
if (TryLock()) {
goto out;
}
r = pthread_mutex_lock(&_m);
logger->tinc(l_mutex_wait,
- ceph_clock_now(cct) - start);
+ ceph_clock_now() - start);
} else {
r = pthread_mutex_lock(&_m);
}
waited = true;
ldout(cct, 2) << "_wait waiting..." << dendl;
if (logger)
- start = ceph_clock_now(cct);
+ start = ceph_clock_now();
do {
cv->Wait(lock);
ldout(cct, 2) << "_wait finished waiting" << dendl;
if (logger) {
- utime_t dur = ceph_clock_now(cct) - start;
+ utime_t dur = ceph_clock_now() - start;
logger->tinc(l_throttle_wait, dur);
}
lock.Lock();
ldout(cct,10) << "timer_thread starting" << dendl;
while (!stopping) {
- utime_t now = ceph_clock_now(cct);
-
+ utime_t now = ceph_clock_now();
+
while (!schedule.empty()) {
scheduled_map_t::iterator p = schedule.begin();
{
assert(lock.is_locked());
- utime_t when = ceph_clock_now(cct);
+ utime_t when = ceph_clock_now();
when += seconds;
add_event_at(when, callback);
}
if (!tracking_enabled)
return false;
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
history.dump_ops(now, f);
return true;
}
f->open_object_section("ops_in_flight"); // overall dump
uint64_t total_ops_in_flight = 0;
f->open_array_section("ops"); // list of TrackedOps
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
for (uint32_t i = 0; i < num_optracker_shards; i++) {
ShardedTrackingData* sdata = sharded_in_flight_list[i];
assert(NULL != sdata);
if (!tracking_enabled)
delete i;
else {
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
history.insert(now, TrackedOpRef(i));
}
}
if (!tracking_enabled)
return false;
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
utime_t too_old = now;
too_old -= complaint_time;
utime_t oldest_op = now;
void OpTracker::get_age_ms_histogram(pow2_hist_t *h)
{
h->clear();
- utime_t now = ceph_clock_now(NULL);
+ utime_t now = ceph_clock_now();
for (uint32_t iter = 0; iter < num_optracker_shards; iter++) {
ShardedTrackingData* sdata = sharded_in_flight_list[iter];
if (!is_tracked)
return;
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
{
Mutex::Locker l(lock);
events.push_back(make_pair(now, event));
*/
bool check_ops_in_flight(std::vector<string> &warning_strings, int *slow = NULL);
void mark_event(TrackedOp *op, const string &evt,
- utime_t time = ceph_clock_now(g_ceph_context));
+ utime_t time = ceph_clock_now());
void on_shutdown() {
history.on_shutdown();
if (!events.empty() && events.rbegin()->second.compare("done") == 0)
return events.rbegin()->first - get_initiated();
else
- return ceph_clock_now(NULL) - get_initiated();
+ return ceph_clock_now() - get_initiated();
}
void mark_event(const string &event);
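One subtlety in the OpTracker hunk above: ceph_clock_now() appears as a default argument, and default arguments are evaluated afresh at each call site that omits them, so every mark_event() call still gets a current timestamp; it just no longer drags in g_ceph_context. A standalone demo of that C++ rule, with ticks() standing in for ceph_clock_now():

    #include <cstdio>
    #include <ctime>

    static long ticks() { return static_cast<long>(std::clock()); }

    void mark(long t = ticks()) {   // default evaluated per call, not once
      std::printf("stamp: %ld\n", t);
    }

    int main() {
      mark();   // ticks() runs here...
      mark();   // ...and runs again here
      return 0;
    }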
hb,
cct->_conf->threadpool_default_timeout,
0);
- _cond.WaitInterval(cct, _lock,
+ _cond.WaitInterval(_lock,
utime_t(
cct->_conf->threadpool_empty_queue_max_wait, 0));
}
cct->get_heartbeat_map()->reset_timeout(
hb,
wq->timeout_interval, wq->suicide_interval);
- shardedpool_cond.WaitInterval(cct, shardedpool_lock,
+ shardedpool_cond.WaitInterval(shardedpool_lock,
utime_t(
cct->_conf->threadpool_empty_queue_max_wait, 0));
}
cct->get_heartbeat_map()->reset_timeout(
hb,
wq->timeout_interval, wq->suicide_interval);
- shardedpool_cond.WaitInterval(cct, shardedpool_lock,
+ shardedpool_cond.WaitInterval(shardedpool_lock,
utime_t(
cct->_conf->threadpool_empty_queue_max_wait, 0));
}
const char *func)
{
ostringstream tss;
- tss << ceph_clock_now(g_assert_context);
+ tss << ceph_clock_now();
char buf[8096];
BackTrace *bt = new BackTrace(1);
const char *func, const char* msg, ...)
{
ostringstream tss;
- tss << ceph_clock_now(g_assert_context);
+ tss << ceph_clock_now();
class BufAppender {
public:
if (_cct->_conf->heartbeat_interval) {
utime_t interval(_cct->_conf->heartbeat_interval, 0);
- _cond.WaitInterval(_cct, _lock, interval);
+ _cond.WaitInterval(_lock, interval);
} else
_cond.Wait(_lock);
// For ceph_timespec
#include "include/types.h"
-
-#include "ceph_context.h"
#include "ceph_time.h"
#include "config.h"
namespace ceph {
namespace time_detail {
- real_clock::time_point real_clock::now(const CephContext* cct) noexcept {
- auto t = now();
- if (cct)
- t += make_timespan(cct->_conf->clock_offset);
- return t;
- }
-
void real_clock::to_ceph_timespec(const time_point& t,
struct ceph_timespec& ts) {
ts.tv_sec = to_time_t(t);
return time_point(seconds(ts.tv_sec) + nanoseconds(ts.tv_nsec));
}
- coarse_real_clock::time_point coarse_real_clock::now(
- const CephContext* cct) noexcept {
- auto t = now();
- if (cct)
- t += make_timespan(cct->_conf->clock_offset);
- return t;
- }
-
void coarse_real_clock::to_ceph_timespec(const time_point& t,
struct ceph_timespec& ts) {
ts.tv_sec = to_time_t(t);
const struct ceph_timespec& ts) {
return time_point(seconds(ts.tv_sec) + nanoseconds(ts.tv_nsec));
}
- };
+ }
using std::chrono::duration_cast;
using std::chrono::seconds;
int clock_gettime(int clk_id, struct timespec *tp);
#endif
-class CephContext;
struct ceph_timespec;
namespace ceph {
clock_gettime(CLOCK_REALTIME, &ts);
return from_timespec(ts);
}
- // We need a version of 'now' that can take a CephContext for
- // introducing configurable clock skew.
- static time_point now(const CephContext* cct) noexcept;
static bool is_zero(const time_point& t) {
- return (t == time_point::min());
+ return (t == time_point::min());
}
// Allow conversion to/from any clock with the same interface as
#endif
return from_timespec(ts);
}
- static time_point now(const CephContext* cct) noexcept;
static time_t to_time_t(const time_point& t) noexcept {
return duration_cast<seconds>(t.time_since_epoch()).count();
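In ceph_time the deleted now(const CephContext*) overloads for real_clock and coarse_real_clock were thin wrappers that added make_timespan(cct->_conf->clock_offset) on top of plain now(); with clock_offset gone they have no remaining purpose, so the source file drops them, and the header loses the matching declarations along with its CephContext forward declaration and the ceph_context.h include. The change from "};" to "}" also fixes a stray semicolon after a namespace's closing brace.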
ostream& ObjBencher::out(ostream& os)
{
- utime_t cur_time = ceph_clock_now(cct);
+ utime_t cur_time = ceph_clock_now();
return out(os, cur_time);
}
if (formatter)
formatter->open_array_section("datas");
while(!data.done) {
- utime_t cur_time = ceph_clock_now(bencher->cct);
+ utime_t cur_time = ceph_clock_now();
if (i % 20 == 0 && !formatter) {
if (i > 0)
}
++i;
++cycleSinceChange;
- cond.WaitInterval(bencher->cct, bencher->lock, ONE_SECOND);
+ cond.WaitInterval(bencher->lock, ONE_SECOND);
}
if (formatter)
formatter->close_section(); //datas
goto out;
}
- data.start_time = ceph_clock_now(cct);
+ data.start_time = ceph_clock_now();
out(cout) << "Cleaning up (deleting benchmark objects)" << std::endl;
r = clean_up(num_objects, prevPid, concurrentios);
if (r != 0) goto out;
- runtime = ceph_clock_now(cct) - data.start_time;
+ runtime = ceph_clock_now() - data.start_time;
out(cout) << "Clean up completed and total clean up time :" << runtime << std::endl;
// lastrun file
ceph_pthread_setname(print_thread, "write_stat");
lock.Lock();
data.finished = 0;
- data.start_time = ceph_clock_now(cct);
+ data.start_time = ceph_clock_now();
lock.Unlock();
for (int i = 0; i<concurrentios; ++i) {
- start_times[i] = ceph_clock_now(cct);
+ start_times[i] = ceph_clock_now();
r = create_completion(i, _aio_cb, (void *)&lc);
if (r < 0)
goto ERR;
stopTime = data.start_time + runtime;
slot = 0;
lock.Lock();
- while (!secondsToRun || ceph_clock_now(cct) < stopTime) {
+ while (!secondsToRun || ceph_clock_now() < stopTime) {
bool found = false;
while (1) {
int old_slot = slot;
lock.Unlock();
goto ERR;
}
- data.cur_latency = ceph_clock_now(cct) - start_times[slot];
+ data.cur_latency = ceph_clock_now() - start_times[slot];
data.history.latency.push_back(data.cur_latency);
total_latency += data.cur_latency;
if( data.cur_latency > data.max_latency) data.max_latency = data.cur_latency;
--data.in_flight;
lock.Unlock();
release_completion(slot);
- timePassed = ceph_clock_now(cct) - data.start_time;
+ timePassed = ceph_clock_now() - data.start_time;
//write new stuff to backend
- start_times[slot] = ceph_clock_now(cct);
+ start_times[slot] = ceph_clock_now();
r = create_completion(slot, _aio_cb, &lc);
if (r < 0)
goto ERR;
lock.Unlock();
goto ERR;
}
- data.cur_latency = ceph_clock_now(cct) - start_times[slot];
+ data.cur_latency = ceph_clock_now() - start_times[slot];
data.history.latency.push_back(data.cur_latency);
total_latency += data.cur_latency;
if (data.cur_latency > data.max_latency) data.max_latency = data.cur_latency;
contents[slot] = 0;
}
- timePassed = ceph_clock_now(cct) - data.start_time;
+ timePassed = ceph_clock_now() - data.start_time;
lock.Lock();
data.done = true;
lock.Unlock();
lock.Lock();
data.finished = 0;
- data.start_time = ceph_clock_now(cct);
+ data.start_time = ceph_clock_now();
lock.Unlock();
pthread_t print_thread;
//start initial reads
for (int i = 0; i < concurrentios; ++i) {
index[i] = i;
- start_times[i] = ceph_clock_now(cct);
+ start_times[i] = ceph_clock_now();
create_completion(i, _aio_cb, (void *)&lc);
r = aio_read(name[i], i, contents[i], data.op_size,
data.op_size * (i % writes_per_object));
bufferlist *cur_contents;
slot = 0;
- while ((!seconds_to_run || ceph_clock_now(cct) < finish_time) &&
+ while ((!seconds_to_run || ceph_clock_now() < finish_time) &&
num_objects > data.started) {
lock.Lock();
int old_slot = slot;
}
// calculate latency here, so memcmp doesn't inflate it
- data.cur_latency = ceph_clock_now(cct) - start_times[slot];
+ data.cur_latency = ceph_clock_now() - start_times[slot];
cur_contents = contents[slot];
int current_index = index[slot];
release_completion(slot);
//start new read and check data if requested
- start_times[slot] = ceph_clock_now(cct);
+ start_times[slot] = ceph_clock_now();
create_completion(slot, _aio_cb, (void *)&lc);
r = aio_read(newName, slot, contents[slot], data.op_size,
data.op_size * (data.started % writes_per_object));
lock.Unlock();
goto ERR;
}
- data.cur_latency = ceph_clock_now(cct) - start_times[slot];
+ data.cur_latency = ceph_clock_now() - start_times[slot];
total_latency += data.cur_latency;
if (data.cur_latency > data.max_latency) data.max_latency = data.cur_latency;
if (data.cur_latency < data.min_latency) data.min_latency = data.cur_latency;
delete contents[slot];
}
- runtime = ceph_clock_now(cct) - data.start_time;
+ runtime = ceph_clock_now() - data.start_time;
lock.Lock();
data.done = true;
lock.Unlock();
lock.Lock();
data.finished = 0;
- data.start_time = ceph_clock_now(g_ceph_context);
+ data.start_time = ceph_clock_now();
lock.Unlock();
pthread_t print_thread;
//start initial reads
for (int i = 0; i < concurrentios; ++i) {
index[i] = i;
- start_times[i] = ceph_clock_now(g_ceph_context);
+ start_times[i] = ceph_clock_now();
create_completion(i, _aio_cb, (void *)&lc);
r = aio_read(name[i], i, contents[i], data.op_size,
data.op_size * (i % writes_per_object));
int rand_id;
slot = 0;
- while ((!seconds_to_run || ceph_clock_now(g_ceph_context) < finish_time)) {
+ while ((!seconds_to_run || ceph_clock_now() < finish_time)) {
lock.Lock();
int old_slot = slot;
bool found = false;
}
// calculate latency here, so memcmp doesn't inflate it
- data.cur_latency = ceph_clock_now(cct) - start_times[slot];
+ data.cur_latency = ceph_clock_now() - start_times[slot];
lock.Unlock();
cur_contents->invalidate_crc();
//start new read and check data if requested
- start_times[slot] = ceph_clock_now(g_ceph_context);
+ start_times[slot] = ceph_clock_now();
create_completion(slot, _aio_cb, (void *)&lc);
r = aio_read(newName, slot, contents[slot], data.op_size,
data.op_size * (rand_id % writes_per_object));
lock.Unlock();
goto ERR;
}
- data.cur_latency = ceph_clock_now(g_ceph_context) - start_times[slot];
+ data.cur_latency = ceph_clock_now() - start_times[slot];
total_latency += data.cur_latency;
if (data.cur_latency > data.max_latency) data.max_latency = data.cur_latency;
if (data.cur_latency < data.min_latency) data.min_latency = data.cur_latency;
delete contents[slot];
}
- runtime = ceph_clock_now(g_ceph_context) - data.start_time;
+ runtime = ceph_clock_now() - data.start_time;
lock.Lock();
data.done = true;
lock.Unlock();
utime_t old_time = new_it->second.second;
t2kmap.erase(old_time);
}
- utime_t time = ceph_clock_now(g_ceph_context);
+ utime_t time = ceph_clock_now();
k2itmap[idata.kdata] = make_pair(idata, time);
t2kmap[time] = idata.kdata;
if ((int)k2itmap.size() > cache_size) {
t2kmap.erase(old_time);
k2itmap.erase(idata.kdata);
}
- utime_t time = ceph_clock_now(g_ceph_context);
+ utime_t time = ceph_clock_now();
k2itmap[idata.kdata] = make_pair(idata, time);
t2kmap[time] = idata.kdata;
if ((int)k2itmap.size() > cache_size) {
out_data->kdata.parse(kvs.begin()->first);
bufferlist::iterator b = kvs.begin()->second.begin();
out_data->decode(b);
- if (idata.is_timed_out(ceph_clock_now(g_ceph_context),timeout)) {
+ if (idata.is_timed_out(ceph_clock_now(), timeout)) {
if (verbose) cout << client_name << " THINKS THE OTHER CLIENT DIED."
<< std::endl;
//the client died after deleting the object. clean up.
if (verbose) cout << "\t\t\t" << client_name
<< "-prev: getting index failed with error "
<< err << std::endl;
- if (idata.is_timed_out(ceph_clock_now(g_ceph_context),timeout)) {
+ if (idata.is_timed_out(ceph_clock_now(), timeout)) {
if (verbose) cout << client_name << " THINKS THE OTHER CLIENT DIED."
<< std::endl;
//the client died after deleting the object. clean up.
(cache_size / cache_refresh >= 2? cache_size / cache_refresh: 2),
&kvmap,&err);
err = io_ctx.operate(index_name, &oro, NULL);
- utime_t mytime = ceph_clock_now(g_ceph_context);
+ utime_t mytime = ceph_clock_now();
if (err < 0){
cerr << "\t" << client_name
<< "-read_index: getting keys failed with "
std::map<std::string, pair<bufferlist, int> > assertions;
map<string, bufferlist> to_insert;
idata->prefix = "1";
- idata->ts = ceph_clock_now(g_ceph_context);
+ idata->ts = ceph_clock_now();
for(vector<object_data>::const_iterator it = to_create.begin();
it != to_create.end();
++it) {
return -ESUICIDE;
}
err = read_index(key, &idata, NULL, false);
- mytime = ceph_clock_now(g_ceph_context);
+ mytime = ceph_clock_now();
if (err < 0) {
if (verbose) cout << "getting oid failed with code " << err << std::endl;
return err;
io_ctx.aio_operate(dit->obj, aioc, &oro, NULL);
aioc->wait_for_safe();
err = aioc->get_return_value();
- if (ceph_clock_now(g_ceph_context) - idata.ts > timeout) {
+ if (ceph_clock_now() - idata.ts > timeout) {
if (err < 0) {
aioc->release();
if (err == -ENOENT) {
int LevelDBStore::submit_transaction(KeyValueDB::Transaction t)
{
- utime_t start = ceph_clock_now(g_ceph_context);
+ utime_t start = ceph_clock_now();
LevelDBTransactionImpl * _t =
static_cast<LevelDBTransactionImpl *>(t.get());
leveldb::Status s = db->Write(leveldb::WriteOptions(), &(_t->bat));
- utime_t lat = ceph_clock_now(g_ceph_context) - start;
+ utime_t lat = ceph_clock_now() - start;
logger->inc(l_leveldb_txns);
logger->tinc(l_leveldb_submit_latency, lat);
return s.ok() ? 0 : -1;
int LevelDBStore::submit_transaction_sync(KeyValueDB::Transaction t)
{
- utime_t start = ceph_clock_now(g_ceph_context);
+ utime_t start = ceph_clock_now();
LevelDBTransactionImpl * _t =
static_cast<LevelDBTransactionImpl *>(t.get());
leveldb::WriteOptions options;
options.sync = true;
leveldb::Status s = db->Write(options, &(_t->bat));
- utime_t lat = ceph_clock_now(g_ceph_context) - start;
+ utime_t lat = ceph_clock_now() - start;
logger->inc(l_leveldb_txns);
logger->tinc(l_leveldb_submit_sync_latency, lat);
return s.ok() ? 0 : -1;
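The KeyValueDB hunks keep their submit/get latency instrumentation intact, only dropping the context from the clock reads: stamp before the operation, subtract after, feed the delta to the perf counter. The same pattern sketched in standard C++ (steady_clock standing in for utime_t arithmetic; the busy loop stands in for db->Write()):

    #include <chrono>
    #include <cstdio>

    int main() {
      auto start = std::chrono::steady_clock::now();
      volatile long sink = 0;
      for (long i = 0; i < 1000000; ++i) sink = sink + i;   // stand-in for the DB write
      std::chrono::duration<double> lat =
          std::chrono::steady_clock::now() - start;
      std::printf("submit latency: %.6f s\n", lat.count()); // stand-in for logger->tinc()
      return 0;
    }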
const std::set<string> &keys,
std::map<string, bufferlist> *out)
{
- utime_t start = ceph_clock_now(g_ceph_context);
+ utime_t start = ceph_clock_now();
for (std::set<string>::const_iterator i = keys.begin();
i != keys.end(); ++i) {
std::string value;
if (status.ok())
(*out)[*i].append(value);
}
- utime_t lat = ceph_clock_now(g_ceph_context) - start;
+ utime_t lat = ceph_clock_now() - start;
logger->inc(l_leveldb_gets);
logger->tinc(l_leveldb_get_latency, lat);
return 0;
bufferlist *out)
{
assert(out && (out->length() == 0));
- utime_t start = ceph_clock_now(g_ceph_context);
+ utime_t start = ceph_clock_now();
int r = 0;
string value, k;
leveldb::Status s;
} else {
r = -ENOENT;
}
- utime_t lat = ceph_clock_now(g_ceph_context) - start;
+ utime_t lat = ceph_clock_now() - start;
logger->inc(l_leveldb_gets);
logger->tinc(l_leveldb_get_latency, lat);
return r;
int RocksDBStore::submit_transaction(KeyValueDB::Transaction t)
{
- utime_t start = ceph_clock_now(g_ceph_context);
+ utime_t start = ceph_clock_now();
// enable rocksdb breakdown
// considering performance overhead, default is disabled
if (g_conf->rocksdb_perf) {
derr << __func__ << " error: " << s.ToString() << " code = " << s.code()
<< " Rocksdb transaction: " << rocks_txc.seen << dendl;
}
- utime_t lat = ceph_clock_now(g_ceph_context) - start;
+ utime_t lat = ceph_clock_now() - start;
if (g_conf->rocksdb_perf) {
utime_t write_memtable_time;
int RocksDBStore::submit_transaction_sync(KeyValueDB::Transaction t)
{
- utime_t start = ceph_clock_now(g_ceph_context);
+ utime_t start = ceph_clock_now();
// enable rocksdb breakdown
// considering performance overhead, default is disabled
if (g_conf->rocksdb_perf) {
derr << __func__ << " error: " << s.ToString() << " code = " << s.code()
<< " Rocksdb transaction: " << rocks_txc.seen << dendl;
}
- utime_t lat = ceph_clock_now(g_ceph_context) - start;
+ utime_t lat = ceph_clock_now() - start;
if (g_conf->rocksdb_perf) {
utime_t write_memtable_time;
const std::set<string> &keys,
std::map<string, bufferlist> *out)
{
- utime_t start = ceph_clock_now(g_ceph_context);
+ utime_t start = ceph_clock_now();
for (std::set<string>::const_iterator i = keys.begin();
i != keys.end(); ++i) {
std::string value;
if (status.ok())
(*out)[*i].append(value);
}
- utime_t lat = ceph_clock_now(g_ceph_context) - start;
+ utime_t lat = ceph_clock_now() - start;
logger->inc(l_rocksdb_gets);
logger->tinc(l_rocksdb_get_latency, lat);
return 0;
bufferlist *out)
{
assert(out && (out->length() == 0));
- utime_t start = ceph_clock_now(g_ceph_context);
+ utime_t start = ceph_clock_now();
int r = 0;
string value, k;
rocksdb::Status s;
} else {
r = -ENOENT;
}
- utime_t lat = ceph_clock_now(g_ceph_context) - start;
+ utime_t lat = ceph_clock_now() - start;
logger->inc(l_rocksdb_gets);
logger->tinc(l_rocksdb_get_latency, lat);
return r;
prepare_assert_ops(&op);
op.rollback(snapid);
objecter->mutate(oid, oloc,
- op, snapc, ceph::real_clock::now(client->cct), 0,
+ op, snapc, ceph::real_clock::now(), 0,
onack, NULL, NULL);
mylock.Lock();
ceph::real_time *pmtime, int flags)
{
ceph::real_time ut = (pmtime ? *pmtime :
- ceph::real_clock::now(client->cct));
+ ceph::real_clock::now());
/* can't write to a snapshot */
if (snap_seq != CEPH_NOSNAP)
::ObjectOperation *o, AioCompletionImpl *c,
const SnapContext& snap_context, int flags)
{
- auto ut = ceph::real_clock::now(client->cct);
+ auto ut = ceph::real_clock::now();
/* can't write to a snapshot */
if (snap_seq != CEPH_NOSNAP)
return -EROFS;
const bufferlist& bl, size_t len,
uint64_t off)
{
- auto ut = ceph::real_clock::now(client->cct);
+ auto ut = ceph::real_clock::now();
ldout(client->cct, 20) << "aio_write " << oid << " " << off << "~" << len << " snapc=" << snapc << " snap_seq=" << snap_seq << dendl;
if (len > UINT_MAX/2)
int librados::IoCtxImpl::aio_append(const object_t &oid, AioCompletionImpl *c,
const bufferlist& bl, size_t len)
{
- auto ut = ceph::real_clock::now(client->cct);
+ auto ut = ceph::real_clock::now();
if (len > UINT_MAX/2)
return -E2BIG;
AioCompletionImpl *c,
const bufferlist& bl)
{
- auto ut = ceph::real_clock::now(client->cct);
+ auto ut = ceph::real_clock::now();
if (bl.length() > UINT_MAX/2)
return -E2BIG;
size_t write_len,
uint64_t off)
{
- auto ut = ceph::real_clock::now(client->cct);
+ auto ut = ceph::real_clock::now();
if ((bl.length() > UINT_MAX/2) || (write_len > UINT_MAX/2))
return -E2BIG;
int librados::IoCtxImpl::aio_remove(const object_t &oid, AioCompletionImpl *c, int flags)
{
- auto ut = ceph::real_clock::now(client->cct);
+ auto ut = ceph::real_clock::now();
/* can't write to a snapshot */
if (snap_seq != CEPH_NOSNAP)
prepare_assert_ops(&wr);
wr.watch(cookie, CEPH_OSD_WATCH_OP_UNWATCH);
objecter->mutate(linger_op->target.base_oid, oloc, wr,
- snapc, ceph::real_clock::now(client->cct), 0, NULL,
+ snapc, ceph::real_clock::now(), 0, NULL,
&onfinish, &ver);
objecter->linger_cancel(linger_op);
prepare_assert_ops(&wr);
wr.watch(cookie, CEPH_OSD_WATCH_OP_UNWATCH);
objecter->mutate(linger_op->target.base_oid, oloc, wr,
- snapc, ceph::real_clock::now(client->cct), 0, NULL,
+ snapc, ceph::real_clock::now(), 0, NULL,
oncomplete, &c->objver);
return 0;
}
if (objecter->with_osdmap(std::mem_fn(&OSDMap::get_epoch)) == 0) {
ldout(cct, 10) << __func__ << " waiting" << dendl;
- utime_t start = ceph_clock_now(cct);
+ utime_t start = ceph_clock_now();
while (objecter->with_osdmap(std::mem_fn(&OSDMap::get_epoch)) == 0) {
if (timeout.is_zero()) {
cond.Wait(lock);
} else {
- cond.WaitInterval(cct, lock, timeout);
- utime_t elapsed = ceph_clock_now(cct) - start;
+ cond.WaitInterval(lock, timeout);
+ utime_t elapsed = ceph_clock_now() - start;
if (elapsed > timeout) {
lderr(cct) << "timed out waiting for first osdmap from monitors"
<< dendl;
tracepoint(librbd, aio_complete_enter, this, rval);
utime_t elapsed;
- elapsed = ceph_clock_now(cct) - start_time;
+ elapsed = ceph_clock_now() - start_time;
switch (aio_type) {
case AIO_TYPE_OPEN:
case AIO_TYPE_CLOSE:
if (ictx == nullptr) {
ictx = i;
aio_type = t;
- start_time = ceph_clock_now(ictx->cct);
+ start_time = ceph_clock_now();
}
}
uint64_t left = mylen;
RWLock::RLocker owner_locker(ictx->owner_lock);
- start_time = ceph_clock_now(ictx->cct);
+ start_time = ceph_clock_now();
while (left > 0) {
uint64_t period_off = off - (off % period);
uint64_t read_len = min(period_off + period - off, left);
off += ret;
}
- elapsed = ceph_clock_now(ictx->cct) - start_time;
+ elapsed = ceph_clock_now() - start_time;
ictx->perfcounter->tinc(l_librbd_rd_latency, elapsed);
ictx->perfcounter->inc(l_librbd_rd);
ictx->perfcounter->inc(l_librbd_rd_bytes, mylen);
Entry *Log::create_entry(int level, int subsys)
{
if (true) {
- return new Entry(ceph_clock_now(NULL),
- pthread_self(),
- level, subsys);
+ return new Entry(ceph_clock_now(),
+ pthread_self(),
+ level, subsys);
} else {
// kludge for perf testing
Entry *e = m_recent.dequeue();
- e->m_stamp = ceph_clock_now(NULL);
+ e->m_stamp = ceph_clock_now();
e->m_thread = pthread_self();
e->m_prio = level;
e->m_subsys = subsys;
"Log hint");
size_t size = __atomic_load_n(expected_size, __ATOMIC_RELAXED);
void *ptr = ::operator new(sizeof(Entry) + size);
- return new(ptr) Entry(ceph_clock_now(NULL),
+ return new(ptr) Entry(ceph_clock_now(),
pthread_self(), level, subsys,
reinterpret_cast<char*>(ptr) + sizeof(Entry), size, expected_size);
} else {
// kludge for perf testing
Entry *e = m_recent.dequeue();
- e->m_stamp = ceph_clock_now(NULL);
+ e->m_stamp = ceph_clock_now();
e->m_thread = pthread_self();
e->m_prio = level;
e->m_subsys = subsys;
int sys = i % 4;
int l = 5 + (i%4);
if (subs.should_gather(sys, l)) {
- Entry *e = new Entry(ceph_clock_now(NULL),
+ Entry *e = new Entry(ceph_clock_now(),
pthread_self(),
l,
sys,
for (int i=0; i<many; i++) {
int l = 10;
if (subs.should_gather(1, l))
- log.submit_entry(new Entry(ceph_clock_now(NULL), pthread_self(), l, 1));
+ log.submit_entry(new Entry(ceph_clock_now(), pthread_self(), l, 1));
}
log.flush();
log.stop();
for (int i=0; i<many; i++) {
int l = 10;
if (subs.should_gather(1, l))
- log.submit_entry(new Entry(ceph_clock_now(NULL), pthread_self(), l, 1,
+ log.submit_entry(new Entry(ceph_clock_now(), pthread_self(), l, 1,
"this is a long string asdf asdf asdf asdf asdf asdf asd fasd fasdf "));
}
log.flush();
for (int i=0; i<many; i++) {
int l = 10;
if (subs.should_gather(1, l)) {
- Entry *e = new Entry(ceph_clock_now(NULL), pthread_self(), l, 1);
+ Entry *e = new Entry(ceph_clock_now(), pthread_self(), l, 1);
ostringstream oss;
oss << "this i a long stream asdf asdf asdf asdf asdf asdf asdf asdf asdf as fd";
e->set_str(oss.str());
for (int i=0; i<many; i++) {
int l = 10;
if (subs.should_gather(1, l)) {
- Entry *e = new Entry(ceph_clock_now(NULL), pthread_self(), l, 1);
+ Entry *e = new Entry(ceph_clock_now(), pthread_self(), l, 1);
ostringstream oss;
oss.str().reserve(80);
oss << "this i a long stream asdf asdf asdf asdf asdf asdf asdf asdf asdf as fd";
for (int i=0; i<many; i++) {
int l = 10;
if (subs.should_gather(1, l)) {
- Entry *e = new Entry(ceph_clock_now(NULL), pthread_self(), l, 1);
+ Entry *e = new Entry(ceph_clock_now(), pthread_self(), l, 1);
PrebufferedStreambuf psb(e->m_static_buf, sizeof(e->m_static_buf));
ostream oss(&psb);
oss << "this i a long stream asdf asdf asdf asdf asdf asdf asdf asdf asdf as fd";
for (int i=0; i<many; i++) {
int l = 10;
if (subs.should_gather(1, l)) {
- Entry *e = new Entry(ceph_clock_now(NULL), pthread_self(), l, 1);
+ Entry *e = new Entry(ceph_clock_now(), pthread_self(), l, 1);
PrebufferedStreambuf psb(e->m_static_buf, sizeof(e->m_static_buf));
ostream oss(&psb);
oss << "this i a long stream asdf asdf asdf asdf asdf asdf asdf asdf asdf as fd"
for (int i=0; i<many; i++) {
int l = 10;
if (subs.should_gather(1, l))
- log.submit_entry(new Entry(ceph_clock_now(NULL), pthread_self(), l, 1));
+ log.submit_entry(new Entry(ceph_clock_now(), pthread_self(), l, 1));
}
log.flush();
log.stop();
log.reopen_log_file();
log.inject_segv();
- Entry *e = new Entry(ceph_clock_now(NULL), pthread_self(), 10, 1);
+ Entry *e = new Entry(ceph_clock_now(), pthread_self(), 10, 1);
log.submit_entry(e); // this should segv
log.flush();
log.set_log_file("/tmp/big");
log.reopen_log_file();
int l = 10;
- Entry *e = new Entry(ceph_clock_now(NULL), pthread_self(), l, 1);
+ Entry *e = new Entry(ceph_clock_now(), pthread_self(), l, 1);
std::string msg(10000000, 0);
e->set_str(msg);
// update lab
if (seq_stamp.count(seq)) {
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
if (seq_stamp[seq] > last_acked_stamp) {
last_acked_stamp = seq_stamp[seq];
utime_t rtt = now - last_acked_stamp;
<< " for up to " << duration << "s" << dendl;
utime_t timeout;
- timeout.set_from_double(ceph_clock_now(cct) + duration);
+ timeout.set_from_double(ceph_clock_now() + duration);
while ((!seq_stamp.empty() && seq_stamp.begin()->first <= awaiting_seq)
- && ceph_clock_now(cct) < timeout) {
+ && ceph_clock_now() < timeout) {
waiting_cond.WaitUntil(lock, timeout);
}
<< " seq " << last_seq
<< dendl;
- seq_stamp[last_seq] = ceph_clock_now(g_ceph_context);
+ seq_stamp[last_seq] = ceph_clock_now();
assert(want_state != MDSMap::STATE_NULL);
if (last_acked_stamp == utime_t())
return false;
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
utime_t since = now - last_acked_stamp;
if (since > g_conf->mds_beacon_grace) {
dout(5) << "is_laggy " << since << " > " << g_conf->mds_beacon_grace
set<Session*> sessions;
mds->sessionmap.get_client_session_set(sessions);
- utime_t cutoff = ceph_clock_now(g_ceph_context);
+ utime_t cutoff = ceph_clock_now();
cutoff -= g_conf->mds_recall_state_timeout;
utime_t last_recall = mds->mdcache->last_recall_state;
ostream& CDentry::print_db_line_prefix(ostream& out)
{
- return out << ceph_clock_now(g_ceph_context) << " mds." << dir->cache->mds->get_nodeid() << ".cache.den(" << dir->ino() << " " << name << ") ";
+ return out << ceph_clock_now() << " mds." << dir->cache->mds->get_nodeid() << ".cache.den(" << dir->ino() << " " << name << ") ";
}
boost::pool<> CDentry::pool(sizeof(CDentry));
ostream& CDir::print_db_line_prefix(ostream& out)
{
- return out << ceph_clock_now(g_ceph_context) << " mds." << cache->mds->get_nodeid() << ".cache.dir(" << this->dirfrag() << ") ";
+ return out << ceph_clock_now() << " mds." << cache->mds->get_nodeid() << ".cache.dir(" << this->dirfrag() << ") ";
}
num_dirty(0), committing_version(0), committed_version(0),
dir_auth_pins(0), request_pins(0),
dir_rep(REP_NONE),
- pop_me(ceph_clock_now(g_ceph_context)),
- pop_nested(ceph_clock_now(g_ceph_context)),
- pop_auth_subtree(ceph_clock_now(g_ceph_context)),
- pop_auth_subtree_nested(ceph_clock_now(g_ceph_context)),
+ pop_me(ceph_clock_now()),
+ pop_nested(ceph_clock_now()),
+ pop_auth_subtree(ceph_clock_now()),
+ pop_auth_subtree_nested(ceph_clock_now()),
num_dentries_nested(0), num_dentries_auth_subtree(0),
num_dentries_auth_subtree_nested(0),
dir_auth(CDIR_AUTH_DEFAULT)
}
//in->hack_accessed = false;
- //in->hack_load_stamp = ceph_clock_now(g_ceph_context);
+ //in->hack_load_stamp = ceph_clock_now();
//num_new_inodes_loaded++;
} else {
dout(0) << "_fetched badness: got (but i already had) " << *in
op.omap_rm_keys(to_remove);
cache->mds->objecter->mutate(oid, oloc, op, snapc,
- ceph::real_clock::now(g_ceph_context),
+ ceph::real_clock::now(),
0, NULL, gather.new_sub());
write_size = 0;
op.omap_rm_keys(to_remove);
cache->mds->objecter->mutate(oid, oloc, op, snapc,
- ceph::real_clock::now(g_ceph_context),
+ ceph::real_clock::now(),
0, NULL, gather.new_sub());
gather.activate();
assert(scrub_infop && !scrub_infop->directory_scrubbing);
scrub_infop->recursive_start.version = get_projected_version();
- scrub_infop->recursive_start.time = ceph_clock_now(g_ceph_context);
+ scrub_infop->recursive_start.time = ceph_clock_now();
scrub_infop->directories_to_scrub.clear();
scrub_infop->directories_scrubbing.clear();
scrub_info();
if (rval) {
- scrub_infop->last_local.time = ceph_clock_now(g_ceph_context);
+ scrub_infop->last_local.time = ceph_clock_now();
scrub_infop->last_local.version = get_projected_version();
scrub_infop->pending_scrub_error = false;
scrub_infop->last_scrub_dirty = true;
//int cinode_pins[CINODE_NUM_PINS]; // counts
ostream& CInode::print_db_line_prefix(ostream& out)
{
- return out << ceph_clock_now(g_ceph_context) << " mds." << mdcache->mds->get_nodeid() << ".cache.ino(" << inode.ino << ") ";
+ return out << ceph_clock_now() << " mds." << mdcache->mds->get_nodeid() << ".cache.ino(" << inode.ino << ") ";
}
/*
new C_OnFinisher(new C_IO_Inode_Stored(this, get_version(), fin),
mdcache->mds->finisher);
mdcache->mds->objecter->mutate(oid, oloc, m, snapc,
- ceph::real_clock::now(g_ceph_context), 0,
+ ceph::real_clock::now(), 0,
NULL, newfin);
}
if (!state_test(STATE_DIRTYPOOL) || inode.old_pools.empty()) {
dout(20) << __func__ << ": no dirtypool or no old pools" << dendl;
mdcache->mds->objecter->mutate(oid, oloc, op, snapc,
- ceph::real_clock::now(g_ceph_context),
+ ceph::real_clock::now(),
0, NULL, fin2);
return;
}
C_GatherBuilder gather(g_ceph_context, fin2);
mdcache->mds->objecter->mutate(oid, oloc, op, snapc,
- ceph::real_clock::now(g_ceph_context),
+ ceph::real_clock::now(),
0, NULL, gather.new_sub());
// In the case where DIRTYPOOL is set, we update all old pools backtraces
object_locator_t oloc(*p);
mdcache->mds->objecter->mutate(oid, oloc, op, snapc,
- ceph::real_clock::now(g_ceph_context),
+ ceph::real_clock::now(),
0, NULL, gather.new_sub());
}
gather.activate();
cap->issue_norevoke(icr.capinfo.issued);
cap->reset_seq();
}
- cap->set_last_issue_stamp(ceph_clock_now(g_ceph_context));
+ cap->set_last_issue_stamp(ceph_clock_now());
return cap;
}
cap->issue_norevoke(issue);
issue = cap->pending();
cap->set_last_issue();
- cap->set_last_issue_stamp(ceph_clock_now(g_ceph_context));
+ cap->set_last_issue_stamp(ceph_clock_now());
cap->clear_new();
ecap.caps = issue;
ecap.wanted = cap->wanted();
_mark_dirty_parent(ls);
}
- ::decode(pop, ceph_clock_now(g_ceph_context), p);
+ ::decode(pop, ceph_clock_now(), p);
::decode(replica_map, p);
if (!replica_map.empty())
scrub_tag.setxattr("scrub_tag", tag_bl);
SnapContext snapc;
in->mdcache->mds->objecter->mutate(oid, object_locator_t(pool), scrub_tag, snapc,
- ceph::real_clock::now(g_ceph_context),
+ ceph::real_clock::now(),
0, NULL, NULL);
}
}
scrub_infop->header = header;
scrub_infop->scrub_start_version = get_version();
- scrub_infop->scrub_start_stamp = ceph_clock_now(g_ceph_context);
+ scrub_infop->scrub_start_stamp = ceph_clock_now();
// right now we don't handle remote inodes
}
while (i != scrub_infop->dirfrag_stamps.end()) {
if (i->second.scrub_start_version < scrub_infop->scrub_start_version) {
i->second.scrub_start_version = get_projected_version();
- i->second.scrub_start_stamp = ceph_clock_now(g_ceph_context);
+ i->second.scrub_start_stamp = ceph_clock_now();
*out_dirfrag = i->first;
dout(20) << " return frag " << *out_dirfrag << dendl;
return 0;
item_dirty_dirfrag_nest(this),
item_dirty_dirfrag_dirfragtree(this),
auth_pin_freeze_allowance(0),
- pop(ceph_clock_now(g_ceph_context)),
+ pop(ceph_clock_now()),
versionlock(this, &versionlock_type),
authlock(this, &authlock_type),
linklock(this, &linklock_type),
DamageEntry()
{
id = get_random(0, 0xffffffff);
- reported_at = ceph_clock_now(g_ceph_context);
+ reported_at = ceph_clock_now();
}
virtual damage_entry_type_t get_type() const = 0;
C_SaferCond waiter;
objecter->write_full(object_t(object_id), object_locator_t(pool_id),
SnapContext(), data,
- ceph::real_clock::now(g_ceph_context), 0, NULL,
+ ceph::real_clock::now(), 0, NULL,
&waiter);
int write_result = waiter.wait();
if (write_result < 0) {
objecter->write_full(object_t(get_object_id()), object_locator_t(pool_id),
SnapContext(), data,
- ceph::real_clock::now(g_ceph_context), 0, NULL,
+ ceph::real_clock::now(), 0, NULL,
completion);
}
}
mdr->done_locking = true;
- mdr->set_mds_stamp(ceph_clock_now(NULL));
+ mdr->set_mds_stamp(ceph_clock_now());
result = true;
marker.message = "acquired locks";
if (op == CEPH_CAP_OP_REVOKE) {
revoking_caps.push_back(&cap->item_revoking_caps);
revoking_caps_by_client[cap->get_client()].push_back(&cap->item_client_revoking_caps);
- cap->set_last_revoke_stamp(ceph_clock_now(g_ceph_context));
+ cap->set_last_revoke_stamp(ceph_clock_now());
cap->reset_num_revoke_warnings();
}
// No revoking caps at the moment
return false;
} else {
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
utime_t age = now - (*p)->get_last_revoke_stamp();
if (age <= g_conf->mds_revoke_cap_timeout) {
return false;
void Locker::caps_tick()
{
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
dout(20) << __func__ << " " << revoking_caps.size() << " revoking caps" << dendl;
m->h.seq = ++l->seq;
m->clear_payload();
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
now += mdcache->client_lease_durations[pool];
mdcache->touch_client_lease(l, pool, now);
<< " - already on list since " << lock->get_update_stamp() << dendl;
} else {
updated_scatterlocks.push_back(lock->get_updated_item());
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
lock->set_update_stamp(now);
dout(10) << "mark_updated_scatterlock " << *lock
<< " - added at " << now << dendl;
dout(10) << "scatter_tick" << dendl;
// updated
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
int n = updated_scatterlocks.size();
while (!updated_scatterlocks.empty()) {
ScatterLock *lock = updated_scatterlocks.front();
void MDBalancer::tick()
{
static int num_bal_times = g_conf->mds_bal_max;
- static utime_t first = ceph_clock_now(g_ceph_context);
- utime_t now = ceph_clock_now(g_ceph_context);
+ static utime_t first = ceph_clock_now();
+ utime_t now = ceph_clock_now();
utime_t elapsed = now;
elapsed -= first;
<< " oid=" << oid << " oloc=" << oloc << dendl;
/* timeout: if we waste half our time waiting for RADOS, then abort! */
- double t = ceph_clock_now(g_ceph_context) + g_conf->mds_bal_interval/2;
+ double t = ceph_clock_now() + g_conf->mds_bal_interval/2;
utime_t timeout;
timeout.set_from_double(t);
lock.Lock();
void MDBalancer::send_heartbeat()
{
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
if (mds->mdsmap->is_degraded()) {
dout(10) << "send_heartbeat degraded" << dendl;
} else {
int cluster_size = mds->get_mds_map()->get_num_in_mds();
mds_rank_t whoami = mds->get_nodeid();
- rebalance_time = ceph_clock_now(g_ceph_context);
+ rebalance_time = ceph_clock_now();
// reset
my_targets.clear();
double total_load = 0.0;
multimap<double,mds_rank_t> load_map;
for (mds_rank_t i=mds_rank_t(0); i < mds_rank_t(cluster_size); i++) {
- map<mds_rank_t, mds_load_t>::value_type val(i, mds_load_t(ceph_clock_now(g_ceph_context)));
+ map<mds_rank_t, mds_load_t>::value_type val(i, mds_load_t(ceph_clock_now()));
std::pair < map<mds_rank_t, mds_load_t>::iterator, bool > r(mds_load.insert(val));
mds_load_t &load(r.first->second);
/* prepare for balancing */
int cluster_size = mds->get_mds_map()->get_num_in_mds();
- rebalance_time = ceph_clock_now(g_ceph_context);
+ rebalance_time = ceph_clock_now();
my_targets.clear();
imported.clear();
exported.clear();
for (mds_rank_t i=mds_rank_t(0);
i < mds_rank_t(cluster_size);
i++) {
- map<mds_rank_t, mds_load_t>::value_type val(i, mds_load_t(ceph_clock_now(g_ceph_context)));
+ map<mds_rank_t, mds_load_t>::value_type val(i, mds_load_t(ceph_clock_now()));
std::pair < map<mds_rank_t, mds_load_t>::iterator, bool > r(mds_load.insert(val));
mds_load_t &load(r.first->second);
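Reviewer note: both balancer hunks build a map::value_type up front so that mds_load_t is constructed with a timestamp only if the key is absent; insert() then hands back an iterator to the new or existing entry either way. The idiom in isolation:

    #include <map>
    #include <iostream>

    struct load_t {                 // stands in for mds_load_t
      double when;
      explicit load_t(double now) : when(now) {}
    };

    int main() {
      std::map<int, load_t> loads;
      double now = 123.0;
      // insert() only stores the value if the key is missing;
      // r.first points at the (new or existing) entry either way.
      std::map<int, load_t>::value_type val(1, load_t(now));
      std::pair<std::map<int, load_t>::iterator, bool> r(loads.insert(val));
      load_t& load = r.first->second;
      std::cout << "inserted=" << r.second << " when=" << load.when << "\n";
    }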
in->inode.size = 0;
in->inode.ctime =
in->inode.mtime =
- in->inode.btime = ceph_clock_now(g_ceph_context);
+ in->inode.btime = ceph_clock_now();
in->inode.nlink = 1;
in->inode.truncate_size = -1ull;
in->inode.change_attr = 0;
// adjust recursive pop counters
if (dir->is_auth()) {
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
CDir *p = dir->get_parent_dir();
while (p) {
p->pop_auth_subtree.sub(now, decayrate, dir->pop_auth_subtree);
// adjust popularity?
if (dir->is_auth()) {
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
CDir *p = dir->get_parent_dir();
while (p) {
p->pop_auth_subtree.add(now, decayrate, dir->pop_auth_subtree);
// make sure stamp is set
if (mut->get_mds_stamp() == utime_t())
- mut->set_mds_stamp(ceph_clock_now(g_ceph_context));
+ mut->set_mds_stamp(ceph_clock_now());
if (in->is_base())
return;
if (cap->get_last_seq() == 0) // reconnected cap
cap->inc_last_seq();
cap->set_last_issue();
- cap->set_last_issue_stamp(ceph_clock_now(g_ceph_context));
+ cap->set_last_issue_stamp(ceph_clock_now());
cap->clear_new();
MClientCaps *reap = new MClientCaps(CEPH_CAP_OP_IMPORT,
in->ino(),
mds->logger->inc("outt");
else {
mds->logger->inc("outut");
- mds->logger->fset("oututl", ceph_clock_now(g_ceph_context) - in->hack_load_stamp);
+ mds->logger->fset("oututl", ceph_clock_now() - in->hack_load_stamp);
}
}
*/
void MDCache::trim_client_leases()
{
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
dout(10) << "trim_client_leases" << dendl;
if (num_inodes_with_caps > g_conf->mds_cache_size) {
float ratio = (float)g_conf->mds_cache_size * .9 / (float)num_inodes_with_caps;
if (ratio < 1.0) {
- last_recall_state = ceph_clock_now(g_ceph_context);
+ last_recall_state = ceph_clock_now();
mds->server->recall_client_state(ratio);
}
}
void MDCache::shutdown_check()
{
- dout(0) << "shutdown_check at " << ceph_clock_now(g_ceph_context) << dendl;
+ dout(0) << "shutdown_check at " << ceph_clock_now() << dendl;
// cache
char old_val[32] = { 0 };
MDRequestImpl::Params params;
params.reqid.name = entity_name_t::MDS(mds->get_nodeid());
params.reqid.tid = mds->issue_tid();
- params.initiated = ceph_clock_now(g_ceph_context);
+ params.initiated = ceph_clock_now();
params.internal_op = op;
MDRequestRef mdr =
mds->op_tracker.create_request<MDRequestImpl,MDRequestImpl::Params>(params);
info.mdr = mdr;
info.dirs.push_back(dir);
info.bits = bits;
- info.last_cum_auth_pins_change = ceph_clock_now(g_ceph_context);
+ info.last_cum_auth_pins_change = ceph_clock_now();
fragment_freeze_dirs(dirs);
// initial mark+complete pass
info.mdr = mdr;
info.dirs = dirs;
info.bits = -bits;
- info.last_cum_auth_pins_change = ceph_clock_now(g_ceph_context);
+ info.last_cum_auth_pins_change = ceph_clock_now();
fragment_freeze_dirs(dirs);
// initial mark+complete pass
{
dout(10) << "find_stale_fragment_freeze" << dendl;
// see comment in Migrator::find_stale_export_freeze()
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
utime_t cutoff = now;
cutoff -= g_conf->mds_freeze_tree_timeout;
op.remove();
}
mds->objecter->mutate(oid, oloc, op, nullsnapc,
- ceph::real_clock::now(g_ceph_context),
+ ceph::real_clock::now(),
0, NULL, gather.new_sub());
}
le->_segment = ls;
le->update_segment();
- le->set_stamp(ceph_clock_now(g_ceph_context));
+ le->set_stamp(ceph_clock_now());
mdsmap_up_features = mds->mdsmap->get_up_features();
pending_events[ls->seq].push_back(PendingEvent(le, c));
}
// hack: only trim for a few seconds at a time
- utime_t stop = ceph_clock_now(g_ceph_context);
+ utime_t stop = ceph_clock_now();
stop += 2.0;
map<uint64_t,LogSegment*>::iterator p = segments.begin();
num_events - expiring_events - expired_events > max_events) ||
(segments.size() - expiring_segments.size() - expired_segments.size() > max_segments))) {
- if (stop < ceph_clock_now(g_ceph_context))
+ if (stop < ceph_clock_now())
break;
int num_expiring_segments = (int)expiring_segments.size();
}
// log
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
mds_load_t load = balancer->get_load(now);
if (logger) {
// hack: thrash exports
static utime_t start;
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
if (start == utime_t())
start = now;
/*double el = now - start;
object_locator_t oloc(mds->mdsmap->get_metadata_pool());
mds->objecter->write_full(oid, oloc,
snapc,
- bl, ceph::real_clock::now(g_ceph_context), 0,
+ bl, ceph::real_clock::now(), 0,
NULL,
new C_OnFinisher(new C_IO_MT_Save(this, version),
mds->finisher));
void Migrator::find_stale_export_freeze()
{
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
utime_t cutoff = now;
cutoff -= g_conf->mds_freeze_tree_timeout;
mds->send_message_mds(discover, it->second.peer);
assert(g_conf->mds_kill_export_at != 2);
- it->second.last_cum_auth_pins_change = ceph_clock_now(g_ceph_context);
+ it->second.last_cum_auth_pins_change = ceph_clock_now();
// start the freeze, but hold it up with an auth_pin.
dir->freeze_tree();
cache->adjust_subtree_auth(dir, mds->get_nodeid(), it->second.peer);
// take away the popularity we're sending.
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
mds->balancer->subtract_export(dir, now);
// fill export message with cache data
// finish export (adjust local cache state)
int num_dentries = 0;
C_ContextsBase<MDSInternalContextBase, MDSInternalContextGather> *fin = new C_ContextsBase<MDSInternalContextBase, MDSInternalContextGather>(g_ceph_context);
- finish_export_dir(dir, ceph_clock_now(g_ceph_context), it->second.peer,
+ finish_export_dir(dir, ceph_clock_now(), it->second.peer,
it->second.peer_imported, fin->contexts, &num_dentries);
dir->add_waiter(CDir::WAIT_UNFREEZE, fin);
assert(it->second.state == IMPORT_PREPPED);
assert(it->second.tid == m->get_tid());
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
mds_rank_t oldauth = mds_rank_t(m->get_source().num());
dout(7) << "handle_export_dir importing " << *dir << " from " << oldauth << dendl;
assert(dir->is_auth() == false);
// timeout/stale
// (caps go stale, lease die)
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
utime_t cutoff = now;
cutoff -= g_conf->mds_session_timeout;
while (1) {
// clients will get the mdsmap and discover we're reconnecting via the monitor.
- reconnect_start = ceph_clock_now(g_ceph_context);
+ reconnect_start = ceph_clock_now();
dout(1) << "reconnect_clients -- " << client_reconnect_gather.size() << " sessions" << dendl;
mds->sessionmap.dump();
}
return;
}
- utime_t delay = ceph_clock_now(g_ceph_context);
+ utime_t delay = ceph_clock_now();
delay -= reconnect_start;
dout(10) << " reconnect_start " << reconnect_start << " delay " << delay << dendl;
{
utime_t reconnect_end = reconnect_start;
reconnect_end += g_conf->mds_reconnect_timeout;
- if (ceph_clock_now(g_ceph_context) >= reconnect_end &&
+ if (ceph_clock_now() >= reconnect_end &&
!client_reconnect_gather.empty()) {
dout(10) << "reconnect timed out" << dendl;
for (set<client_t>::iterator p = client_reconnect_gather.begin();
mdr->did_early_reply = true;
mds->logger->inc(l_mds_reply);
- utime_t lat = ceph_clock_now(g_ceph_context) - req->get_recv_stamp();
+ utime_t lat = ceph_clock_now() - req->get_recv_stamp();
mds->logger->tinc(l_mds_reply_latency, lat);
dout(20) << "lat " << lat << dendl;
if (!did_early_reply && !is_replay) {
mds->logger->inc(l_mds_reply);
- utime_t lat = ceph_clock_now(g_ceph_context) - mdr->client_request->get_recv_stamp();
+ utime_t lat = ceph_clock_now() - mdr->client_request->get_recv_stamp();
mds->logger->tinc(l_mds_reply_latency, lat);
dout(20) << "lat " << lat << dendl;
bufferlist bl;
mds_rank_t whoami = mds->get_nodeid();
client_t client = session->get_client();
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
dout(20) << "set_trace_dist snapid " << snapid << dendl;
// value for them. (currently this matters for xattrs and inline data)
mdr->getattr_caps = mask;
- mds->balancer->hit_inode(ceph_clock_now(g_ceph_context), ref, META_POP_IRD,
+ mds->balancer->hit_inode(ceph_clock_now(), ref, META_POP_IRD,
req->get_source().num());
// reply
dir->verify_fragstat();
#endif
- utime_t now = ceph_clock_now(NULL);
+ utime_t now = ceph_clock_now();
mdr->set_mds_stamp(now);
snapid_t snapid = mdr->snapid;
assert(g_conf->mds_kill_link_at != 2);
- mdr->set_mds_stamp(ceph_clock_now(NULL));
+ mdr->set_mds_stamp(ceph_clock_now());
// add to event
mdr->ls = mdlog->get_current_segment();
assert(g_conf->mds_kill_rename_at != 4);
// -- declare now --
- mdr->set_mds_stamp(ceph_clock_now(g_ceph_context));
+ mdr->set_mds_stamp(ceph_clock_now());
// -- prepare journal entry --
mdr->ls = mdlog->get_current_segment();
null_sessions.clear();
mds->objecter->mutate(oid, oloc, op, snapc,
- ceph::real_clock::now(g_ceph_context),
- 0, NULL, new C_OnFinisher(new C_IO_SM_Save(this, version),
- mds->finisher));
+ ceph::real_clock::now(),
+ 0, NULL,
+ new C_OnFinisher(new C_IO_SM_Save(this, version),
+ mds->finisher));
}
void SessionMap::_save_finish(version_t v)
void SessionMapStore::decode_legacy(bufferlist::iterator& p)
{
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
uint64_t pre;
::decode(pre, p);
if (pre == (uint64_t)-1) {
new xlist<Session*>).first;
by_state_entry->second->push_back(&session->item_session_list);
- session->last_cap_renew = ceph_clock_now(g_ceph_context);
+ session->last_cap_renew = ceph_clock_now();
}
void SessionMap::_mark_dirty(Session *s)
object_locator_t oloc(mds->mdsmap->get_metadata_pool());
MDSInternalContextBase *on_safe = gather_bld->new_sub();
mds->objecter->mutate(oid, oloc, op, snapc,
- ceph::real_clock::now(g_ceph_context),
+ ceph::real_clock::now(),
0, NULL, new C_OnFinisher(
new C_IO_SM_Save_One(this, on_safe),
mds->finisher));
if (recalled_at.is_zero()) {
// Entering recall phase, set up counters so we can later
// judge whether the client has respected the recall request
- recalled_at = last_recall_sent = ceph_clock_now(g_ceph_context);
+ recalled_at = last_recall_sent = ceph_clock_now();
assert (new_limit < caps.size()); // Behaviour of Server::recall_client_state
recall_count = caps.size() - new_limit;
recall_release_count = 0;
} else {
- last_recall_sent = ceph_clock_now(g_ceph_context);
+ last_recall_sent = ceph_clock_now();
}
}
} else {
s = session_map[i.name] = new Session;
s->info.inst = i;
- s->last_cap_renew = ceph_clock_now(g_ceph_context);
+ s->last_cap_renew = ceph_clock_now();
if (logger) {
logger->set(l_mdssm_session_count, session_map.size());
logger->inc(l_mdssm_session_add);
object_t oid = CInode::get_object_name(in->inode.ino, *p, "");
dout(10) << __func__ << " remove dirfrag " << oid << dendl;
mds->objecter->remove(oid, oloc, nullsnapc,
- ceph::real_clock::now(g_ceph_context),
+ ceph::real_clock::now(),
0, NULL, gather.new_sub());
}
assert(gather.has_subs());
dout(10) << __func__ << " 0~" << to << " objects 0~" << num
<< " snapc " << snapc << " on " << *in << dendl;
filer.purge_range(in->inode.ino, &in->inode.layout, *snapc,
- 0, num, ceph::real_clock::now(g_ceph_context), 0,
+ 0, num, ceph::real_clock::now(), 0,
gather.new_sub());
}
}
dout(10) << __func__ << " remove backtrace object " << oid
<< " pool " << oloc.pool << " snapc " << snapc << dendl;
mds->objecter->remove(oid, oloc, *snapc,
- ceph::real_clock::now(g_ceph_context), 0,
+ ceph::real_clock::now(), 0,
NULL, gather.new_sub());
}
// remove old backtrace objects
dout(10) << __func__ << " remove backtrace object " << oid
<< " old pool " << *p << " snapc " << snapc << dendl;
mds->objecter->remove(oid, oloc, *snapc,
- ceph::real_clock::now(g_ceph_context), 0,
+ ceph::real_clock::now(), 0,
NULL, gather.new_sub());
}
assert(gather.has_subs());
// keep backtrace object
if (num > 1) {
filer.purge_range(in->ino(), &in->inode.layout, *snapc,
- 1, num - 1, ceph::real_clock::now(g_ceph_context),
+ 1, num - 1, ceph::real_clock::now(),
0, gather.new_sub());
}
filer.zero(in->ino(), &in->inode.layout, *snapc,
0, in->inode.layout.object_size,
- ceph::real_clock::now(g_ceph_context),
+ ceph::real_clock::now(),
0, true, NULL, gather.new_sub());
}
inline std::ostream& operator<<(std::ostream& out, dirfrag_load_vec_t& dl)
{
// ugliness!
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
DecayRate rate(g_conf->mds_decay_halflife);
return out << "[" << dl.vec[0].get(now, rate) << "," << dl.vec[1].get(now, rate)
<< " " << dl.meta_load(now, rate)
DecayCounter count;
public:
- load_spread_t() : p(0), n(0), count(ceph_clock_now(g_ceph_context))
+ load_spread_t() : p(0), n(0), count(ceph_clock_now())
{
for (int i=0; i<MAX; i++)
last[i] = -1;
}
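Reviewer note: the load vectors sampled here are DecayCounters; they need a current time and a DecayRate derived from mds_decay_halflife because stored values decay exponentially between reads. The underlying math, as an illustrative formula rather than the exact DecayCounter code:

    #include <cmath>
    #include <cstdio>

    // Half-life decay: after h seconds a counter retains half its value, so
    // value(t) = value(t0) * exp(-ln(2) * (t - t0) / h).
    double decayed(double value, double elapsed_sec, double halflife_sec) {
      double k = std::log(2.0) / halflife_sec;   // DecayRate-style constant
      return value * std::exp(-k * elapsed_sec);
    }

    int main() {
      double v = 100.0;
      std::printf("%.1f\n", decayed(v, 0, 5));    // 100.0
      std::printf("%.1f\n", decayed(v, 5, 5));    // 50.0
      std::printf("%.1f\n", decayed(v, 10, 5));   // 25.0
    }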
void decode_payload() {
bufferlist::iterator p = payload.begin();
- utime_t now(ceph_clock_now(NULL));
+ utime_t now(ceph_clock_now());
::decode(load, now, p);
::decode(beat, p);
::decode(import_map, p);
declared_types.insert(t.path);
}
- const auto now = ceph_clock_now(g_ceph_context);
+ const auto now = ceph_clock_now();
// Parse packed data according to declared set of types
bufferlist::iterator p = report->packed.begin();
ours.store_stats.bytes_sst = extra["sst"];
ours.store_stats.bytes_log = extra["log"];
ours.store_stats.bytes_misc = extra["misc"];
- ours.last_update = ceph_clock_now(g_ceph_context);
+ ours.last_update = ceph_clock_now();
return 0;
}
<< " total " << prettybyte_t(ours.fs_stats.byte_total)
<< ", used " << prettybyte_t(ours.fs_stats.byte_used)
<< ", avail " << prettybyte_t(ours.fs_stats.byte_avail) << dendl;
- ours.last_update = ceph_clock_now(g_ceph_context);
+ ours.last_update = ceph_clock_now();
return update_store_stats(ours);
}
int r = mon->store->apply_transaction(t);
assert(r >= 0);
}
- start_stamp = ceph_clock_now(g_ceph_context);
+ start_stamp = ceph_clock_now();
electing_me = true;
acked_me[mon->rank].cluster_features = CEPH_FEATURES_ALL;
acked_me[mon->rank].mon_features = ceph::features::mon::get_supported();
// ack them
leader_acked = who;
- ack_stamp = ceph_clock_now(g_ceph_context);
+ ack_stamp = ceph_clock_now();
MMonElection *m = new MMonElection(MMonElection::OP_ACK, epoch, mon->monmap);
m->mon_features = ceph::features::mon::get_supported();
m->sharing_bl = mon->get_supported_commands_bl();
dout(10) << "create_initial -- creating initial map" << dendl;
LogEntry e;
memset(&e.who, 0, sizeof(e.who));
- e.stamp = ceph_clock_now(g_ceph_context);
+ e.stamp = ceph_clock_now();
e.prio = CLOG_INFO;
std::stringstream ss;
ss << "mkfs " << mon->monmap->get_fsid();
dout(10) << __func__ << " skipped from " << sv
<< " to first_committed " << get_first_committed() << dendl;
LogEntry le;
- le.stamp = ceph_clock_now(NULL);
+ le.stamp = ceph_clock_now();
le.prio = CLOG_WARN;
ostringstream ss;
ss << "skipped log messages from " << sv << " to " << get_first_committed();
fs->mds_map.cas_pool = -1;
fs->mds_map.max_file_size = g_conf->mds_max_file_size;
fs->mds_map.compat = fsm.compat;
- fs->mds_map.created = ceph_clock_now(g_ceph_context);
- fs->mds_map.modified = ceph_clock_now(g_ceph_context);
+ fs->mds_map.created = ceph_clock_now();
+ fs->mds_map.modified = ceph_clock_now();
fs->mds_map.session_timeout = g_conf->mds_session_timeout;
fs->mds_map.session_autoclose = g_conf->mds_session_autoclose;
fs->mds_map.enabled = true;
// Set 'modified' on maps modified this epoch
for (auto &i : fsmap.filesystems) {
if (i.second->mds_map.epoch == fsmap.epoch) {
- i.second->mds_map.modified = ceph_clock_now(g_ceph_context);
+ i.second->mds_map.modified = ceph_clock_now();
}
}
version_t seq = m->get_seq();
dout(15) << "_note_beacon " << *m << " noting time" << dendl;
- last_beacon[gid].stamp = ceph_clock_now(g_ceph_context);
+ last_beacon[gid].stamp = ceph_clock_now();
last_beacon[gid].seq = seq;
}
}
// initialize the beacon timer
- last_beacon[gid].stamp = ceph_clock_now(g_ceph_context);
+ last_beacon[gid].stamp = ceph_clock_now();
last_beacon[gid].seq = seq;
// new incompat?
dout(4) << __func__ << ": marking rank "
<< info.rank << " damaged" << dendl;
- utime_t until = ceph_clock_now(g_ceph_context);
+ utime_t until = ceph_clock_now();
until += g_conf->mds_blacklist_interval;
const auto blacklist_epoch = mon->osdmon()->blacklist(info.addr, until);
request_proposal(mon->osdmon());
epoch_t blacklist_epoch = 0;
if (info.rank >= 0 && info.state != MDSMap::STATE_STANDBY_REPLAY) {
- utime_t until = ceph_clock_now(g_ceph_context);
+ utime_t until = ceph_clock_now();
until += g_conf->mds_blacklist_interval;
blacklist_epoch = mon->osdmon()->blacklist(info.addr, until);
}
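Reviewer note: blacklisting stamps an absolute expiry, until = now + mds_blacklist_interval, and entries whose stamp has passed are dropped later; the "osd blacklist add" handler further down uses the same pattern. A trivial expiry-by-timestamp sketch:

    #include <ctime>
    #include <map>
    #include <string>
    #include <iostream>

    int main() {
      std::map<std::string, time_t> blacklist;          // addr -> expiry stamp
      time_t now = time(nullptr);
      blacklist["192.0.2.1:0/0"] = now + 24 * 60 * 60;  // e.g. a 24h interval
      for (auto it = blacklist.begin(); it != blacklist.end(); ) {
        if (it->second <= now) it = blacklist.erase(it); // expired
        else ++it;
      }
      std::cout << blacklist.size() << " entries live\n";
    }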
new_fs->mds_map.fs_name = fs->mds_map.fs_name;
new_fs->mds_map.max_file_size = g_conf->mds_max_file_size;
new_fs->mds_map.compat = fsmap.compat;
- new_fs->mds_map.created = ceph_clock_now(g_ceph_context);
- new_fs->mds_map.modified = ceph_clock_now(g_ceph_context);
+ new_fs->mds_map.created = ceph_clock_now();
+ new_fs->mds_map.modified = ceph_clock_now();
new_fs->mds_map.session_timeout = g_conf->mds_session_timeout;
new_fs->mds_map.session_autoclose = g_conf->mds_session_autoclose;
new_fs->mds_map.enabled = true;
<< " " << ceph_mds_state_name(info.state)
<< " laggy" << dendl;
pending_fsmap.modify_daemon(info.global_id, [](MDSMap::mds_info_t *info) {
- info->laggy_since = ceph_clock_now(g_ceph_context);
+ info->laggy_since = ceph_clock_now();
});
*mds_propose = true;
}
do_propose |= maybe_expand_cluster(i.second);
}
- const auto now = ceph_clock_now(g_ceph_context);
+ const auto now = ceph_clock_now();
if (last_tick.is_zero()) {
last_tick = now;
}
MMgrBeacon *m = static_cast<MMgrBeacon*>(op->get_req());
dout(4) << "beacon from " << m->get_gid() << dendl;
- last_beacon[m->get_gid()] = ceph_clock_now(g_ceph_context);
+ last_beacon[m->get_gid()] = ceph_clock_now();
if (pending_map.active_gid == m->get_gid()
&& pending_map.active_addr == m->get_server_addr()
}
}
- last_beacon[m->get_gid()] = ceph_clock_now(g_ceph_context);
+ last_beacon[m->get_gid()] = ceph_clock_now();
// Track whether we modified pending_map
bool updated = false;
void MgrMonitor::tick()
{
- const utime_t now = ceph_clock_now(g_ceph_context);
+ const utime_t now = ceph_clock_now();
utime_t cutoff = now;
cutoff -= g_conf->mon_mgr_beacon_grace;
utime_t interval;
interval.set_from_double(cct->_conf->mon_client_hunt_interval);
- map_cond.WaitInterval(cct, monc_lock, interval);
+ map_cond.WaitInterval(monc_lock, interval);
if (monmap.fsid.is_zero() && cur_con) {
cur_con->mark_down(); // nope, clean that connection up
if (cur_mon.empty())
_reopen_session();
- utime_t until = ceph_clock_now(cct);
+ utime_t until = ceph_clock_now();
until += timeout;
if (timeout > 0.0)
ldout(cct, 10) << "authenticate will time out at " << until << dendl;
_reopen_session();
} else if (!cur_mon.empty()) {
// just renew as needed
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
if (!cur_con->has_feature(CEPH_FEATURE_MON_STATEFUL_SUB)) {
ldout(cct, 10) << "renew subs? (now: " << now
<< "; renew after: " << sub_renew_after << ") -- "
_reopen_session();
else {
if (sub_renew_sent == utime_t())
- sub_renew_sent = ceph_clock_now(cct);
+ sub_renew_sent = ceph_clock_now();
MMonSubscribe *m = new MMonSubscribe;
m->what = sub_new;
return 0;
}
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
utime_t cutoff = now;
cutoff -= MIN(30.0, cct->_conf->auth_service_ticket_ttl / 4.0);
utime_t issued_at_lower_bound = now;
int MonClient::wait_auth_rotating(double timeout)
{
Mutex::Locker l(monc_lock);
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
utime_t until = now;
until += timeout;
}
ldout(cct, 10) << "wait_auth_rotating waiting (until " << until << ")" << dendl;
auth_cond.WaitUntil(monc_lock, until);
- now = ceph_clock_now(cct);
+ now = ceph_clock_now();
}
ldout(cct, 10) << "wait_auth_rotating done" << dendl;
return 0;
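Reviewer note: MonClient's waits also lose the CephContext argument; WaitInterval is a relative wait and WaitUntil an absolute deadline. The same pair expressed with std::condition_variable (an equivalent idiom, not the Cond API itself):

    #include <chrono>
    #include <condition_variable>
    #include <iostream>
    #include <mutex>

    // WaitInterval is a relative wait, WaitUntil an absolute deadline;
    // both release the lock while blocked.
    int main() {
      std::mutex m;
      std::condition_variable cv;
      std::unique_lock<std::mutex> l(m);

      // "WaitInterval(lock, interval)" ~ wait_for
      cv.wait_for(l, std::chrono::milliseconds(50));

      // "WaitUntil(lock, until)" ~ wait_until
      auto until = std::chrono::steady_clock::now() + std::chrono::milliseconds(50);
      cv.wait_until(l, until);

      std::cout << "both waits timed out as expected\n";
    }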
{ }
int wait_for_reply(double timeout = 0.0) {
- utime_t until = ceph_clock_now(cct);
+ utime_t until = ceph_clock_now();
until += (timeout > 0 ? timeout : cct->_conf->client_mount_timeout);
done = false;
<< std::endl;
return r;
}
- created = ceph_clock_now(cct);
+ created = ceph_clock_now();
last_changed = created;
return 0;
}
errout << "no monitors specified to connect to." << std::endl;
return -ENOENT;
}
- created = ceph_clock_now(cct);
+ created = ceph_clock_now();
last_changed = created;
return 0;
}
bool r = mon_caps->parse("allow *", NULL);
assert(r);
- exited_quorum = ceph_clock_now(g_ceph_context);
+ exited_quorum = ceph_clock_now();
// assume our commands until we have an election. this only means
// we won't reply with EINVAL before the election; any command that
leader_since = utime_t();
if (!quorum.empty()) {
- exited_quorum = ceph_clock_now(g_ceph_context);
+ exited_quorum = ceph_clock_now();
}
quorum.clear();
outside_quorum.clear();
{
dout(20) << __func__ << dendl;
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
map<uint64_t,SyncProvider>::iterator p = sync_providers.begin();
while (p != sync_providers.end()) {
if (now > p->second.timeout) {
<< dendl;
assert(is_electing());
state = STATE_LEADER;
- leader_since = ceph_clock_now(g_ceph_context);
+ leader_since = ceph_clock_now();
leader = rank;
quorum = active;
quorum_con_features = features;
utime_t Monitor::health_interval_calc_next_update()
{
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
time_t secs = now.sec();
int remainder = secs % cct->_conf->mon_health_to_clog_interval;
if (prefix == "compact" || prefix == "mon compact") {
dout(1) << "triggering manual compaction" << dendl;
- utime_t start = ceph_clock_now(g_ceph_context);
+ utime_t start = ceph_clock_now();
store->compact();
- utime_t end = ceph_clock_now(g_ceph_context);
+ utime_t end = ceph_clock_now();
end -= start;
dout(1) << "finished manual compaction in " << end << " seconds" << dendl;
ostringstream oss;
f->dump_stream("cluster_fingerprint") << fingerprint;
f->dump_string("version", ceph_version_to_str());
f->dump_string("commit", git_version_to_str());
- f->dump_stream("timestamp") << ceph_clock_now(NULL);
+ f->dump_stream("timestamp") << ceph_clock_now();
vector<string> tagsvec;
cmd_getval(g_ceph_context, cmdmap, "tags", tagsvec);
Message *m = op->get_req();
MonSession *s = op->get_session();
ConnectionRef con = op->get_connection();
- utime_t too_old = ceph_clock_now(g_ceph_context);
+ utime_t too_old = ceph_clock_now();
too_old -= g_ceph_context->_conf->mon_lease;
if (m->get_recv_stamp() > too_old &&
con->is_connected()) {
assert(s);
- s->session_timeout = ceph_clock_now(NULL);
+ s->session_timeout = ceph_clock_now();
s->session_timeout += g_conf->mon_session_timeout;
if (s->auth_handler) {
if (timecheck_round % 2) {
dout(10) << __func__ << " there's a timecheck going on" << dendl;
- utime_t curr_time = ceph_clock_now(g_ceph_context);
+ utime_t curr_time = ceph_clock_now();
double max = g_conf->mon_timecheck_interval*3;
if (curr_time - timecheck_round_start < max) {
dout(10) << __func__ << " keep current round going" << dendl;
assert(timecheck_round % 2 == 0);
timecheck_acks = 0;
timecheck_round ++;
- timecheck_round_start = ceph_clock_now(g_ceph_context);
+ timecheck_round_start = ceph_clock_now();
dout(10) << __func__ << " new " << timecheck_round << dendl;
timecheck();
continue;
entity_inst_t inst = monmap->get_inst(*it);
- utime_t curr_time = ceph_clock_now(g_ceph_context);
+ utime_t curr_time = ceph_clock_now();
timecheck_waiting[inst] = curr_time;
MTimeCheck *m = new MTimeCheck(MTimeCheck::OP_PING);
m->epoch = get_epoch();
return;
}
- utime_t curr_time = ceph_clock_now(g_ceph_context);
+ utime_t curr_time = ceph_clock_now();
assert(timecheck_waiting.count(other) > 0);
utime_t timecheck_sent = timecheck_waiting[other];
assert((timecheck_round % 2) != 0);
MTimeCheck *reply = new MTimeCheck(MTimeCheck::OP_PONG);
- utime_t curr_time = ceph_clock_now(g_ceph_context);
+ utime_t curr_time = ceph_clock_now();
reply->timestamp = curr_time;
reply->epoch = m->epoch;
reply->round = m->round;
}
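Reviewer note: the timecheck exchange records when each ping left (timecheck_waiting) and stamps the pong with the peer's clock, which is enough to derive a round-trip latency and a skew estimate. An NTP-style midpoint calculation, shown here as an illustration and not necessarily the monitor's exact formula:

    #include <cstdio>

    // Given our send/receive stamps and the peer's reply stamp, estimate
    // one-way latency and clock skew, NTP style.
    int main() {
      double sent = 100.000;        // timecheck_waiting[peer]
      double received = 100.080;    // curr_time when the pong arrives
      double peer_stamp = 100.300;  // reply->timestamp from the peer

      double latency = received - sent;                   // round trip
      double skew = peer_stamp - (sent + latency / 2.0);  // midpoint estimate
      std::printf("latency=%.3fs skew=%+.3fs\n", latency, skew);
    }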
// trim sessions
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
xlist<MonSession*>::iterator p = session_map.sessions.begin();
bool out_for_too_long = (!exited_quorum.is_zero()
SyncProvider() : cookie(0), last_committed(0), full(false) {}
void reset_timeout(CephContext *cct, int grace) {
- timeout = ceph_clock_now(cct);
+ timeout = ceph_clock_now();
timeout += grace;
}
};
{
pending_map = *mon->monmap;
pending_map.epoch++;
- pending_map.last_changed = ceph_clock_now(g_ceph_context);
+ pending_map.last_changed = ceph_clock_now();
dout(10) << "create_pending monmap epoch " << pending_map.epoch << dendl;
}
*/
pending_map.add(name, addr);
- pending_map.last_changed = ceph_clock_now(g_ceph_context);
+ pending_map.last_changed = ceph_clock_now();
ss << "adding mon." << name << " at " << addr;
propose = true;
dout(0) << __func__ << " proposing new mon." << name << dendl;
entity_addr_t addr = pending_map.get_addr(name);
pending_map.remove(name);
- pending_map.last_changed = ceph_clock_now(g_ceph_context);
+ pending_map.last_changed = ceph_clock_now();
ss << "removing mon." << name << " at " << addr
<< ", there will be " << pending_map.size() << " monitors" ;
propose = true;
if (pending_map.contains(join->addr))
pending_map.remove(pending_map.get_name(join->addr));
pending_map.add(join->name, join->addr);
- pending_map.last_changed = ceph_clock_now(g_ceph_context);
+ pending_map.last_changed = ceph_clock_now();
return true;
}
g_conf->osd_pg_bits, g_conf->osd_pgp_bits);
}
newmap.set_epoch(1);
- newmap.created = newmap.modified = ceph_clock_now(g_ceph_context);
+ newmap.created = newmap.modified = ceph_clock_now();
// new clusters should sort bitwise by default.
newmap.set_flag(CEPH_OSDMAP_SORTBITWISE);
// populate down -> out map
if (found == down_pending_out.end()) {
dout(10) << " adding osd." << o << " to down_pending_out map" << dendl;
- down_pending_out[o] = ceph_clock_now(g_ceph_context);
+ down_pending_out[o] = ceph_clock_now();
}
} else {
if (found != down_pending_out.end()) {
PGMap *pg_map = &mon->pgmon()->pg_map;
- utime_t stop = ceph_clock_now(NULL);
+ utime_t stop = ceph_clock_now();
stop += g_conf->mon_osd_prime_pg_temp_max_time;
int chunk = 1000;
int n = chunk;
prime_pg_temp(next, pp);
if (--n <= 0) {
n = chunk;
- if (ceph_clock_now(NULL) > stop) {
+ if (ceph_clock_now() > stop) {
dout(10) << __func__ << " consumed more than "
<< g_conf->mon_osd_prime_pg_temp_max_time
<< " seconds, stopping"
n -= prime_pg_temp(next, pg_map, *p);
if (n <= 0) {
n = chunk;
- if (ceph_clock_now(NULL) > stop) {
+ if (ceph_clock_now() > stop) {
dout(10) << __func__ << " consumed more than "
<< g_conf->mon_osd_prime_pg_temp_max_time
<< " seconds, stopping"
<< dendl;
// finalize up pending_inc
- pending_inc.modified = ceph_clock_now(g_ceph_context);
+ pending_inc.modified = ceph_clock_now();
int r = pending_inc.propagate_snaps_to_tiers(g_ceph_context, osdmap);
assert(r == 0);
assert(osdmap.get_addr(target_osd) == m->get_target().addr);
// calculate failure time
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
utime_t failed_since =
m->get_recv_stamp() -
utime_t(m->failed_for ? m->failed_for : g_conf->osd_heartbeat_grace, 0);
dout(10) << " not laggy, new xi " << xi << dendl;
} else {
if (xi.down_stamp.sec()) {
- int interval = ceph_clock_now(g_ceph_context).sec() -
+ int interval = ceph_clock_now().sec() -
xi.down_stamp.sec();
if (g_conf->mon_osd_laggy_max_interval &&
(interval > g_conf->mon_osd_laggy_max_interval)) {
if (!mon->is_leader()) return;
bool do_propose = false;
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
// mark osds down?
if (check_failures(now))
string blacklistop;
cmd_getval(g_ceph_context, cmdmap, "blacklistop", blacklistop);
if (blacklistop == "add") {
- utime_t expires = ceph_clock_now(g_ceph_context);
+ utime_t expires = ceph_clock_now();
double d;
// default one hour
cmd_getval(g_ceph_context, cmdmap, "expire", d, double(60*60));
if (pp->snap_exists(snapname.c_str())) {
ss << "pool " << poolstr << " snap " << snapname << " already exists";
} else {
- pp->add_snap(snapname.c_str(), ceph_clock_now(g_ceph_context));
+ pp->add_snap(snapname.c_str(), ceph_clock_now());
pp->set_snap_epoch(pending_inc.epoch);
ss << "created pool " << poolstr << " snap " << snapname;
}
switch (m->op) {
case POOL_OP_CREATE_SNAP:
if (!pp.snap_exists(m->name.c_str())) {
- pp.add_snap(m->name.c_str(), ceph_clock_now(g_ceph_context));
+ pp.add_snap(m->name.c_str(), ceph_clock_now());
dout(10) << "create snap in pool " << m->pool << " " << m->name << " seq " << pp.get_snap_epoch() << dendl;
changed = true;
}
stats.last_deep_scrub_stamp = ps.last_deep_scrub_stamp;
stats.last_clean_scrub_stamp = ps.last_clean_scrub_stamp;
} else {
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
stats.last_fresh = now;
stats.last_active = now;
stats.last_change = now;
handle_osd_timeouts();
if (!pg_map.pg_sum_deltas.empty()) {
- utime_t age = ceph_clock_now(g_ceph_context) - pg_map.stamp;
+ utime_t age = ceph_clock_now() - pg_map.stamp;
if (age > 2 * g_conf->mon_delta_reset_interval) {
dout(10) << " clearing pg_map delta (" << age << " > " << g_conf->mon_delta_reset_interval << " seconds old)" << dendl;
pg_map.clear_delta();
ceph::unordered_map<uint64_t,pair<pool_stat_t,utime_t> >::iterator it;
for (it = pg_map.per_pool_sum_delta.begin();
it != pg_map.per_pool_sum_delta.end(); ) {
- utime_t age = ceph_clock_now(g_ceph_context) - it->second.second;
+ utime_t age = ceph_clock_now() - it->second.second;
if (age > 2*g_conf->mon_delta_reset_interval) {
dout(10) << " clearing pg_map delta for pool " << it->first
<< " (" << age << " > " << g_conf->mon_delta_reset_interval
if (!mon->is_leader())
return;
- utime_t now(ceph_clock_now(g_ceph_context));
+ utime_t now(ceph_clock_now());
utime_t timeo(g_conf->mon_osd_report_timeout, 0);
if (now - mon->get_leader_since() < timeo) {
// We haven't been the leader for long enough to consider OSD timeouts
version_t version = pending_inc.version;
dout(10) << __func__ << " v " << version << dendl;
assert(get_last_committed() + 1 == version);
- pending_inc.stamp = ceph_clock_now(g_ceph_context);
+ pending_inc.stamp = ceph_clock_now();
uint64_t features = mon->get_quorum_con_features();
return false;
}
- last_osd_report[from] = ceph_clock_now(g_ceph_context);
+ last_osd_report[from] = ceph_clock_now();
if (!stats->get_orig_source().is_osd() ||
!mon->osdmon()->osdmap.is_up(from) ||
dout(10) << " marking pg " << pgid << " stale (acting_primary "
<< stat->acting_primary << ")" << dendl;
stat->state |= PG_STATE_STALE;
- stat->last_unstale = ceph_clock_now(g_ceph_context);
+ stat->last_unstale = ceph_clock_now();
}
}
if (since == utime_t()) {
ss << " since forever";
} else {
- utime_t dur = ceph_clock_now(g_ceph_context) - since;
+ utime_t dur = ceph_clock_now() - since;
ss << " for " << dur;
}
ss << ", current state " << pg_state_string(p->second.state)
return;
int pgs_count = 0;
- const utime_t now = ceph_clock_now(nullptr);
+ const utime_t now = ceph_clock_now();
for (const auto& pg_entry : pg_stats) {
const auto& pg_stat(pg_entry.second);
const utime_t time_since_ls = now - pg_stat.last_scrub_stamp;
}
ceph::unordered_map<pg_t, pg_stat_t> stuck_pgs;
- utime_t now(ceph_clock_now(g_ceph_context));
+ utime_t now(ceph_clock_now());
utime_t cutoff = now - utime_t(g_conf->mon_pg_stuck_threshold, 0);
uint64_t num_inactive_pgs = 0;
}
}
- utime_t now(ceph_clock_now(g_ceph_context));
+ utime_t now(ceph_clock_now());
utime_t cutoff = now - utime_t(threshold, 0);
if (!f) {
if (*p == mon->rank) continue;
MMonPaxos *collect = new MMonPaxos(mon->get_epoch(), MMonPaxos::OP_COLLECT,
- ceph_clock_now(g_ceph_context));
+ ceph_clock_now());
collect->last_committed = last_committed;
collect->first_committed = first_committed;
collect->pn = accepted_pn;
// reply
MMonPaxos *last = new MMonPaxos(mon->get_epoch(), MMonPaxos::OP_LAST,
- ceph_clock_now(g_ceph_context));
+ ceph_clock_now());
last->last_committed = last_committed;
last->first_committed = first_committed;
logger->inc(l_paxos_collect);
logger->inc(l_paxos_collect_keys, t->get_keys());
logger->inc(l_paxos_collect_bytes, t->get_bytes());
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
get_store()->apply_transaction(t);
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
logger->tinc(l_paxos_collect_latency, end - start);
} else {
// don't accept!
logger->inc(l_paxos_store_state);
logger->inc(l_paxos_store_state_bytes, t->get_bytes());
logger->inc(l_paxos_store_state_keys, t->get_keys());
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
get_store()->apply_transaction(t);
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
logger->tinc(l_paxos_store_state_latency, end - start);
// refresh first_committed; this txn may have trimmed.
dout(10) << " sending commit to mon." << p->first << dendl;
MMonPaxos *commit = new MMonPaxos(mon->get_epoch(),
MMonPaxos::OP_COMMIT,
- ceph_clock_now(g_ceph_context));
+ ceph_clock_now());
share_state(commit, peer_first_committed[p->first], p->second);
mon->messenger->send_message(commit, mon->monmap->get_inst(p->first));
}
logger->inc(l_paxos_begin);
logger->inc(l_paxos_begin_keys, t->get_keys());
logger->inc(l_paxos_begin_bytes, t->get_bytes());
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
get_store()->apply_transaction(t);
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
logger->tinc(l_paxos_begin_latency, end - start);
assert(g_conf->paxos_kill_at != 3);
dout(10) << " sending begin to mon." << *p << dendl;
MMonPaxos *begin = new MMonPaxos(mon->get_epoch(), MMonPaxos::OP_BEGIN,
- ceph_clock_now(g_ceph_context));
+ ceph_clock_now());
begin->values[last_committed+1] = new_value;
begin->last_committed = last_committed;
begin->pn = accepted_pn;
*_dout << dendl;
logger->inc(l_paxos_begin_bytes, t->get_bytes());
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
get_store()->apply_transaction(t);
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
logger->tinc(l_paxos_begin_latency, end - start);
assert(g_conf->paxos_kill_at != 5);
// reply
MMonPaxos *accept = new MMonPaxos(mon->get_epoch(), MMonPaxos::OP_ACCEPT,
- ceph_clock_now(g_ceph_context));
+ ceph_clock_now());
accept->pn = accepted_pn;
accept->last_committed = last_committed;
begin->get_connection()->send_message(accept);
logger->inc(l_paxos_commit);
logger->inc(l_paxos_commit_keys, t->get_keys());
logger->inc(l_paxos_commit_bytes, t->get_bytes());
- commit_start_stamp = ceph_clock_now(NULL);
+ commit_start_stamp = ceph_clock_now();
get_store()->queue_transaction(t, new C_Committed(this));
void Paxos::commit_finish()
{
dout(20) << __func__ << " " << (last_committed+1) << dendl;
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
logger->tinc(l_paxos_commit_latency, end - commit_start_stamp);
assert(g_conf->paxos_kill_at != 8);
lease_expire = utime_t(); // cancel lease
last_committed++;
- last_commit_time = ceph_clock_now(NULL);
+ last_commit_time = ceph_clock_now();
// refresh first_committed; this txn may have trimmed.
first_committed = get_store()->get(get_name(), "first_committed");
dout(10) << " sending commit to mon." << *p << dendl;
MMonPaxos *commit = new MMonPaxos(mon->get_epoch(), MMonPaxos::OP_COMMIT,
- ceph_clock_now(g_ceph_context));
+ ceph_clock_now());
commit->values[last_committed] = new_value;
commit->pn = accepted_pn;
commit->last_committed = last_committed;
assert(mon->is_leader());
//assert(is_active());
- lease_expire = ceph_clock_now(g_ceph_context);
+ lease_expire = ceph_clock_now();
lease_expire += g_conf->mon_lease;
acked_lease.clear();
acked_lease.insert(mon->rank);
if (*p == mon->rank) continue;
MMonPaxos *lease = new MMonPaxos(mon->get_epoch(), MMonPaxos::OP_LEASE,
- ceph_clock_now(g_ceph_context));
+ ceph_clock_now());
lease->last_committed = last_committed;
lease->lease_timestamp = lease_expire;
lease->first_committed = first_committed;
void Paxos::warn_on_future_time(utime_t t, entity_name_t from)
{
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
if (t > now) {
utime_t diff = t - now;
if (diff > g_conf->mon_clock_drift_allowed) {
pow(g_conf->mon_clock_drift_warn_backoff, clock_drift_warned)) {
mon->clog->warn() << "message from " << from << " was stamped " << diff
<< "s in the future, clocks not synchronized";
- last_clock_drift_warn = ceph_clock_now(g_ceph_context);
+ last_clock_drift_warn = ceph_clock_now();
++clock_drift_warned;
}
}
{
bool need_bootstrap = false;
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
// make sure we have the latest state loaded up
mon->refresh_from_paxos(&need_bootstrap);
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
logger->inc(l_paxos_refresh);
logger->tinc(l_paxos_refresh_latency, end - start);
if (lease_expire < lease->lease_timestamp) {
lease_expire = lease->lease_timestamp;
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
if (lease_expire < now) {
utime_t diff = now - lease_expire;
derr << "lease_expire from " << lease->get_source_inst() << " is " << diff << " seconds in the past; mons are probably laggy (or possibly clocks are too skewed)" << dendl;
// ack
MMonPaxos *ack = new MMonPaxos(mon->get_epoch(), MMonPaxos::OP_LEASE_ACK,
- ceph_clock_now(g_ceph_context));
+ ceph_clock_now());
ack->last_committed = last_committed;
ack->first_committed = first_committed;
- ack->lease_timestamp = ceph_clock_now(g_ceph_context);
+ ack->lease_timestamp = ceph_clock_now();
lease->get_connection()->send_message(ack);
// (re)set timeout event.
*_dout << dendl;
logger->inc(l_paxos_new_pn);
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
get_store()->apply_transaction(t);
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
logger->tinc(l_paxos_new_pn_latency, end - start);
dout(10) << "get_new_proposal_number = " << last_pn << dendl;
(is_active() || is_updating() || is_writing()) &&
last_committed > 0 && is_lease_valid(); // must have a value alone, or have lease
dout(5) << __func__ << " = " << (int)ret
- << " - now=" << ceph_clock_now(g_ceph_context)
+ << " - now=" << ceph_clock_now()
<< " lease_expire=" << lease_expire
<< " has v" << v << " lc " << last_committed
<< dendl;
bool Paxos::is_lease_valid()
{
return ((mon->get_quorum().size() == 1)
- || (ceph_clock_now(g_ceph_context) < lease_expire));
+ || (ceph_clock_now() < lease_expire));
}
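Reviewer note: is_lease_valid() reduces to: a single-monitor quorum is always valid, otherwise the lease holds while now is before lease_expire, which the leader sets to now + mon_lease and extends each round. A compact sketch:

    #include <ctime>
    #include <iostream>

    struct lease_state {
      size_t quorum_size = 3;
      time_t lease_expire = 0;   // absolute stamp; leader sets now + mon_lease

      bool is_lease_valid(time_t now) const {
        return quorum_size == 1 || now < lease_expire;
      }
    };

    int main() {
      lease_state s;
      time_t now = time(nullptr);
      s.lease_expire = now + 5;                        // mon_lease = 5s, say
      std::cout << s.is_lease_valid(now) << "\n";      // 1: within lease
      std::cout << s.is_lease_valid(now + 10) << "\n"; // 0: expired
    }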
// -- WRITE --
C_Proposal(Context *c, bufferlist& proposal_bl) :
proposer_context(c),
bl(proposal_bl),
- proposed(false),
- proposal_time(ceph_clock_now(NULL))
+ proposed(false),
+ proposal_time(ceph_clock_now())
{ }
void finish(int r) {
{
string proposed = (p.proposed ? "proposed" : "unproposed");
out << " " << proposed
- << " queued " << (ceph_clock_now(NULL) - p.proposal_time)
+ << " queued " << (ceph_clock_now() - p.proposal_time)
<< " tx dump:\n";
MonitorDBStore::TransactionRef t(new MonitorDBStore::Transaction);
bufferlist::iterator p_it = p.bl.begin();
if (get_last_committed() <= 1)
delay = 0.0;
else {
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
if ((now - paxos->last_commit_time) > g_conf->paxos_propose_interval)
delay = (double)g_conf->paxos_min_wait;
else
osd_epoch(0),
auth_handler(NULL),
proxy_con(NULL), proxy_tid(0) {
- time_established = ceph_clock_now(g_ceph_context);
+ time_established = ceph_clock_now();
}
~MonSession() {
//generic_dout(0) << "~MonSession " << this << dendl;
void DispatchQueue::local_delivery(Message *m, int priority)
{
- m->set_recv_stamp(ceph_clock_now(msgr->cct));
+ m->set_recv_stamp(ceph_clock_now());
Mutex::Locker l(local_delivery_lock);
if (local_messages.empty())
local_delivery_cond.Signal();
* of one reference to it.
*/
void ms_fast_dispatch(Message *m) {
- m->set_dispatch_stamp(ceph_clock_now(cct));
+ m->set_dispatch_stamp(ceph_clock_now());
for (list<Dispatcher*>::iterator p = fast_dispatchers.begin();
p != fast_dispatchers.end();
++p) {
* one reference to it.
*/
void ms_deliver_dispatch(Message *m) {
- m->set_dispatch_stamp(ceph_clock_now(cct));
+ m->set_dispatch_stamp(ceph_clock_now());
for (list<Dispatcher*>::iterator p = dispatchers.begin();
p != dispatchers.end();
++p) {
if (tag == CEPH_MSGR_TAG_KEEPALIVE) {
ldout(async_msgr->cct, 20) << __func__ << " got KEEPALIVE" << dendl;
- set_last_keepalive(ceph_clock_now(NULL));
+ set_last_keepalive(ceph_clock_now());
} else if (tag == CEPH_MSGR_TAG_KEEPALIVE2) {
state = STATE_OPEN_KEEPALIVE2;
} else if (tag == CEPH_MSGR_TAG_KEEPALIVE2_ACK) {
_append_keepalive_or_ack(true, &kp_t);
write_lock.unlock();
ldout(async_msgr->cct, 20) << __func__ << " got KEEPALIVE2 " << kp_t << dendl;
- set_last_keepalive(ceph_clock_now(NULL));
+ set_last_keepalive(ceph_clock_now());
need_dispatch_writer = true;
state = STATE_OPEN;
break;
front.clear();
middle.clear();
data.clear();
- recv_stamp = ceph_clock_now(async_msgr->cct);
+ recv_stamp = ceph_clock_now();
current_header = header;
state = STATE_OPEN_MESSAGE_THROTTLE_MESSAGE;
break;
}
}
- throttle_stamp = ceph_clock_now(msgr->cct);
+ throttle_stamp = ceph_clock_now();
state = STATE_OPEN_MESSAGE_READ_FRONT;
break;
}
message->set_recv_stamp(recv_stamp);
message->set_throttle_stamp(throttle_stamp);
- message->set_recv_complete_stamp(ceph_clock_now(async_msgr->cct));
+ message->set_recv_complete_stamp(ceph_clock_now());
// check received seq#. if it is old, drop the message.
// note that incoming messages may skip ahead. this is convenient for the client
utime_t release = delay_queue.front().first;
m = delay_queue.front().second;
string delay_msg_type = msgr->cct->_conf->ms_inject_delay_msg_type;
- utime_t now = ceph_clock_now(msgr->cct);
+ utime_t now = ceph_clock_now();
if ((release > now &&
(delay_msg_type.empty() || m->get_type_name() == delay_msg_type))) {
utime_t t = release - now;
outcoming_bl.append((char*)&ts, sizeof(ts));
} else if (has_feature(CEPH_FEATURE_MSGR_KEEPALIVE2)) {
struct ceph_timespec ts;
- utime_t t = ceph_clock_now(async_msgr->cct);
+ utime_t t = ceph_clock_now();
t.encode_timeval(&ts);
outcoming_bl.append(CEPH_MSGR_TAG_KEEPALIVE2);
outcoming_bl.append((char*)&ts, sizeof(ts));
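Reviewer note: encode_timeval() flattens the utime_t into the ceph_timespec that rides behind the KEEPALIVE2 tag; the wire fields are 32-bit little-endian seconds and nanoseconds. A hedged sketch of that flattening (illustrative stand-in, not the real encoder; assumes a little-endian host):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Illustrative wire struct: a packed pair of 32-bit fields
    // (seconds, nanoseconds), little-endian on the wire.
    #pragma pack(push, 1)
    struct wire_timespec {
      uint32_t tv_sec;
      uint32_t tv_nsec;
    };
    #pragma pack(pop)

    int main() {
      uint64_t sec = 1700000000;     // pretend clock sample
      uint32_t nsec = 123456789;
      wire_timespec ts;
      ts.tv_sec = (uint32_t)sec;     // truncates past 2106: 32-bit wire limit
      ts.tv_nsec = nsec;
      unsigned char buf[sizeof(ts)];
      std::memcpy(buf, &ts, sizeof(ts));  // appended right after the tag byte
      std::printf("encoded %zu bytes\n", sizeof(buf));
    }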
// This is a newly created frag_id
if (frag.mem_size == 0) {
_frags_age.push_back(frag_id);
- frag.rx_time = ceph_clock_now(cct);
+ frag.rx_time = ceph_clock_now();
}
auto added_size = frag.merge(h, offset, std::move(p));
_frag_mem += added_size;
if (_frags.empty()) {
return;
}
- auto now = ceph_clock_now(cct);
+ auto now = ceph_clock_now();
for (auto it = _frags_age.begin(); it != _frags_age.end();) {
auto frag_id = *it;
auto& frag = _frags[frag_id];
frag_timefd.construct(center->create_time_event(tp.to_nsec() / 1000, frag_handler));
}
void frag_arm() {
- auto now = ceph_clock_now(cct);
+ auto now = ceph_clock_now();
frag_timefd.construct(center->create_time_event(now.to_nsec() / 1000, frag_handler));
}
Message *m = delay_queue.front().second;
string delay_msg_type = pipe->msgr->cct->_conf->ms_inject_delay_msg_type;
if (!flush_count &&
- (release > ceph_clock_now(pipe->msgr->cct) &&
+ (release > ceph_clock_now() &&
(delay_msg_type.empty() || m->get_type_name() == delay_msg_type))) {
lgeneric_subdout(pipe->msgr->cct, ms, 10) << *pipe << "DelayedDelivery::entry sleeping on delay_cond until " << release << dendl;
delay_cond.WaitUntil(delay_lock, release);
backoff.set_from_double(conf->ms_initial_backoff);
} else {
ldout(msgr->cct,10) << "fault waiting " << backoff << dendl;
- cond.WaitInterval(msgr->cct, pipe_lock, backoff);
+ cond.WaitInterval(pipe_lock, backoff);
backoff += backoff;
if (backoff > conf->ms_max_backoff)
backoff.set_from_double(conf->ms_max_backoff);
if (tag == CEPH_MSGR_TAG_KEEPALIVE) {
ldout(msgr->cct,2) << "reader got KEEPALIVE" << dendl;
pipe_lock.Lock();
- connection_state->set_last_keepalive(ceph_clock_now(NULL));
+ connection_state->set_last_keepalive(ceph_clock_now());
continue;
}
if (tag == CEPH_MSGR_TAG_KEEPALIVE2) {
keepalive_ack_stamp = utime_t(t);
ldout(msgr->cct,2) << "reader got KEEPALIVE2 " << keepalive_ack_stamp
<< dendl;
- connection_state->set_last_keepalive(ceph_clock_now(NULL));
+ connection_state->set_last_keepalive(ceph_clock_now());
cond.Signal();
}
continue;
if (connection_state->has_feature(CEPH_FEATURE_MSGR_KEEPALIVE2)) {
pipe_lock.Unlock();
rc = write_keepalive2(CEPH_MSGR_TAG_KEEPALIVE2,
- ceph_clock_now(msgr->cct));
+ ceph_clock_now());
} else {
pipe_lock.Unlock();
rc = write_keepalive();
unsigned data_len, data_off;
int aborted;
Message *message;
- utime_t recv_stamp = ceph_clock_now(msgr->cct);
+ utime_t recv_stamp = ceph_clock_now();
if (policy.throttler_messages) {
ldout(msgr->cct,10) << "reader wants " << 1 << " message from policy throttler "
in_q->dispatch_throttler.get(message_size);
}
- utime_t throttle_stamp = ceph_clock_now(msgr->cct);
+ utime_t throttle_stamp = ceph_clock_now();
// read front
front_len = header.front_len;
message->set_recv_stamp(recv_stamp);
message->set_throttle_stamp(throttle_stamp);
- message->set_recv_complete_stamp(ceph_clock_now(msgr->cct));
+ message->set_recv_complete_stamp(ceph_clock_now());
*pm = message;
return 0;
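Reviewer note: read_message() stamps a message three times, recv_stamp at the header, throttle_stamp once throttler budget is granted, and recv_complete_stamp when the payload is in; dispatch adds a fourth stamp later, and the gaps between stamps are what the perf counters measure. A sketch of that lifecycle:

    #include <chrono>
    #include <cstdio>
    #include <thread>

    using clk = std::chrono::steady_clock;

    struct msg_stamps {            // mirrors the four stamps, illustratively
      clk::time_point recv, throttle, recv_complete, dispatch;
    };

    int main() {
      msg_stamps s;
      s.recv = clk::now();                                   // header read
      std::this_thread::sleep_for(std::chrono::milliseconds(2));
      s.throttle = clk::now();                               // budget granted
      std::this_thread::sleep_for(std::chrono::milliseconds(2));
      s.recv_complete = clk::now();                          // payload read
      std::this_thread::sleep_for(std::chrono::milliseconds(2));
      s.dispatch = clk::now();                               // handed to dispatcher
      auto ms = [](clk::time_point a, clk::time_point b) {
        return std::chrono::duration_cast<std::chrono::milliseconds>(b - a).count();
      };
      std::printf("throttle wait %lldms, read %lldms, queue %lldms\n",
                  (long long)ms(s.recv, s.throttle),
                  (long long)ms(s.throttle, s.recv_complete),
                  (long long)ms(s.recv_complete, s.dispatch));
    }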
xcmd->get_bl_ref().append(CEPH_MSGR_TAG_KEEPALIVE2_ACK);
xcmd->get_bl_ref().append((char*)&ts, sizeof(ts));
} else if (has_feature(CEPH_FEATURE_MSGR_KEEPALIVE2)) {
- utime_t t = ceph_clock_now(msgr->cct);
+ utime_t t = ceph_clock_now();
t.encode_timeval(&ts);
xcmd->get_bl_ref().append(CEPH_MSGR_TAG_KEEPALIVE2);
xcmd->get_bl_ref().append((char*)&ts, sizeof(ts));
ceph_msg_footer footer;
buffer::list payload, middle, data;
- const utime_t recv_stamp = ceph_clock_now(msgr->cct);
+ const utime_t recv_stamp = ceph_clock_now();
ldout(msgr->cct,4) << __func__ << " " << "msg_seq.size()=" << msg_seq.size() <<
dendl;
/* update timestamps */
m->set_recv_stamp(recv_stamp);
- m->set_recv_complete_stamp(ceph_clock_now(msgr->cct));
+ m->set_recv_complete_stamp(ceph_clock_now());
m->set_seq(header.seq);
/* MP-SAFE */
case CEPH_MSGR_TAG_KEEPALIVE:
ldout(msgr->cct, 20) << __func__ << " got KEEPALIVE" << dendl;
- set_last_keepalive(ceph_clock_now(nullptr));
+ set_last_keepalive(ceph_clock_now());
break;
case CEPH_MSGR_TAG_KEEPALIVE2:
utime_t kp_t = utime_t(*t);
ldout(msgr->cct, 20) << __func__ << " got KEEPALIVE2 with timestamp" << kp_t << dendl;
send_keepalive_or_ack(true, &kp_t);
- set_last_keepalive(ceph_clock_now(nullptr));
+ set_last_keepalive(ceph_clock_now());
}
break;
void XioLoopbackConnection::send_keepalive()
{
- utime_t t = ceph_clock_now(nullptr);
+ utime_t t = ceph_clock_now();
set_last_keepalive(t);
set_last_keepalive_ack(t);
}
}
}
if (must_dirty) {
- h->file->fnode.mtime = ceph_clock_now(NULL);
+ h->file->fnode.mtime = ceph_clock_now();
assert(h->file->fnode.ino >= 1);
if (h->file->dirty_seq == 0) {
h->file->dirty_seq = log_seq + 1;
// NOTE: this is safe to call without a lock, as long as our reference is
// stable.
dout(10) << __func__ << " " << h << dendl;
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
for (auto p : h->iocv) {
if (p) {
p->aio_wait();
}
}
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
utime_t dur = end - start;
dout(10) << __func__ << " " << h << " done in " << dur << dendl;
}
return;
}
dout(10) << __func__ << dendl;
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
vector<interval_set<uint64_t>> to_release(pending_release.size());
to_release.swap(pending_release);
_flush_and_sync_log(l);
}
}
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
utime_t dur = end - start;
dout(10) << __func__ << " done in " << dur << dendl;
}
}
assert(file->fnode.ino > 1);
- file->fnode.mtime = ceph_clock_now(NULL);
+ file->fnode.mtime = ceph_clock_now();
file->fnode.prefer_bdev = BlueFS::BDEV_DB;
if (dirname.length() > 5) {
// the "db.slow" and "db.wal" directory names are hard-coded at
<< " not found, creating" << dendl;
file = new File;
file->fnode.ino = ++ino_last;
- file->fnode.mtime = ceph_clock_now(NULL);
+ file->fnode.mtime = ceph_clock_now();
file_map[ino_last] = file;
dir->file_map[filename] = file;
++file->refs;
++store->mempool_seq;
utime_t wait;
wait += g_conf->bluestore_cache_trim_interval;
- cond.WaitInterval(g_ceph_context, lock, wait);
+ cond.WaitInterval(lock, wait);
}
stop = false;
return NULL;
if (create) {
label.osd_uuid = fsid;
label.size = size;
- label.btime = ceph_clock_now(NULL);
+ label.btime = ceph_clock_now();
label.description = desc;
int r = _write_bdev_label(path, label);
if (r < 0)
uint64_t num_sharded_objects = 0;
uint64_t num_object_shards = 0;
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
int r = _open_path();
if (r < 0)
<< num_shared_blobs << " shared."
<< dendl;
- utime_t duration = ceph_clock_now(NULL) - start;
+ utime_t duration = ceph_clock_now() - start;
dout(1) << __func__ << " finish with " << errors << " errors in "
<< duration << " seconds" << dendl;
return errors;
{
int bad;
uint64_t bad_csum;
- utime_t start = ceph_clock_now(g_ceph_context);
+ utime_t start = ceph_clock_now();
int r = blob->verify_csum(blob_xoffset, bl, &bad, &bad_csum);
if (r < 0) {
if (r == -1) {
derr << __func__ << " failed with exit code: " << cpp_strerror(r) << dendl;
}
}
- logger->tinc(l_bluestore_csum_lat, ceph_clock_now(g_ceph_context) - start);
+ logger->tinc(l_bluestore_csum_lat, ceph_clock_now() - start);
return r;
}
int BlueStore::_decompress(bufferlist& source, bufferlist* result)
{
int r = 0;
- utime_t start = ceph_clock_now(g_ceph_context);
+ utime_t start = ceph_clock_now();
bufferlist::iterator i = source.begin();
bluestore_compression_header_t chdr;
::decode(chdr, i);
r = -EIO;
}
}
- logger->tinc(l_bluestore_decompress_lat, ceph_clock_now(g_ceph_context) - start);
+ logger->tinc(l_bluestore_decompress_lat, ceph_clock_now() - start);
return r;
}
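Reviewer note: the csum and decompress paths use the same stopwatch idiom as the Paxos hunks earlier: sample start, do the work, then logger->tinc(counter, now - start). A generic sketch of accumulating latency samples into a counter:

    #include <chrono>
    #include <cstdio>
    #include <thread>

    // Toy latency counter standing in for PerfCounters::tinc(): it just
    // accumulates total time and a sample count.
    struct lat_counter {
      std::chrono::nanoseconds total{0};
      long samples = 0;
      void tinc(std::chrono::nanoseconds d) { total += d; ++samples; }
    };

    int main() {
      lat_counter decompress_lat;
      for (int i = 0; i < 3; i++) {
        auto start = std::chrono::steady_clock::now();
        std::this_thread::sleep_for(std::chrono::milliseconds(5));  // "work"
        decompress_lat.tinc(std::chrono::steady_clock::now() - start);
      }
      std::printf("avg %.2fms over %ld samples\n",
                  decompress_lat.total.count() / 1e6 / decompress_lat.samples,
                  decompress_lat.samples);
    }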
}
unsigned n = txc->osr->parent->shard_hint.hash_to_shard(m_finisher_num);
if (txc->oncommit) {
- logger->tinc(l_bluestore_commit_lat, ceph_clock_now(g_ceph_context) - txc->start);
+ logger->tinc(l_bluestore_commit_lat, ceph_clock_now() - txc->start);
finishers[n]->queue(txc->oncommit);
txc->oncommit = NULL;
}
kv_committing.swap(kv_queue);
kv_submitting.swap(kv_queue_unsubmitted);
wal_cleaning.swap(wal_cleanup_queue);
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
l.unlock();
dout(30) << __func__ << " committing txc " << kv_committing << dendl;
dout(10) << __func__ << " blobid_max now " << blobid_max << dendl;
}
- utime_t finish = ceph_clock_now(NULL);
+ utime_t finish = ceph_clock_now();
utime_t dur = finish - start;
dout(20) << __func__ << " committed " << kv_committing.size()
<< " cleaned " << wal_cleaning.size()
bool compressed = false;
if(c && wi.blob_length > min_alloc_size) {
- utime_t start = ceph_clock_now(g_ceph_context);
+ utime_t start = ceph_clock_now();
// compress
assert(b_off == 0);
logger->inc(l_bluestore_compress_rejected_count);
}
logger->tinc(l_bluestore_compress_lat,
- ceph_clock_now(g_ceph_context) - start);
+ ceph_clock_now() - start);
}
if (!compressed) {
dblob.set_flag(bluestore_blob_t::FLAG_MUTABLE);
}
void log_state_latency(PerfCounters *logger, int state) {
- utime_t lat, now = ceph_clock_now(g_ceph_context);
+ utime_t lat, now = ceph_clock_now();
lat = now - last_stamp;
logger->tinc(state, lat);
last_stamp = now;
onreadable_sync(NULL),
wal_txn(NULL),
ioc(this),
- start(ceph_clock_now(g_ceph_context)) {
+ start(ceph_clock_now()) {
last_stamp = start;
}
~TransContext() {
g_ceph_context->_log->flush();
_exit(1);
}
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
int r = ::fdatasync(fd_direct);
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
utime_t dur = end - start;
if (r < 0) {
r = -errno;
}
}
if (g_conf->bdev_debug_aio) {
- utime_t now = ceph_clock_now(NULL);
+ utime_t now = ceph_clock_now();
std::lock_guard<std::mutex> l(debug_queue_lock);
if (debug_oldest) {
if (debug_stall_since == utime_t()) {
void FileJournal::queue_completions_thru(uint64_t seq)
{
assert(finisher_lock.is_locked());
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
list<completion_item> items;
batch_pop_completions(items);
list<completion_item>::iterator it = items.begin();
<< (hbp.length() ? " + header":"")
<< dendl;
- utime_t from = ceph_clock_now(g_ceph_context);
+ utime_t from = ceph_clock_now();
// entry
off64_t pos = write_pos;
#endif
}
- utime_t lat = ceph_clock_now(g_ceph_context) - from;
+ utime_t lat = ceph_clock_now() - from;
dout(20) << "do_write latency " << lat << dendl;
write_lock.Lock();
completions.push_back(
completion_item(
- seq, oncommit, ceph_clock_now(g_ceph_context), osd_op));
+ seq, oncommit, ceph_clock_now(), osd_op));
if (writeq.empty())
writeq_cond.Signal();
writeq.push_back(write_item(seq, e, orig_len, osd_op));
}
Op *o = new Op;
- o->start = ceph_clock_now(g_ceph_context);
+ o->start = ceph_clock_now();
o->tls = std::move(tls);
o->onreadable = onreadable;
o->onreadable_sync = onreadable_sync;
list<Context*> to_queue;
Op *o = osr->dequeue(&to_queue);
- utime_t lat = ceph_clock_now(g_ceph_context);
+ utime_t lat = ceph_clock_now();
lat -= o->start;
dout(10) << "_finish_op " << o << " seq " << o->op << " " << *osr << "/" << osr->parent << " lat " << lat << dendl;
return 0;
}
- utime_t start = ceph_clock_now(g_ceph_context);
+ utime_t start = ceph_clock_now();
// set up the sequencer
OpSequencer *osr;
assert(posr);
ceph_abort();
}
submit_manager.op_submit_finish(op_num);
- utime_t end = ceph_clock_now(g_ceph_context);
+ utime_t end = ceph_clock_now();
logger->tinc(l_filestore_queue_transaction_latency_avg, end - start);
return 0;
}
if (ondisk)
apply_manager.add_waiter(op_num, ondisk);
submit_manager.op_submit_finish(op_num);
- utime_t end = ceph_clock_now(g_ceph_context);
+ utime_t end = ceph_clock_now();
logger->tinc(l_filestore_queue_transaction_latency_avg, end - start);
return 0;
}
submit_manager.op_submit_finish(op);
apply_manager.op_apply_finish(op);
- utime_t end = ceph_clock_now(g_ceph_context);
+ utime_t end = ceph_clock_now();
logger->tinc(l_filestore_queue_transaction_latency_avg, end - start);
return r;
}
utime_t min_interval;
min_interval.set_from_double(m_filestore_min_sync_interval);
- utime_t startwait = ceph_clock_now(g_ceph_context);
+ utime_t startwait = ceph_clock_now();
if (!force_sync) {
dout(20) << "sync_entry waiting for max_interval " << max_interval << dendl;
- sync_cond.WaitInterval(g_ceph_context, lock, max_interval);
+ sync_cond.WaitInterval(lock, max_interval);
} else {
dout(20) << "sync_entry not waiting, force_sync set" << dendl;
}
break;
} else {
// wait for at least the min interval
- utime_t woke = ceph_clock_now(g_ceph_context);
+ utime_t woke = ceph_clock_now();
woke -= startwait;
dout(20) << "sync_entry woke after " << woke << dendl;
if (woke < min_interval) {
t -= woke;
dout(20) << "sync_entry waiting for another " << t
<< " to reach min interval " << min_interval << dendl;
- sync_cond.WaitInterval(g_ceph_context, lock, t);
+ sync_cond.WaitInterval(lock, t);
}
}
op_tp.pause();
if (apply_manager.commit_start()) {
- utime_t start = ceph_clock_now(g_ceph_context);
+ utime_t start = ceph_clock_now();
uint64_t cp = apply_manager.get_committing_seq();
sync_entry_timeo_lock.Lock();
}
}
- utime_t done = ceph_clock_now(g_ceph_context);
+ utime_t done = ceph_clock_now();
utime_t lat = done - start;
utime_t dur = done - startwait;
dout(10) << "sync_entry commit took " << lat << ", interval was " << dur << dendl;
} else {
dout(20) << __func__ << " committing " << kv_queue.size() << dendl;
kv_committing.swap(kv_queue);
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
l.unlock();
dout(30) << __func__ << " committing txc " << kv_committing << dendl;
}
int r = db->submit_transaction_sync(t);
assert(r == 0);
- utime_t finish = ceph_clock_now(NULL);
+ utime_t finish = ceph_clock_now();
utime_t dur = finish - start;
dout(20) << __func__ << " committed " << kv_committing.size()
<< " in " << dur << dendl;
return "???";
}
void log_state_latency(PerfCounters *logger, int state) {
- utime_t lat, now = ceph_clock_now(g_ceph_context);
+ utime_t lat, now = ceph_clock_now();
lat = now - start;
logger->tinc(state, lat);
start = now;
oncommit(NULL),
onreadable(NULL),
onreadable_sync(NULL),
- start(ceph_clock_now(g_ceph_context)){
+ start(ceph_clock_now()){
//cout << "txc new " << this << std::endl;
}
~TransContext() {
agent_stop_flag(false),
agent_timer_lock("OSDService::agent_timer_lock"),
agent_timer(osd->client_messenger->cct, agent_timer_lock),
- last_recalibrate(ceph_clock_now(NULL)),
+ last_recalibrate(ceph_clock_now()),
promote_max_objects(0),
promote_max_bytes(0),
objecter(new Objecter(osd->client_messenger->cct, osd->objecter_messenger, osd->monc, NULL, 0, 0)),
void OSDService::promote_throttle_recalibrate()
{
- utime_t now = ceph_clock_now(NULL);
+ utime_t now = ceph_clock_now();
double dur = now - last_recalibrate;
last_recalibrate = now;
unsigned prob = promote_probability_millis;
Mutex::Locker l(full_status_lock);
enum s_names new_state;
- time_t now = ceph_clock_gettime(NULL);
+ time_t now = ceph_clock_gettime();
// We base ratio on kb_avail rather than kb_used because they can
// differ significantly e.g. on btrfs volumes with a large number of
osdmap->get_epoch(),
true // request ack
));
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
utime_t timeout;
timeout.set_from_double(now + cct->_conf->osd_mon_shutdown_timeout);
- while ((ceph_clock_now(cct) < timeout) &&
+ while ((ceph_clock_now() < timeout) &&
(get_state() != STOPPING)) {
is_stopping_cond.WaitUntil(is_stopping_lock, timeout);
}
PGSnapTrim(pg->get_osdmap()->get_epoch()),
cct->_conf->osd_snap_trim_cost,
cct->_conf->osd_snap_trim_priority,
- ceph_clock_now(cct),
+ ceph_clock_now(),
entity_inst_t())));
}
mgrc.set_pgstats_cb([this](){
RWLock::RLocker l(map_lock);
- utime_t had_for = ceph_clock_now(cct) - had_map_since;
+ utime_t had_for = ceph_clock_now() - had_map_since;
osd_stat_t cur_stat = service.get_osd_stat();
cur_stat.os_perf_stat = store->get_cur_stats();
assert(osd_lock.is_locked());
if (is_waiting_for_healthy()) {
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
if (last_heartbeat_resample == utime_t()) {
last_heartbeat_resample = now;
heartbeat_set_peers_need_update();
i->second.last_rx_front = m->stamp;
}
- utime_t cutoff = ceph_clock_now(cct);
+ utime_t cutoff = ceph_clock_now();
cutoff -= cct->_conf->osd_heartbeat_grace;
if (i->second.is_healthy(cutoff)) {
// Cancel false reports
utime_t w;
w.set_from_double(wait);
dout(30) << "heartbeat_entry sleeping for " << wait << dendl;
- heartbeat_cond.WaitInterval(cct, heartbeat_lock, w);
+ heartbeat_cond.WaitInterval(heartbeat_lock, w);
if (is_stopping())
return;
dout(30) << "heartbeat_entry woke up" << dendl;
void OSD::heartbeat_check()
{
assert(heartbeat_lock.is_locked());
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
// check for heartbeat replies (move me elsewhere?)
utime_t cutoff = now;
dout(5) << "heartbeat: " << service.get_osd_stat() << dendl;
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
// send heartbeats
for (map<int,HeartbeatInfo>::iterator i = heartbeat_peers.begin();
// mon report?
bool reset = false;
bool report = false;
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
pg_stat_queue_lock.Lock();
double backoff = stats_ack_timeout / g_conf->osd_mon_ack_timeout;
double adjusted_min = cct->_conf->osd_mon_report_interval_min * backoff;
double scrub_max_interval = pool_scrub_max_interval > 0 ?
pool_scrub_max_interval : g_conf->osd_scrub_max_interval;
// Instead of marking must_scrub force a schedule scrub
- utime_t stamp = ceph_clock_now(service->cct);
+ utime_t stamp = ceph_clock_now();
stamp -= scrub_max_interval;
stamp -= 100.0; // push back last scrub more for good measure
pg->info.history.last_scrub_stamp = stamp;
map_lock.get_read();
Mutex::Locker l2(mon_report_lock);
- utime_t now = ceph_clock_now(NULL);
+ utime_t now = ceph_clock_now();
last_mon_report = now;
// resend everything, it's a new session
if (is_waiting_for_healthy()) {
Mutex::Locker l(heartbeat_lock);
- utime_t cutoff = ceph_clock_now(cct);
+ utime_t cutoff = ceph_clock_now();
cutoff -= cct->_conf->osd_heartbeat_grace;
int num = 0, up = 0;
for (map<int,HeartbeatInfo>::iterator p = heartbeat_peers.begin();
assert(map_lock.is_locked());
assert(mon_report_lock.is_locked());
Mutex::Locker l(heartbeat_lock);
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
while (!failure_queue.empty()) {
int osd = failure_queue.begin()->first;
entity_inst_t i = osdmap->get_inst(osd);
}
if (last_pg_stats_ack == utime_t() || !outstanding_pg_stats.empty()) {
- last_pg_stats_ack = ceph_clock_now(cct);
+ last_pg_stats_ack = ceph_clock_now();
}
outstanding_pg_stats.insert(tid);
dout(20) << __func__ << " updates pending: " << outstanding_pg_stats << dendl;
pg_stat_queue_lock.Lock();
- last_pg_stats_ack = ceph_clock_now(cct);
+ last_pg_stats_ack = ceph_clock_now();
// decay timeout slowly (analogous to TCP)
stats_ack_timeout =
{
dout(10) << "flush_pg_stats" << dendl;
osd_lock.Unlock();
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
map_lock.get_read();
mon_report_lock.Lock();
send_pg_stats(now);
}
}
- utime_t start = ceph_clock_now(cct);
+ utime_t start = ceph_clock_now();
for (int64_t pos = 0; pos < count; pos += bsize) {
char nm[30];
unsigned offset = 0;
waiter.wait();
}
}
- utime_t end = ceph_clock_now(cct);
+ utime_t end = ceph_clock_now();
// clean up
store->queue_transaction(osr.get(), std::move(cleanupt), NULL);
return;
}
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
bool time_permit = scrub_time_permit(now);
bool load_is_low = scrub_load_below_threshold();
dout(20) << "sched_scrub load_is_low=" << (int)load_is_low << dendl;
}
}
- had_map_since = ceph_clock_now(cct);
+ had_map_since = ceph_clock_now();
epoch_t _bind_epoch = service.get_bind_epoch();
if (osdmap->is_up(whoami) &&
do_restart = true;
//add markdown log
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
utime_t grace = utime_t(g_conf->osd_max_markdown_period, 0);
osd_markdown_log.push_back(now);
//clear all out-of-date log
{
assert(osd_lock.is_locked());
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
list< pair<spg_t,utime_t> > pgids;
replay_queue_lock.Lock();
while (!replay_queue.empty() &&
if (available_pushes)
*available_pushes = max - recovery_ops_active - recovery_ops_reserved;
- if (ceph_clock_now(cct) < defer_recovery_until) {
+ if (ceph_clock_now() < defer_recovery_until) {
dout(15) << "_recover_now defer until " << defer_recovery_until << dendl;
return false;
}
void OSD::enqueue_op(PGRef pg, OpRequestRef& op)
{
- utime_t latency = ceph_clock_now(cct) - op->get_req()->get_recv_stamp();
+ utime_t latency = ceph_clock_now() - op->get_req()->get_recv_stamp();
dout(15) << "enqueue_op " << op << " prio " << op->get_req()->get_priority()
<< " cost " << op->get_req()->get_cost()
<< " latency " << latency
osd->cct->get_heartbeat_map()->reset_timeout(hb,
osd->cct->_conf->threadpool_default_timeout, 0);
sdata->sdata_lock.Lock();
- sdata->sdata_cond.WaitInterval(osd->cct, sdata->sdata_lock,
+ sdata->sdata_cond.WaitInterval(sdata->sdata_lock,
utime_t(osd->cct->_conf->threadpool_empty_queue_max_wait, 0));
sdata->sdata_lock.Unlock();
sdata->sdata_op_ordering_lock.Lock();
PGRef pg, OpRequestRef op,
ThreadPool::TPHandle &handle)
{
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
op->set_dequeued_time(now);
utime_t latency = now - op->get_req()->get_recv_stamp();
dout(10) << "dequeue_op " << op << " prio " << op->get_req()->get_priority()
PGScrub(pg->get_osdmap()->get_epoch()),
cct->_conf->osd_scrub_cost,
pg->get_scrub_priority(),
- ceph_clock_now(cct),
+ ceph_clock_now(),
entity_inst_t())));
}
PGRecovery(p.first, reserved_pushes),
cct->_conf->osd_recovery_cost,
cct->_conf->osd_recovery_priority,
- ceph_clock_now(cct),
+ ceph_clock_now(),
entity_inst_t()));
op_wq.queue(to_queue);
}
_maybe_queue_recovery();
}
void defer_recovery(float defer_for) {
- defer_recovery_until = ceph_clock_now(cct);
+ defer_recovery_until = ceph_clock_now();
defer_recovery_until += defer_for;
}
void pause_recovery() {
<< dendl;
epoch = e;
set_fsid(fsid);
- created = modified = ceph_clock_now(cct);
+ created = modified = ceph_clock_now();
if (nosd >= 0) {
set_max_osd(nosd);
is_primary() &&
pool.info.crash_replay_interval > 0 &&
may_need_replay(get_osdmap())) {
- replay_until = ceph_clock_now(cct);
+ replay_until = ceph_clock_now();
replay_until += pool.info.crash_replay_interval;
dout(10) << "activate starting replay interval for " << pool.info.crash_replay_interval
<< " until " << replay_until << dendl;
else
state_clear(PG_STATE_INCONSISTENT);
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
if (info.stats.state != state) {
info.stats.last_change = now;
if ((state & PG_STATE_ACTIVE) &&
if (deep_scrub_interval <= 0) {
deep_scrub_interval = cct->_conf->osd_deep_scrub_interval;
}
- bool time_for_deep = ceph_clock_now(cct) >=
+ bool time_for_deep = ceph_clock_now() >=
info.history.last_deep_scrub_stamp + deep_scrub_interval;
bool deep_coin_flip = false;
utime_t reg_stamp;
if (scrubber.must_scrub ||
(info.stats.stats_invalid && g_conf->osd_scrub_invalid_stats)) {
- reg_stamp = ceph_clock_now(cct);
+ reg_stamp = ceph_clock_now();
} else {
reg_stamp = info.history.last_scrub_stamp;
}
// finish up
unreg_next_scrub();
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
info.history.last_scrub = info.last_update;
info.history.last_scrub_stamp = now;
if (scrubber.deep) {
{
context< RecoveryMachine >().log_exit(state_name, enter_time);
PG *pg = context< RecoveryMachine >().pg;
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_initial_latency, dur);
}
{
context< RecoveryMachine >().log_exit(state_name, enter_time);
PG *pg = context< RecoveryMachine >().pg;
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_started_latency, dur);
}
{
context< RecoveryMachine >().log_exit(state_name, enter_time);
PG *pg = context< RecoveryMachine >().pg;
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_reset_latency, dur);
}
{
context< RecoveryMachine >().log_exit(state_name, enter_time);
PG *pg = context< RecoveryMachine >().pg;
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_start_latency, dur);
}
context< RecoveryMachine >().log_exit(state_name, enter_time);
PG *pg = context< RecoveryMachine >().pg;
pg->want_acting.clear();
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_primary_latency, dur);
pg->clear_primary_state();
pg->state_clear(PG_STATE_CREATING);
pg->state_clear(PG_STATE_PEERING);
pg->clear_probe_targets();
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_peering_latency, dur);
}
pg->backfill_reserved = false;
pg->backfill_reserving = false;
pg->state_clear(PG_STATE_BACKFILL);
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_backfilling_latency, dur);
}
{
context< RecoveryMachine >().log_exit(state_name, enter_time);
PG *pg = context< RecoveryMachine >().pg;
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_waitremotebackfillreserved_latency, dur);
}
{
context< RecoveryMachine >().log_exit(state_name, enter_time);
PG *pg = context< RecoveryMachine >().pg;
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_waitlocalbackfillreserved_latency, dur);
}
{
context< RecoveryMachine >().log_exit(state_name, enter_time);
PG *pg = context< RecoveryMachine >().pg;
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_notbackfilling_latency, dur);
}
{
context< RecoveryMachine >().log_exit(state_name, enter_time);
PG *pg = context< RecoveryMachine >().pg;
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_repnotrecovering_latency, dur);
}
{
context< RecoveryMachine >().log_exit(state_name, enter_time);
PG *pg = context< RecoveryMachine >().pg;
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_repwaitrecoveryreserved_latency, dur);
}
{
context< RecoveryMachine >().log_exit(state_name, enter_time);
PG *pg = context< RecoveryMachine >().pg;
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_repwaitbackfillreserved_latency, dur);
}
context< RecoveryMachine >().log_exit(state_name, enter_time);
PG *pg = context< RecoveryMachine >().pg;
pg->osd->remote_reserver.cancel_reservation(pg->info.pgid);
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_reprecovering_latency, dur);
}
{
context< RecoveryMachine >().log_exit(state_name, enter_time);
PG *pg = context< RecoveryMachine >().pg;
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_activating_latency, dur);
}
{
context< RecoveryMachine >().log_exit(state_name, enter_time);
PG *pg = context< RecoveryMachine >().pg;
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_waitlocalrecoveryreserved_latency, dur);
}
{
context< RecoveryMachine >().log_exit(state_name, enter_time);
PG *pg = context< RecoveryMachine >().pg;
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_waitremoterecoveryreserved_latency, dur);
}
{
context< RecoveryMachine >().log_exit(state_name, enter_time);
PG *pg = context< RecoveryMachine >().pg;
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_recovering_latency, dur);
}
{
context< RecoveryMachine >().log_exit(state_name, enter_time);
PG *pg = context< RecoveryMachine >().pg;
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_recovered_latency, dur);
}
context< RecoveryMachine >().log_exit(state_name, enter_time);
PG *pg = context< RecoveryMachine >().pg;
pg->state_clear(PG_STATE_CLEAN);
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_clean_latency, dur);
}
pg->state_clear(PG_STATE_BACKFILL_WAIT);
pg->state_clear(PG_STATE_RECOVERY_WAIT);
pg->state_clear(PG_STATE_REPLAY);
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_active_latency, dur);
pg->agent_stop();
}
context< RecoveryMachine >().log_exit(state_name, enter_time);
PG *pg = context< RecoveryMachine >().pg;
pg->osd->remote_reserver.cancel_reservation(pg->info.pgid);
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_replicaactive_latency, dur);
}
{
context< RecoveryMachine >().log_exit(state_name, enter_time);
PG *pg = context< RecoveryMachine >().pg;
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_stray_latency, dur);
}
{
context< RecoveryMachine >().log_exit(state_name, enter_time);
PG *pg = context< RecoveryMachine >().pg;
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_getinfo_latency, dur);
pg->blocked_by.clear();
pg->publish_stats_to_osd();
{
context< RecoveryMachine >().log_exit(state_name, enter_time);
PG *pg = context< RecoveryMachine >().pg;
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_getlog_latency, dur);
pg->blocked_by.clear();
pg->publish_stats_to_osd();
{
context< RecoveryMachine >().log_exit(state_name, enter_time);
PG *pg = context< RecoveryMachine >().pg;
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_waitactingchange_latency, dur);
}
PG *pg = context< RecoveryMachine >().pg;
pg->state_clear(PG_STATE_DOWN);
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_down_latency, dur);
pg->blocked_by.clear();
PG *pg = context< RecoveryMachine >().pg;
pg->state_clear(PG_STATE_INCOMPLETE);
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_incomplete_latency, dur);
pg->blocked_by.clear();
{
context< RecoveryMachine >().log_exit(state_name, enter_time);
PG *pg = context< RecoveryMachine >().pg;
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_getmissing_latency, dur);
pg->blocked_by.clear();
pg->publish_stats_to_osd();
{
context< RecoveryMachine >().log_exit(state_name, enter_time);
PG *pg = context< RecoveryMachine >().pg;
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
pg->osd->recoverystate_perf->tinc(rs_waitupthru_latency, dur);
}
void PG::RecoveryState::RecoveryMachine::log_exit(const char *state_name, utime_t enter_time)
{
- utime_t dur = ceph_clock_now(pg->cct) - enter_time;
+ utime_t dur = ceph_clock_now() - enter_time;
dout(5) << "exit " << state_name << " " << dur << " " << event_count << " " << event_time << dendl;
- pg->osd->pg_recovery_stats.log_exit(state_name, ceph_clock_now(pg->cct) - enter_time,
+ pg->osd->pg_recovery_stats.log_exit(state_name, ceph_clock_now() - enter_time,
event_count, event_time);
event_count = 0;
event_time = utime_t();
} else {
rctx = *new_ctx;
}
- rctx->start_time = ceph_clock_now(pg->cct);
+ rctx->start_time = ceph_clock_now();
}
}
void PG::RecoveryState::end_handle() {
if (rctx) {
- utime_t dur = ceph_clock_now(pg->cct) - rctx->start_time;
+ utime_t dur = ceph_clock_now() - rctx->start_time;
machine.event_time += dur;
}
const char *get_state_name() { return state_name; }
NamedState(CephContext *cct_, const char *state_name_)
: state_name(state_name_),
- enter_time(ceph_clock_now(cct_)) {}
+ enter_time(ceph_clock_now()) {}
virtual ~NamedState() {}
};
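
All of the RecoveryState exit hunks reduce to one idiom: NamedState stamps enter_time at construction and each exit handler logs now - enter_time into a perf counter. A stripped-down sketch of that pattern (TimedState is a hypothetical stand-in, not the PG type):

    #include "common/Clock.h"
    #include "include/utime.h"

    struct TimedState {
      utime_t enter_time;
      TimedState() : enter_time(ceph_clock_now()) {}  // was: ceph_clock_now(cct_)
      // Elapsed time in this state; the real code passes this to tinc().
      utime_t elapsed() const { return ceph_clock_now() - enter_time; }
    };
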
map<hobject_t,ScrubMap::object, hobject_t::BitwiseComparator>::const_iterator i;
map<pg_shard_t, ScrubMap *, hobject_t::BitwiseComparator>::const_iterator j;
set<hobject_t, hobject_t::BitwiseComparator> master_set;
- utime_t now = ceph_clock_now(NULL);
+ utime_t now = ceph_clock_now();
// Construct master set
for (j = maps.begin(); j != maps.end(); ++j) {
ctx->src_obc.swap(src_obc);
execute_ctx(ctx);
- utime_t prepare_latency = ceph_clock_now(cct);
+ utime_t prepare_latency = ceph_clock_now();
prepare_latency -= op->get_dequeued_time();
osd->logger->tinc(l_osd_op_prepare_lat, prepare_latency);
if (op->may_read() && op->may_write()) {
C_ProxyRead(PrimaryLogPG *p, hobject_t o, epoch_t lpr,
const PrimaryLogPG::ProxyReadOpRef& prd)
: pg(p), oid(o), last_peering_reset(lpr),
- tid(0), prdop(prd), start(ceph_clock_now(NULL))
+ tid(0), prdop(prd), start(ceph_clock_now())
{}
void finish(int r) {
if (prdop->canceled)
}
if (last_peering_reset == pg->get_last_peering_reset()) {
pg->finish_proxy_read(oid, tid, r);
- pg->osd->logger->tinc(l_osd_tier_r_lat, ceph_clock_now(NULL) - start);
+ pg->osd->logger->tinc(l_osd_tier_r_lat, ceph_clock_now() - start);
}
pg->unlock();
}
PromoteCallback(ObjectContextRef obc_, PrimaryLogPG *pg_)
: obc(obc_),
pg(pg_),
- start(ceph_clock_now(NULL)) {}
+ start(ceph_clock_now()) {}
virtual void finish(PrimaryLogPG::CopyCallbackResults results) {
PrimaryLogPG::CopyResults *results_data = results.get<1>();
int r = results.get<0>();
pg->finish_promote(r, results_data, obc);
- pg->osd->logger->tinc(l_osd_tier_promote_lat, ceph_clock_now(NULL) - start);
+ pg->osd->logger->tinc(l_osd_tier_promote_lat, ceph_clock_now() - start);
}
};
// _prior_ to being committed; it will not get set with
// writeahead journaling, for instance.
if (ctx->readable_stamp == utime_t())
- ctx->readable_stamp = ceph_clock_now(cct);
+ ctx->readable_stamp = ceph_clock_now();
});
ctx->register_on_commit(
[m, ctx, this](){
OpRequestRef op = ctx->op;
MOSDOp *m = static_cast<MOSDOp*>(op->get_req());
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
utime_t latency = now;
latency -= ctx->op->get_req()->get_recv_stamp();
utime_t process_latency = now;
break;
}
dout(10) << " found existing watch " << w << " by " << entity << dendl;
- p->second->got_ping(ceph_clock_now(NULL));
+ p->second->got_ping(ceph_clock_now());
result = 0;
} else if (op.watch.op == CEPH_OSD_WATCH_OP_UNWATCH) {
map<pair<uint64_t, entity_name_t>, watch_info_t>::iterator oi_iter =
dout(20) << __func__ << " " << soid << " " << ctx
<< " op " << pg_log_entry_t::get_op_name(log_op_type)
<< dendl;
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
// snapset
bufferlist bss;
utime_t start;
C_Flush(PrimaryLogPG *p, hobject_t o, epoch_t lpr)
: pg(p), oid(o), last_peering_reset(lpr),
- tid(0), start(ceph_clock_now(NULL))
+ tid(0), start(ceph_clock_now())
{}
void finish(int r) {
if (r == -ECANCELED)
pg->lock();
if (last_peering_reset == pg->get_last_peering_reset()) {
pg->finish_flush(oid, tid, r);
- pg->osd->logger->tinc(l_osd_tier_flush_lat, ceph_clock_now(NULL) - start);
+ pg->osd->logger->tinc(l_osd_tier_flush_lat, ceph_clock_now() - start);
}
pg->unlock();
}
RepGather *repop = new RepGather(
ctx, rep_tid, info.last_complete, false);
- repop->start = ceph_clock_now(cct);
+ repop->start = ceph_clock_now();
repop_queue.push_back(&repop->queue_item);
repop->get();
r);
repop->v = version;
- repop->start = ceph_clock_now(cct);
+ repop->start = ceph_clock_now();
repop_queue.push_back(&repop->queue_item);
osd_reqid_t reqid(osd->get_cluster_msgr_name(), 0, rep_tid);
OpContextUPtr ctx(new OpContext(OpRequestRef(), reqid, ops, obc, this));
ctx->op_t.reset(new PGTransaction());
- ctx->mtime = ceph_clock_now(g_ceph_context);
+ ctx->mtime = ceph_clock_now();
return ctx;
}
mempool::osd::list<pg_log_entry_t> log_entries;
- utime_t mtime = ceph_clock_now(cct);
+ utime_t mtime = ceph_clock_now();
map<hobject_t, pg_missing_item, hobject_t::ComparatorWithDefault>::const_iterator m =
missing_loc.get_needs_recovery().begin();
map<hobject_t, pg_missing_item, hobject_t::ComparatorWithDefault>::const_iterator mend =
OpContextUPtr ctx = simple_opc_create(obc);
ctx->at_version = get_next_version();
ctx->updated_hset_history = info.hit_set;
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
ctx->mtime = now;
hit_set_trim(ctx, 0);
simple_opc_submit(std::move(ctx));
void PrimaryLogPG::hit_set_create()
{
- utime_t now = ceph_clock_now(NULL);
+ utime_t now = ceph_clock_now();
// make a copy of the params to modify
HitSet::Params params(pool.info.hit_set_params);
bufferlist bl;
unsigned max = pool.info.hit_set_count;
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
hobject_t oid;
time_t flush_time = 0;
return false;
}
- utime_t now = ceph_clock_now(NULL);
+ utime_t now = ceph_clock_now();
utime_t ob_local_mtime;
if (obc->obs.oi.local_mtime != utime_t()) {
ob_local_mtime = obc->obs.oi.local_mtime;
if (agent_state->evict_mode != TierAgentState::EVICT_MODE_FULL) {
// is this object old than cache_min_evict_age?
- utime_t now = ceph_clock_now(NULL);
+ utime_t now = ceph_clock_now();
utime_t ob_local_mtime;
if (obc->obs.oi.local_mtime != utime_t()) {
ob_local_mtime = obc->obs.oi.local_mtime;
PerfCounters *logger,
OpRequestRef op, int subop)
{
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
utime_t latency = now;
latency -= op->get_req()->get_recv_stamp();
}
}
if (will_ping) {
- last_ping = ceph_clock_now(NULL);
+ last_ping = ceph_clock_now();
register_cb();
} else {
unregister_cb();
assert(in_progress_notifies.find(notif->notify_id) ==
in_progress_notifies.end());
if (will_ping) {
- utime_t cutoff = ceph_clock_now(NULL);
+ utime_t cutoff = ceph_clock_now();
cutoff.sec_ref() -= timeout;
if (last_ping < cutoff) {
dout(10) << __func__ << " " << notif->notify_id
assert(last_written.write_pos >= last_written.expire_pos);
assert(last_written.expire_pos >= last_written.trimmed_pos);
- last_wrote_head = ceph::real_clock::now(cct);
+ last_wrote_head = ceph::real_clock::now();
bufferlist bl;
::encode(last_written, bl);
object_t oid = file_object_t(ino, 0);
object_locator_t oloc(pg_pool);
- objecter->write_full(oid, oloc, snapc, bl, ceph::real_clock::now(cct), 0,
+ objecter->write_full(oid, oloc, snapc, bl, ceph::real_clock::now(), 0,
NULL, wrap_finisher(new C_WriteHead(
this, last_written,
wrap_finisher(oncommit))),
// calc latency?
if (logger) {
- ceph::timespan lat = ceph::real_clock::now(cct) - stamp;
+ ceph::timespan lat = ceph::real_clock::now() - stamp;
logger->tinc(logger_key_lat, lat);
}
// submit write for anything pending
// flush _start_ pos to _finish_flush
- ceph::real_time now = ceph::real_clock::now(cct);
+ ceph::real_time now = ceph::real_clock::now();
SnapContext snapc;
Context *onsafe = new C_Flush(this, flush_pos, now); // on COMMIT
}
filer.write(ino, &layout, snapc,
- flush_pos, len, write_bl, ceph::real_clock::now(cct),
+ flush_pos, len, write_bl, ceph::real_clock::now(),
0,
NULL, wrap_finisher(onsafe), write_iohint);
// write head?
if (last_wrote_head + seconds(cct->_conf->journaler_write_head_interval)
- < ceph::real_clock::now(cct)) {
+ < ceph::real_clock::now()) {
_write_head();
}
}
Context *c = wrap_finisher(new C_Journaler_Prezero(this, prezeroing_pos,
len));
filer.zero(ino, &layout, snapc, prezeroing_pos, len,
- ceph::real_clock::now(cct), 0, NULL, c);
+ ceph::real_clock::now(), 0, NULL, c);
prezeroing_pos += len;
}
}
uint64_t first = trimmed_pos / get_layout_period();
uint64_t num = (write_pos - trimmed_pos) / get_layout_period() + 2;
filer.purge_range(ino, &layout, SnapContext(), first, num,
- ceph::real_clock::now(cct), 0,
+ ceph::real_clock::now(), 0,
wrap_finisher(new C_EraseFinish(
this, wrap_finisher(completion))));
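
The real-time clock gets the same treatment: ceph::real_clock::now() follows the std::chrono convention and never needed a context either. A sketch (assuming common/ceph_time.h):

    #include "common/ceph_time.h"

    // Stamp an operation with wall-clock time, as the Journaler calls above do.
    ceph::real_time stamp_now() {
      // was: ceph::real_clock::now(cct)
      return ceph::real_clock::now();
    }
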
if (data_result == 0) {
// Async delete the journal header
- filer.purge_range(ino, &layout, SnapContext(), 0, 1, ceph::real_clock::now(cct),
+ filer.purge_range(ino, &layout, SnapContext(), 0, 1,
+ ceph::real_clock::now(),
0, wrap_finisher(completion));
} else {
lderr(cct) << "Failed to delete journal " << ino << " data: "
uint64_t num = (trim_to - trimming_pos) / period;
SnapContext snapc;
filer.purge_range(ino, &layout, snapc, first, num,
- ceph::real_clock::now(cct), 0,
+ ceph::real_clock::now(), 0,
wrap_finisher(new C_Trim(this, trim_to)));
trimming_pos = trim_to;
}
if (flusher_stop)
break;
- flusher_cond.WaitInterval(cct, lock, seconds(1));
+ flusher_cond.WaitInterval(lock, seconds(1));
}
/* Wait for reads to finish. This is only possible if handling
// XXX move RGWLibIO and timing setup into process_request
#if 0 /* XXX */
- utime_t tm = ceph_clock_now(NULL);
+ utime_t tm = ceph_clock_now();
#endif
RGWLibIO io_ctx;
// XXX move RGWLibIO and timing setup into process_request
#if 0 /* XXX */
- utime_t tm = ceph_clock_now(NULL);
+ utime_t tm = ceph_clock_now();
#endif
RGWLibIO io_ctx;
cerr << "ERROR: marker was not specified" <<std::endl;
return EINVAL;
}
- utime_t time = ceph_clock_now(NULL);
+ utime_t time = ceph_clock_now();
if (!date.empty()) {
ret = parse_date_str(date, time);
if (ret < 0) {
int interval = cct->_conf->rgw_data_log_window * 3 / 4;
lock.Lock();
- cond.WaitInterval(cct, lock, utime_t(interval, 0));
+ cond.WaitInterval(lock, utime_t(interval, 0));
lock.Unlock();
} while (!log->going_down());
system_request = false;
- time = ceph_clock_now(cct);
+ time = ceph_clock_now();
perm_mask = 0;
bucket_instance_shard_id = -1;
content_length = 0;
if (history.size() > (size_t)max_history) {
history.pop_front();
}
- timestamp = ceph_clock_now(cct);
+ timestamp = ceph_clock_now();
return status;
}
protected:
int _send_request() {
Mutex::Locker l(*lock);
- return cond->WaitInterval(cct, *lock, interval);
+ return cond->WaitInterval(*lock, interval);
}
public:
RGWAsyncWait(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, CephContext *_cct,
done:
dispose_processor(processor);
perfcounter->tinc(l_rgw_put_lat,
- (ceph_clock_now(s->cct) - s->time));
+ (ceph_clock_now() - s->time));
return op_ret;
} /* exec_finish */
int RGWGC::process(int index, int max_secs)
{
rados::cls::lock::Lock l(gc_index_lock_name);
- utime_t end = ceph_clock_now(g_ceph_context);
+ utime_t end = ceph_clock_now();
std::list<string> remove_tags;
/* max_secs should be greater than zero. We don't want a zero max_secs
std::list<cls_rgw_obj>::iterator liter;
cls_rgw_obj_chain& chain = info.chain;
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
if (now >= end)
goto done;
void *RGWGC::GCWorker::entry() {
do {
- utime_t start = ceph_clock_now(cct);
+ utime_t start = ceph_clock_now();
dout(2) << "garbage collection: start" << dendl;
int r = gc->process();
if (r < 0) {
if (gc->going_down())
break;
- utime_t end = ceph_clock_now(cct);
+ utime_t end = ceph_clock_now();
end -= start;
int secs = cct->_conf->rgw_gc_processor_period;
secs -= end.sec();
lock.Lock();
- cond.WaitInterval(cct, lock, utime_t(secs, 0));
+ cond.WaitInterval(lock, utime_t(secs, 0));
lock.Unlock();
} while (!gc->going_down());
}
lock.Lock();
- cond.WaitInterval(cct, lock,
+ cond.WaitInterval(lock,
utime_t(cct->_conf->rgw_keystone_revocation_interval, 0));
lock.Unlock();
} while (!cache->going_down());
const std::string& get_user_name() const {return user.name;};
bool has_role(const string& r) const;
bool expired() {
- uint64_t now = ceph_clock_now(NULL).sec();
+ uint64_t now = ceph_clock_now().sec();
return (now >= (uint64_t)get_expires());
}
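
For expiry checks like the one above, the clock is reduced to epoch seconds before comparing. A sketch (is_expired and expires_at are illustrative names):

    #include "common/Clock.h"

    // Compare current epoch seconds against a stored expiration.
    bool is_expired(uint64_t expires_at) {
      uint64_t now = ceph_clock_now().sec();  // was: ceph_clock_now(NULL).sec()
      return now >= expires_at;
    }
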
int parse(CephContext *cct,
void *RGWLC::LCWorker::entry() {
do {
- utime_t start = ceph_clock_now(cct);
+ utime_t start = ceph_clock_now();
if (should_work(start)) {
dout(5) << "life cycle: start" << dendl;
int r = lc->process();
if (lc->going_down())
break;
- utime_t end = ceph_clock_now(cct);
+ utime_t end = ceph_clock_now();
int secs = schedule_next_start_time(start, end);
time_t next_time = end + secs;
char buf[30];
dout(5) << "schedule life cycle next start time: " << nt <<dendl;
lock.Lock();
- cond.WaitInterval(cct, lock, utime_t(secs, 0));
+ cond.WaitInterval(lock, utime_t(secs, 0));
lock.Unlock();
} while (!lc->going_down());
{
struct tm bdt;
time_t begin_of_day;
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
localtime_r(&start_date, &bdt);
if (cct->_conf->rgw_lc_debug_interval > 0) {
vector<RGWObjEnt>::iterator obj_iter;
int pos = 0;
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
for (obj_iter = objs.begin(); obj_iter != objs.end(); obj_iter++) {
bool prefix_match = false;
int match_days = 0;
vector<RGWObjEnt>::iterator obj_iter;
int days = prefix_iter->second;
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
for (obj_iter = objs.begin(); obj_iter != objs.end(); obj_iter++) {
if (obj_has_expired(now - ceph::real_clock::to_time_t((*obj_iter).mtime), days)) {
{
rados::cls::lock::Lock l(lc_index_lock_name);
do {
- utime_t now = ceph_clock_now(cct);
+ utime_t now = ceph_clock_now();
pair<string, int > entry;//string = bucket_name:bucket_id ,int = LC_BUCKET_STATUS
if (max_lock_secs <= 0)
return -EAGAIN;
RGWLoadGenRequestEnv env;
- utime_t tm = ceph_clock_now(NULL);
+ utime_t tm = ceph_clock_now();
env.port = 80;
env.content_length = req->content_length;
timer.init();
Mutex::Locker l(timer_lock);
set_timer();
- utime_t ts = ceph_clock_now(cct);
+ utime_t ts = ceph_clock_now();
recalc_round_timestamp(ts);
}
entry.add(op_name, data);
- utime_t ts = ceph_clock_now(s->cct);
+ utime_t ts = ceph_clock_now();
usage_logger->insert(ts, entry);
}
uint64_t bytes_received = ACCOUNTING_IO(s)->get_bytes_received();
entry.time = s->time;
- entry.total_time = ceph_clock_now(s->cct) - s->time;
+ entry.total_time = ceph_clock_now() - s->time;
entry.bytes_sent = bytes_sent;
entry.bytes_received = bytes_received;
if (s->err.http_ret) {
int num_entries = cct->_conf->rgw_objexp_chunk_size;
int max_secs = cct->_conf->rgw_objexp_gc_interval;
- utime_t end = ceph_clock_now(cct);
+ utime_t end = ceph_clock_now();
end += max_secs;
rados::cls::lock::Lock l(objexp_lock_name);
trim_chunk(shard, last_run, round_start, marker, out_marker);
}
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
if (now >= end) {
done = false;
break;
void *RGWObjectExpirer::OEWorker::entry() {
utime_t last_run;
do {
- utime_t start = ceph_clock_now(cct);
+ utime_t start = ceph_clock_now();
ldout(cct, 2) << "object expiration: start" << dendl;
if (oe->inspect_all_shards(last_run, start)) {
/* All shards have been processed properly. Next time we can start
if (oe->going_down())
break;
- utime_t end = ceph_clock_now(cct);
+ utime_t end = ceph_clock_now();
end -= start;
int secs = cct->_conf->rgw_objexp_gc_interval;
secs -= end.sec();
lock.Lock();
- cond.WaitInterval(cct, lock, utime_t(secs, 0));
+ cond.WaitInterval(lock, utime_t(secs, 0));
lock.Unlock();
} while (!oe->going_down());
bool is_truncated;
vector<RGWObjEnt> objs;
- utime_t start_time = ceph_clock_now(cct);
+ utime_t start_time = ceph_clock_now();
RGWRados::Bucket target(store, *pbucket_info);
RGWRados::Bucket::List list_op(&target);
}
perfcounter->tinc(l_rgw_get_lat,
- (ceph_clock_now(cct) - start_time));
+ (ceph_clock_now() - start_time));
if (found_start && !handled_end) {
len_count += end_ofs - start_ofs;
}
handled_end = found_end;
- start_time = ceph_clock_now(cct);
+ start_time = ceph_clock_now();
}
} while (is_truncated);
return 0;
}
- utime_t start_time = ceph_clock_now(cct);
+ utime_t start_time = ceph_clock_now();
map<uint64_t, rgw_slo_part>::iterator iter = slo_parts.upper_bound(ofs);
if (iter != slo_parts.begin()) {
}
perfcounter->tinc(l_rgw_get_lat,
- (ceph_clock_now(cct) - start_time));
+ (ceph_clock_now() - start_time));
if (found_start) {
if (cb) {
}
}
- start_time = ceph_clock_now(cct);
+ start_time = ceph_clock_now();
}
return 0;
int RGWGetObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len)
{
/* garbage collection related handling */
- utime_t start_time = ceph_clock_now(s->cct);
+ utime_t start_time = ceph_clock_now();
if (start_time > gc_invalidate_time) {
int r = store->defer_gc(s->obj_ctx, obj);
if (r < 0) {
return false;
}
- if (delete_at <= ceph_clock_now(g_ceph_context)) {
+ if (delete_at <= ceph_clock_now()) {
return true;
}
}
{
utime_t start_time = s->time;
bufferlist bl;
- gc_invalidate_time = ceph_clock_now(s->cct);
+ gc_invalidate_time = ceph_clock_now();
gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2);
bool need_decompress;
op_ret = filter->flush();
perfcounter->tinc(l_rgw_get_lat,
- (ceph_clock_now(s->cct) - start_time));
+ (ceph_clock_now() - start_time));
if (op_ret < 0) {
goto done_err;
}
done:
dispose_processor(processor);
perfcounter->tinc(l_rgw_put_lat,
- (ceph_clock_now(s->cct) - s->time));
+ (ceph_clock_now() - s->time));
}
int RGWPostObj::verify_permission()
search_info = *info;
search_info.job_name = job_name;
search_info.num_shards = (info->num_shards ? info->num_shards : DEFAULT_NUM_SHARDS);
- search_info.start_time = ceph_clock_now(store->ctx());
+ search_info.start_time = ceph_clock_now();
search_stage = RGWOrphanSearchStage(ORPHAN_SEARCH_STAGE_INIT);
r = save_state();
int RGWPolicy::check(RGWPolicyEnv *env, string& err_msg)
{
- uint64_t now = ceph_clock_now(NULL).sec();
+ uint64_t now = ceph_clock_now().sec();
if (expires <= now) {
dout(0) << "NOTICE: policy calculated as expired: " << expiration_str << dendl;
err_msg = "Policy expired";
void RGWQuotaCache<T>::set_stats(const rgw_user& user, rgw_bucket& bucket, RGWQuotaCacheStats& qs, RGWStorageStats& stats)
{
qs.stats = stats;
- qs.expiration = ceph_clock_now(store->ctx());
+ qs.expiration = ceph_clock_now();
qs.async_refresh_time = qs.expiration;
qs.expiration += store->ctx()->_conf->rgw_bucket_quota_ttl;
qs.async_refresh_time += store->ctx()->_conf->rgw_bucket_quota_ttl / 2;
template<class T>
int RGWQuotaCache<T>::get_stats(const rgw_user& user, rgw_bucket& bucket, RGWStorageStats& stats, RGWQuotaInfo& quota) {
RGWQuotaCacheStats qs;
- utime_t now = ceph_clock_now(store->ctx());
+ utime_t now = ceph_clock_now();
if (map_find(user, bucket, qs)) {
if (qs.async_refresh_time.sec() > 0 && now >= qs.async_refresh_time) {
int r = async_refresh(user, bucket, qs);
}
}
- if (can_use_cached_stats(quota, qs.stats) && qs.expiration > ceph_clock_now(store->ctx())) {
+ if (can_use_cached_stats(quota, qs.stats) && qs.expiration >
+ ceph_clock_now()) {
stats = qs.stats;
return 0;
}
break;
lock.Lock();
- cond.WaitInterval(cct, lock, utime_t(cct->_conf->rgw_user_quota_bucket_sync_interval, 0));
+ cond.WaitInterval(lock, utime_t(cct->_conf->rgw_user_quota_bucket_sync_interval, 0));
lock.Unlock();
} while (!stats->going_down());
ldout(cct, 20) << "BucketsSyncThread: done" << dendl;
}
lock.Lock();
- cond.WaitInterval(cct, lock, utime_t(cct->_conf->rgw_user_quota_sync_interval, 0));
+ cond.WaitInterval(lock, utime_t(cct->_conf->rgw_user_quota_sync_interval, 0));
lock.Unlock();
} while (!stats->going_down());
ldout(cct, 20) << "UserSyncThread: done" << dendl;
utime_t interval = utime_t(msec / 1000, (msec % 1000) * 1000000);
do {
- utime_t start = ceph_clock_now(cct);
+ utime_t start = ceph_clock_now();
int r = processor->process();
if (r < 0) {
dout(0) << "ERROR: processor->process() returned error r=" << r << dendl;
if (processor->going_down())
break;
- utime_t end = ceph_clock_now(cct);
+ utime_t end = ceph_clock_now();
end -= start;
uint64_t cur_msec = processor->interval_msec();
wait_time -= end;
lock.Lock();
- cond.WaitInterval(cct, lock, wait_time);
+ cond.WaitInterval(lock, wait_time);
lock.Unlock();
} else {
lock.Lock();
info.bucket_index_shard_hash_type = RGWBucketInfo::MOD;
info.requester_pays = false;
if (real_clock::is_zero(creation_time)) {
- info.creation_time = ceph::real_clock::now(cct);
+ info.creation_time = ceph::real_clock::now();
} else {
info.creation_time = creation_time;
}
int RGWRados::process_expire_objects()
{
- obj_expirer->inspect_all_shards(utime_t(), ceph_clock_now(cct));
+ obj_expirer->inspect_all_shards(utime_t(), ceph_clock_now());
return 0;
}
if (check_state) {
cls_statelog_check_state(op, client_id, op_id, object, *check_state);
}
- utime_t ts = ceph_clock_now(store->ctx());
+ utime_t ts = ceph_clock_now();
bufferlist nobl;
cls_statelog_add(op, client_id, op_id, object, ts, state, (bl ? *bl : nobl));
r = ioctx.operate(oid, &op);
} /* RGWRequest::log_format */
void RGWRequest::log_init() {
- ts = ceph_clock_now(g_ceph_context);
+ ts = ceph_clock_now();
}
void RGWRequest::log(struct req_state *s, const char *msg) {
req_str.append(" ");
req_str.append(s->info.request_uri);
}
- utime_t t = ceph_clock_now(g_ceph_context) - ts;
+ utime_t t = ceph_clock_now() - ts;
dout(2) << "req " << id << ":" << t << ":" << s->dialect << ":"
<< req_str << ":" << (op ? op->name() : "") << ":" << msg
<< dendl;
return 0;
}
-static void get_new_date_str(CephContext *cct, string& date_str)
+static void get_new_date_str(string& date_str)
{
- utime_t tm = ceph_clock_now(cct);
+ utime_t tm = ceph_clock_now();
stringstream s;
tm.asctime(s);
date_str = s.str();
new_url.append(new_resource);
string date_str;
- get_new_date_str(cct, date_str);
+ get_new_date_str(date_str);
headers.push_back(pair<string, string>("HTTP_DATE", date_str));
string canonical_header;
{
string date_str;
- get_new_date_str(cct, date_str);
+ get_new_date_str(date_str);
RGWEnv new_env;
req_info new_info(cct, &new_env);
new_url.append("/");
string date_str;
- get_new_date_str(cct, date_str);
+ get_new_date_str(date_str);
RGWEnv new_env;
req_info new_info(cct, &new_env);
new_url.append("/");
string date_str;
- get_new_date_str(cct, date_str);
+ get_new_date_str(date_str);
RGWEnv new_env;
req_info new_info(cct, &new_env);
bool using_qs;
uint64_t now_req = 0;
- uint64_t now = ceph_clock_now(s->cct);
+ uint64_t now = ceph_clock_now();
/* v4 requires rados auth */
if (!store->ctx()->_conf->rgw_s3_auth_use_rados) {
const RGWAccessControlPolicy_SWIFTAcct &policy)
{
/* Adding X-Timestamp to keep align with Swift API */
- dump_header(s, "X-Timestamp", ceph_clock_now(g_ceph_context));
+ dump_header(s, "X-Timestamp", ceph_clock_now());
dump_header(s, "X-Account-Container-Count", buckets_count);
dump_header(s, "X-Account-Object-Count", buckets_object_count);
bool RGWInfo_ObjStore_SWIFT::is_expired(const std::string& expires, CephContext* cct)
{
string err;
- const utime_t now = ceph_clock_now(cct);
+ const utime_t now = ceph_clock_now();
const uint64_t expiration = (uint64_t)strict_strtoll(expires.c_str(),
10, &err);
if (!err.empty()) {
bool RGWTempURLAuthEngine::is_expired(const std::string& expires) const
{
string err;
- const utime_t now = ceph_clock_now(g_ceph_context);
+ const utime_t now = ceph_clock_now();
const uint64_t expiration = (uint64_t)strict_strtoll(expires.c_str(),
10, &err);
if (!err.empty()) {
if (ret < 0)
return ret;
- utime_t expiration = ceph_clock_now(cct);
+ utime_t expiration = ceph_clock_now();
expiration += cct->_conf->rgw_swift_token_expiration;
return build_token(swift_user, key, nonce, expiration, bl);
throw -EINVAL;
}
- const utime_t now = ceph_clock_now(cct);
+ const utime_t now = ceph_clock_now();
if (expiration < now) {
ldout(cct, 0) << "NOTICE: old timed out token was used now=" << now
<< " token.expiration=" << expiration
if (lock)
lock->Lock();
utime_t inc(2 * i, 0);
- utime_t t = ceph_clock_now(g_ceph_context) + inc;
+ utime_t t = ceph_clock_now() + inc;
timer.add_event_at(t, test_contexts[i]);
if (lock)
lock->Unlock();
{
utime_t inc(100, 0);
- utime_t t = ceph_clock_now(g_ceph_context) + inc;
+ utime_t t = ceph_clock_now() + inc;
lock->Lock();
timer.add_event_at(t, test_contexts[0]);
lock->Unlock();
{
utime_t inc(2, 0);
- utime_t t = ceph_clock_now(g_ceph_context) + inc;
+ utime_t t = ceph_clock_now() + inc;
lock->Lock();
timer.add_event_at(t, test_contexts[1]);
lock->Unlock();
safe_timer_lock.Lock();
for (int i = 0; i < MAX_TEST_CONTEXTS; ++i) {
utime_t inc(4 * i, 0);
- utime_t t = ceph_clock_now(g_ceph_context) + inc;
+ utime_t t = ceph_clock_now() + inc;
safe_timer.add_event_at(t, test_contexts[i]);
}
safe_timer_lock.Unlock();
safe_timer_lock.Lock();
for (int i = 0; i < MAX_TEST_CONTEXTS; ++i) {
utime_t inc(4 * i, 0);
- utime_t t = ceph_clock_now(g_ceph_context) + inc;
+ utime_t t = ceph_clock_now() + inc;
safe_timer.add_event_at(t, test_contexts[i]);
}
safe_timer_lock.Unlock();
auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_OSD,
CODE_ENVIRONMENT_UTILITY, 0);
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
list<T*> ls;
for (int i=0; i<threads; i++) {
delete t;
}
- utime_t t = ceph_clock_now(NULL);
+ utime_t t = ceph_clock_now();
t -= start;
cout << " flushing.. " << t << " so far ..." << std::endl;
g_ceph_context->_log->flush();
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
utime_t dur = end - start;
cout << dur << std::endl;
void bench_buffer_alloc(int size, int num)
{
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
for (int i=0; i<num; ++i) {
bufferptr p = buffer::create(size);
p.zero();
}
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
cout << num << " alloc of size " << size
<< " in " << (end - start) << std::endl;
}
TEST(BufferPtr, copy_out_bench) {
for (int s=1; s<=8; s*=2) {
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
int buflen = 1048576;
int count = 1000;
uint64_t v;
bp.copy_out(j, s, (char *)&v);
}
}
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
cout << count << " fills of buffer len " << buflen
<< " with " << s << " byte copy_out in"
<< (end - start) << std::endl;
TEST(BufferPtr, copy_in_bench) {
for (int s=1; s<=8; s*=2) {
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
int buflen = 1048576;
int count = 1000;
for (int i=0; i<count; ++i) {
bp.copy_in(j, s, (char *)&j, false);
}
}
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
cout << count << " fills of buffer len " << buflen
<< " with " << s << " byte copy_in in "
<< (end - start) << std::endl;
char src[1048576];
memset(src, 0, sizeof(src));
for (int s=4; s<=16384; s*=4) {
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
int buflen = 1048576;
int count = 4000;
for (int i=0; i<count; ++i) {
bp.append(src, s);
}
}
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
cout << count << " fills of buffer len " << buflen
<< " with " << s << " byte appends in "
<< (end - start) << std::endl;
void bench_bufferlist_alloc(int size, int num, int per)
{
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
for (int i=0; i<num; ++i) {
bufferlist bl;
for (int j=0; j<per; ++j)
bl.append(buffer::create(size));
}
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
cout << num << " alloc of size " << size
<< " in " << (end - start) << std::endl;
}
bufferlist blb;
blb.push_back(b);
{
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
uint32_t r = bla.crc32c(0);
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
float rate = (float)len / (float)(1024*1024) / (float)(end - start);
std::cout << "a.crc32c(0) = " << r << " at " << rate << " MB/sec" << std::endl;
ASSERT_EQ(r, 1138817026u);
}
assert(buffer::get_cached_crc() == 0 + base_cached);
{
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
uint32_t r = bla.crc32c(0);
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
float rate = (float)len / (float)(1024*1024) / (float)(end - start);
std::cout << "a.crc32c(0) (again) = " << r << " at " << rate << " MB/sec" << std::endl;
ASSERT_EQ(r, 1138817026u);
assert(buffer::get_cached_crc() == 1 + base_cached);
{
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
uint32_t r = bla.crc32c(5);
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
float rate = (float)len / (float)(1024*1024) / (float)(end - start);
std::cout << "a.crc32c(5) = " << r << " at " << rate << " MB/sec" << std::endl;
ASSERT_EQ(r, 3239494520u);
assert(buffer::get_cached_crc() == 1 + base_cached);
assert(buffer::get_cached_crc_adjusted() == 1 + base_cached_adjusted);
{
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
uint32_t r = bla.crc32c(5);
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
float rate = (float)len / (float)(1024*1024) / (float)(end - start);
std::cout << "a.crc32c(5) (again) = " << r << " at " << rate << " MB/sec" << std::endl;
ASSERT_EQ(r, 3239494520u);
assert(buffer::get_cached_crc_adjusted() == 2 + base_cached_adjusted);
{
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
uint32_t r = blb.crc32c(0);
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
float rate = (float)len / (float)(1024*1024) / (float)(end - start);
std::cout << "b.crc32c(0) = " << r << " at " << rate << " MB/sec" << std::endl;
ASSERT_EQ(r, 2481791210u);
}
assert(buffer::get_cached_crc() == 1 + base_cached);
{
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
uint32_t r = blb.crc32c(0);
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
float rate = (float)len / (float)(1024*1024) / (float)(end - start);
std::cout << "b.crc32c(0) (again)= " << r << " at " << rate << " MB/sec" << std::endl;
ASSERT_EQ(r, 2481791210u);
ab.push_back(a);
ab.push_back(b);
{
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
uint32_t r = ab.crc32c(0);
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
float rate = (float)ab.length() / (float)(1024*1024) / (float)(end - start);
std::cout << "ab.crc32c(0) = " << r << " at " << rate << " MB/sec" << std::endl;
ASSERT_EQ(r, 2988268779u);
ac.push_back(a);
ac.push_back(c);
{
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
uint32_t r = ac.crc32c(0);
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
float rate = (float)ac.length() / (float)(1024*1024) / (float)(end - start);
std::cout << "ac.crc32c(0) = " << r << " at " << rate << " MB/sec" << std::endl;
ASSERT_EQ(r, 2988268779u);
ba.push_back(b);
ba.push_back(a);
{
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
uint32_t r = ba.crc32c(0);
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
float rate = (float)ba.length() / (float)(1024*1024) / (float)(end - start);
std::cout << "ba.crc32c(0) = " << r << " at " << rate << " MB/sec" << std::endl;
ASSERT_EQ(r, 169240695u);
assert(buffer::get_cached_crc() == 5 + base_cached);
assert(buffer::get_cached_crc_adjusted() == 4 + base_cached_adjusted);
{
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
uint32_t r = ba.crc32c(5);
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
float rate = (float)ba.length() / (float)(1024*1024) / (float)(end - start);
std::cout << "ba.crc32c(5) = " << r << " at " << rate << " MB/sec" << std::endl;
ASSERT_EQ(r, 1265464778u);
Lock l("lock");
utime_t dur(5, 0);
l.set_duration(dur);
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
ASSERT_EQ(0, l.lock_exclusive(&ioctx, oid));
int r = l.lock_exclusive(&ioctx, oid);
if (r == 0) {
// it's possible to get success if we were just really slow...
- ASSERT_TRUE(ceph_clock_now(NULL) > start + dur);
+ ASSERT_TRUE(ceph_clock_now() > start + dur);
} else {
ASSERT_EQ(-EEXIST, r);
}
ASSERT_EQ(0, ioctx.create(oid, true));
/* generate log */
- utime_t start_time = ceph_clock_now(g_ceph_context);
+ utime_t start_time = ceph_clock_now();
generate_log(ioctx, oid, 10, start_time, false);
librados::ObjectReadOperation *rop = new_rop();
ASSERT_EQ(0, ioctx.create(oid, true));
/* generate log */
- utime_t start_time = ceph_clock_now(g_ceph_context);
+ utime_t start_time = ceph_clock_now();
generate_log(ioctx, oid, 10, start_time, true);
librados::ObjectReadOperation *rop = new_rop();
ASSERT_EQ(0, ioctx.create(oid, true));
/* generate log */
- utime_t start_time = ceph_clock_now(g_ceph_context);
+ utime_t start_time = ceph_clock_now();
generate_log(ioctx, oid, 10, start_time, true);
librados::ObjectReadOperation *rop = new_rop();
bufferlist bl;
::encode(state, bl);
- utime_t ts = ceph_clock_now(g_ceph_context);
+ utime_t ts = ceph_clock_now();
cls_statelog_add(*op, client_id, op_id, obj, ts, state, bl);
}
std::cout << "calculating crc" << std::endl;
{
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
unsigned val = ceph_crc32c(0, (unsigned char *)a, len);
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
float rate = (float)len / (float)(1024*1024) / (float)(end - start);
std::cout << "best choice = " << rate << " MB/sec" << std::endl;
ASSERT_EQ(261108528u, val);
}
{
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
unsigned val = ceph_crc32c(0xffffffff, (unsigned char *)a, len);
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
float rate = (float)len / (float)(1024*1024) / (float)(end - start);
std::cout << "best choice 0xffffffff = " << rate << " MB/sec" << std::endl;
ASSERT_EQ(3895876243u, val);
}
{
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
unsigned val = ceph_crc32c_sctp(0, (unsigned char *)a, len);
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
float rate = (float)len / (float)(1024*1024) / (float)(end - start);
std::cout << "sctp = " << rate << " MB/sec" << std::endl;
ASSERT_EQ(261108528u, val);
}
{
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
unsigned val = ceph_crc32c_intel_baseline(0, (unsigned char *)a, len);
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
float rate = (float)len / (float)(1024*1024) / (float)(end - start);
std::cout << "intel baseline = " << rate << " MB/sec" << std::endl;
ASSERT_EQ(261108528u, val);
}
if (ceph_arch_aarch64_crc32) // Skip if CRC32C instructions are not defined.
{
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
unsigned val = ceph_crc32c_aarch64(0, (unsigned char *)a, len);
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
float rate = (float)len / (float)(1024*1024) / (float)(end - start);
std::cout << "aarch64 = " << rate << " MB/sec" << std::endl;
ASSERT_EQ(261108528u, val);
TEST(AES, LoopKey) {
bufferptr k(16);
get_random_bytes(k.c_str(), k.length());
- CryptoKey key(CEPH_CRYPTO_AES, ceph_clock_now(NULL), k);
+ CryptoKey key(CEPH_CRYPTO_AES, ceph_clock_now(), k);
bufferlist data;
bufferptr r(128);
get_random_bytes(r.c_str(), r.length());
data.append(r);
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
int n = 100000;
for (int i=0; i<n; ++i) {
ASSERT_EQ(r, 0);
}
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
utime_t dur = end - start;
cout << n << " encoded in " << dur << std::endl;
}
for (int i = 0; i < k + m; i++) {
want_to_encode.insert(i);
}
- utime_t begin_time = ceph_clock_now(g_ceph_context);
+ utime_t begin_time = ceph_clock_now();
for (int i = 0; i < max_iterations; i++) {
map<int,bufferlist> encoded;
code = erasure_code->encode(want_to_encode, in, &encoded);
if (code)
return code;
}
- utime_t end_time = ceph_clock_now(g_ceph_context);
+ utime_t end_time = ceph_clock_now();
cout << (end_time - begin_time) << "\t" << (max_iterations * (in_size / 1024)) << endl;
return 0;
}
display_chunks(encoded, erasure_code->get_chunk_count());
}
- utime_t begin_time = ceph_clock_now(g_ceph_context);
+ utime_t begin_time = ceph_clock_now();
for (int i = 0; i < max_iterations; i++) {
if (exhaustive_erasures) {
code = decode_erasures(encoded, encoded, 0, erasures, erasure_code);
return code;
}
}
- utime_t end_time = ceph_clock_now(g_ceph_context);
+ utime_t end_time = ceph_clock_now();
cout << (end_time - begin_time) << "\t" << (max_iterations * (in_size / 1024)) << endl;
return 0;
}
Mutex::Locker locker(m_listener.mutex);
while (m_listener.updates[metadata.get()] == 0) {
if (m_listener.cond.WaitInterval(
- reinterpret_cast<CephContext*>(m_ioctx.cct()),
m_listener.mutex, utime_t(10, 0)) != 0) {
return false;
}
}
if (m_replay_hander.entries_available) {
m_replay_hander.entries_available = false;
} else if (m_replay_hander.cond.WaitInterval(
- reinterpret_cast<CephContext*>(m_ioctx.cct()),
m_replay_hander.lock, utime_t(10, 0)) != 0) {
break;
}
player->try_pop_front(&entry, &commit_tid);
if (m_replay_hander.cond.WaitInterval(
- reinterpret_cast<CephContext*>(m_ioctx.cct()),
m_replay_hander.lock, utime_t(10, 0)) != 0) {
return false;
}
Mutex::Locker locker(m_handler.lock);
while (!m_handler.is_closed) {
if (m_handler.cond.WaitInterval(
- reinterpret_cast<CephContext*>(m_ioctx.cct()),
m_handler.lock, utime_t(10, 0)) != 0) {
break;
}
Mutex::Locker locker(m_handler.lock);
while (m_handler.overflows == 0) {
if (m_handler.cond.WaitInterval(
- reinterpret_cast<CephContext*>(m_ioctx.cct()),
m_handler.lock, utime_t(10, 0)) != 0) {
break;
}
//throughput
args->kvsb->data.throughput_jf.open_object_section("throughput");
args->kvsb->data.throughput_jf.dump_unsigned(string(1, args->op).c_str(),
- ceph_clock_now(g_ceph_context));
+ ceph_clock_now());
args->kvsb->data.throughput_jf.close_section();
data_lock->Unlock();
utime_t end_time;
void start_time() {
- begin_time = ceph_clock_now(g_ceph_context);
+ begin_time = ceph_clock_now();
}
void stop_time() {
- end_time = ceph_clock_now(g_ceph_context);
+ end_time = ceph_clock_now();
}
double get_time() {
return (end_time - begin_time) * 1000;
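These helpers lean on utime_t's implicit double conversion: the subtraction gives fractional seconds, and the multiply by 1000 yields milliseconds. The same stopwatch as a self-contained sketch:

    // Illustration only; mirrors the start_time()/stop_time()/get_time()
    // helpers above.
    #include "common/Clock.h"

    struct Stopwatch {
      utime_t begin_time, end_time;
      void start_time() { begin_time = ceph_clock_now(); }
      void stop_time()  { end_time = ceph_clock_now(); }
      double get_time() const {
        return (double)(end_time - begin_time) * 1000;   // milliseconds
      }
    };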
cache_ioctx.set_namespace("");
// keep reading until we see our object appear in the HitSet
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
utime_t hard_stop = start + utime_t(600, 0);
while (true) {
- utime_t now = ceph_clock_now(NULL);
+ utime_t now = ceph_clock_now();
ASSERT_TRUE(now < hard_stop);
string name = "foo";
cache_ioctx.set_namespace("");
// do a bunch of writes and make sure the hitsets rotate
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
utime_t hard_stop = start + utime_t(count * period * 50, 0);
time_t first = 0;
}
}
- utime_t now = ceph_clock_now(NULL);
+ utime_t now = ceph_clock_now();
ASSERT_TRUE(now < hard_stop);
sleep(1);
cache_ioctx.set_namespace("");
// keep reading until we see our object appear in the HitSet
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
utime_t hard_stop = start + utime_t(600, 0);
while (true) {
- utime_t now = ceph_clock_now(NULL);
+ utime_t now = ceph_clock_now();
ASSERT_TRUE(now < hard_stop);
string name = "foo";
cache_ioctx.set_namespace("");
// do a bunch of writes and make sure the hitsets rotate
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
utime_t hard_stop = start + utime_t(count * period * 50, 0);
time_t first = 0;
}
}
- utime_t now = ceph_clock_now(NULL);
+ utime_t now = ceph_clock_now();
ASSERT_TRUE(now < hard_stop);
sleep(1);
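The HitSet tests bound their polling with a hard deadline built by adding a utime_t offset to the start time. A sketch of that idiom (poll_until and done are illustrative):

    // Illustration only: poll once a second until done() holds or the
    // deadline passes.
    #include <unistd.h>
    #include "common/Clock.h"

    bool poll_until(bool (*done)(), int timeout_sec) {
      utime_t start = ceph_clock_now();
      utime_t hard_stop = start + utime_t(timeout_sec, 0);
      while (!done()) {
        if (!(ceph_clock_now() < hard_stop))
          return false;               // deadline exceeded
        sleep(1);
      }
      return true;
    }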
if (new_version) {
file->snap_id = snapc.seq;
- file->mtime = ceph_clock_now(m_client->cct()).sec();
+ file->mtime = ceph_clock_now().sec();
m_pool->files[oid].push_back(file);
}
return file;
bool wait_for_entries_available(librbd::ImageCtx *ictx) {
Mutex::Locker locker(m_replay_handler.lock);
while (!m_replay_handler.entries_available) {
- if (m_replay_handler.cond.WaitInterval(ictx->cct, m_replay_handler.lock,
- utime_t(10, 0)) != 0) {
- return false;
+ if (m_replay_handler.cond.WaitInterval(m_replay_handler.lock,
+ utime_t(10, 0)) != 0) {
+ return false;
}
}
m_replay_handler.entries_available = false;
bool wait_for_notifies(librbd::ImageCtx &ictx) {
Mutex::Locker l(m_callback_lock);
while (m_notifies.size() < m_notify_acks.size()) {
- int r = m_callback_cond.WaitInterval(ictx.cct, m_callback_lock,
- utime_t(10, 0));
+ int r = m_callback_cond.WaitInterval(m_callback_lock,
+ utime_t(10, 0));
if (r != 0) {
break;
}
bool wait(librbd::ImageCtx *ictx, uint64_t offset_, uint64_t total_) {
Mutex::Locker l(mutex);
while (!received) {
- int r = cond.WaitInterval(ictx->cct, mutex, utime_t(10, 0));
+ int r = cond.WaitInterval(mutex, utime_t(10, 0));
if (r != 0) {
break;
}
void wait_for_size(size_t size) {
Mutex::Locker locker(m_lock);
while (m_size != size) {
- CephContext* cct = reinterpret_cast<CephContext*>(_rados.cct());
- ASSERT_EQ(0, m_cond.WaitInterval(cct, m_lock, seconds(5)));
+ ASSERT_EQ(0, m_cond.WaitInterval(m_lock, seconds(5)));
}
}
rbd_image_t &m_image;
void wait_for_size(size_t size) {
Mutex::Locker locker(m_lock);
while (m_size != size) {
- CephContext* cct = reinterpret_cast<CephContext*>(_rados.cct());
- ASSERT_EQ(0, m_cond.WaitInterval(cct, m_lock, seconds(5)));
+ ASSERT_EQ(0, m_cond.WaitInterval(m_lock, seconds(5)));
}
}
librbd::Image &m_image;
bool wait_for_watch(MockImageCtx &mock_image_ctx, size_t count) {
Mutex::Locker locker(m_lock);
while (m_watch_count < count) {
- if (m_cond.WaitInterval(mock_image_ctx.cct, m_lock,
- utime_t(10, 0)) != 0) {
+ if (m_cond.WaitInterval(m_lock, utime_t(10, 0)) != 0) {
return false;
}
}
if (timeout > 0) {
utime_t cond_timeout;
cond_timeout.set_from_double(timeout);
- utime_t s = ceph_clock_now(g_ceph_context);
- err = cond.WaitInterval(g_ceph_context, lock, cond_timeout);
- utime_t e = ceph_clock_now(g_ceph_context);
+ utime_t s = ceph_clock_now();
+ err = cond.WaitInterval(lock, cond_timeout);
+ utime_t e = ceph_clock_now();
dout(20) << __func__ << " took " << (e-s) << " seconds" << dendl;
} else {
err = cond.Wait(lock);
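Here the double-valued timeout is converted with set_from_double() and the wait is bracketed by clock reads so the actual blocking time can be logged. A sketch under the same assumptions (wait_with_timeout is illustrative):

    // Illustration only. set_from_double() splits a fractional-second
    // timeout into the sec/nsec fields of a utime_t.
    #include "common/Clock.h"
    #include "common/Mutex.h"
    #include "common/Cond.h"

    int wait_with_timeout(Cond &cond, Mutex &lock, double timeout,
                          utime_t *waited) {
      utime_t cond_timeout;
      cond_timeout.set_from_double(timeout);
      utime_t s = ceph_clock_now();
      int err = cond.WaitInterval(lock, cond_timeout);
      *waited = ceph_clock_now() - s;
      return err;                     // nonzero if the interval elapsed
    }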
void boot() {
dout(1) << __func__ << " boot?" << dendl;
- utime_t now = ceph_clock_now(messenger->cct);
+ utime_t now = ceph_clock_now();
if ((last_boot_attempt > 0.0)
&& ((now - last_boot_attempt)) <= STUB_BOOT_INTERVAL) {
dout(1) << __func__ << " backoff and try again later." << dendl;
void add_pg(pg_t pgid, epoch_t epoch, pg_t parent) {
- utime_t now = ceph_clock_now(messenger->cct);
+ utime_t now = ceph_clock_now();
pg_stat_t s;
s.created = epoch;
void send_pg_stats() {
dout(10) << __func__
<< " pgs " << pgs.size() << " osdmap " << osdmap << dendl;
- utime_t now = ceph_clock_now(messenger->cct);
+ utime_t now = ceph_clock_now();
MPGStats *mstats = new MPGStats(monc.get_fsid(), osdmap.get_epoch(), now);
mstats->set_tid(1);
assert(pgs.count(pgid) > 0);
pg_stat_t &s = pgs[pgid];
- utime_t now = ceph_clock_now(messenger->cct);
+ utime_t now = ceph_clock_now();
if (now - s.last_change < 10.0) {
dout(10) << __func__
dout(10) << __func__
<< " send " << num_entries << " log messages" << dendl;
- utime_t now = ceph_clock_now(messenger->cct);
+ utime_t now = ceph_clock_now();
int seq = 0;
for (; num_entries > 0; --num_entries) {
LogEntry e;
woken = true;
}
bool poll(int milliseconds) {
- auto start = ceph::coarse_real_clock::now(g_ceph_context);
+ auto start = ceph::coarse_real_clock::now();
while (!woken) {
center->process_events(sleepus);
usleep(sleepus);
auto r = std::chrono::duration_cast<std::chrono::milliseconds>(
- ceph::coarse_real_clock::now(g_ceph_context) - start);
+ ceph::coarse_real_clock::now() - start);
if (r >= std::chrono::milliseconds(milliseconds))
break;
}
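ceph::coarse_real_clock::now() also goes zero-argument; its time_points subtract into durations that compose with std::chrono, as the poll loop above shows. A sketch, assuming the declarations in common/ceph_time.h (poll_ms is illustrative):

    // Illustration only: spin with a small sleep until woken() holds or
    // the millisecond budget is spent.
    #include <chrono>
    #include <unistd.h>
    #include "common/ceph_time.h"

    bool poll_ms(bool (*woken)(), int milliseconds) {
      auto start = ceph::coarse_real_clock::now();
      while (!woken()) {
        usleep(1000);
        auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(
            ceph::coarse_real_clock::now() - start);
        if (elapsed >= std::chrono::milliseconds(milliseconds))
          return false;               // budget exhausted
      }
      return true;
    }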
t += 1000*1000*500;
Mutex::Locker l(cli_dispatcher.lock);
while (!cli_dispatcher.got_new)
- cli_dispatcher.cond.WaitInterval(g_ceph_context, cli_dispatcher.lock, t);
+ cli_dispatcher.cond.WaitInterval(cli_dispatcher.lock, t);
ASSERT_TRUE(cli_dispatcher.got_new);
cli_dispatcher.got_new = false;
}
t += 1000*1000*500;
Mutex::Locker l(cli_dispatcher.lock);
while (!cli_dispatcher.got_new)
- cli_dispatcher.cond.WaitInterval(g_ceph_context, cli_dispatcher.lock, t);
+ cli_dispatcher.cond.WaitInterval(cli_dispatcher.lock, t);
ASSERT_TRUE(cli_dispatcher.got_new);
cli_dispatcher.got_new = false;
}
TEST_P(KVTest, BenchCommit) {
int n = 1024;
ASSERT_EQ(0, db->create_and_open(cout));
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
{
cout << "priming" << std::endl;
// prime
t->set("prefix", "key" + stringify(i), data);
db->submit_transaction_sync(t);
}
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
utime_t dur = end - start;
cout << n << " commits in " << dur << ", avg latency " << (dur / (double)n)
<< std::endl;
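The average printed here is the elapsed utime_t divided by the iteration count; the implicit double conversion makes it plain floating-point division in seconds. As a sketch (report_avg_latency is illustrative):

    // Illustration only; utime_t streams via the operator<< in
    // include/utime.h.
    #include <iostream>
    #include "common/Clock.h"

    void report_avg_latency(int n, utime_t start, utime_t end) {
      utime_t dur = end - start;
      std::cout << n << " commits in " << dur
                << ", avg latency " << (dur / (double)n) << std::endl;
    }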
cout << "get_encoded_bytes: ";
}
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
if (legacy) {
for (int i = 0; i < max; ++i) {
a.get_encoded_bytes_test();
}
}
- utime_t end = ceph_clock_now(NULL);
+ utime_t end = ceph_clock_now();
cout << max << " encodes in " << (end - start) << std::endl;
}
void WorkloadGenerator::do_stats()
{
- utime_t now = ceph_clock_now(NULL);
+ utime_t now = ceph_clock_now();
m_stats_lock.Lock();
utime_t duration = (now - m_stats_begin);
int ops_run = 0;
utime_t stats_interval(m_stats_show_secs, 0);
- utime_t now = ceph_clock_now(NULL);
+ utime_t now = ceph_clock_now();
utime_t stats_time = now;
m_stats_begin = now;
if (m_do_stats) {
- utime_t now = ceph_clock_now(NULL);
+ utime_t now = ceph_clock_now();
utime_t elapsed = now - stats_time;
if (elapsed >= stats_interval) {
do_stats();
}
- utime_t start = ceph_clock_now(g_ceph_context);
+ utime_t start = ceph_clock_now();
while (loop++ < count) {
int ret = safe_write(fd, buf, bsize);
if (ret)
}
::fsync(fd);
::close(fd);
- utime_t end = ceph_clock_now(g_ceph_context);
+ utime_t end = ceph_clock_now();
end -= start;
int s = blocks*4096;
- utime_t start = ceph_clock_now(g_ceph_context);
+ utime_t start = ceph_clock_now();
for (int i=0; i<count; i++) {
off64_t o = (lrand48() % numblocks) * 4096;
//cout << "s = " << s << " o = " << o << endl;
//int r = ::read(fd, buf, s);
if (r < 0) cout << "r = " << r << " " << strerror(errno) << endl;
}
- utime_t end = ceph_clock_now(g_ceph_context);
+ utime_t end = ceph_clock_now();
double timeper = end - start;
timeper /= count;
so = o + 4096*((rand() % range) );//- range/2);
//cout << o << " " << so << " " << (so-o) << endl;
- utime_t start = ceph_clock_now(g_ceph_context);
+ utime_t start = ceph_clock_now();
lseek64(fd, so, SEEK_SET);
r = ::read(fd, buf, blocks*4096);
- utime_t end = ceph_clock_now(g_ceph_context);
+ utime_t end = ceph_clock_now();
timeper += (end-start);
}
DecayCounter dc(hl);
RealCounter rc;
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
for (int ms=0; ms < 300*1000; ms++) {
if (ms % 30000 == 0) {
oid = name.str();
}
void Writer::start_time() {
- begin_time = ceph_clock_now(g_ceph_context);
+ begin_time = ceph_clock_now();
}
void Writer::stop_time() {
- end_time = ceph_clock_now(g_ceph_context);
+ end_time = ceph_clock_now();
}
double Writer::get_time() {
return (end_time - begin_time) * 1000;
int count = 100000;
uint64_t start = Cycles::rdtsc();
for (int i = 0; i < count; i++) {
- ceph_clock_now(g_ceph_context);
+ ceph_clock_now();
}
uint64_t stop = Cycles::rdtsc();
return Cycles::to_seconds(stop - start)/count;
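This microbenchmark prices a single ceph_clock_now() call in cycles, then converts to seconds. A sketch, assuming common/Cycles.h and that Cycles::init() has already calibrated to_seconds() (clock_call_cost is illustrative):

    // Illustration only: average per-call cost of the zero-argument clock.
    #include "common/Clock.h"
    #include "common/Cycles.h"

    double clock_call_cost() {
      const int count = 100000;
      uint64_t start = Cycles::rdtsc();
      for (int i = 0; i < count; i++)
        ceph_clock_now();             // external call; not optimized away
      uint64_t stop = Cycles::rdtsc();
      return Cycles::to_seconds(stop - start) / count;
    }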
std::function<void()> fn = []() {}) {
Mutex::Locker locker(mock_object_copy_request.lock);
while (mock_object_copy_request.object_contexts.count(object_num) == 0) {
- if (mock_object_copy_request.cond.WaitInterval(m_local_image_ctx->cct,
- mock_object_copy_request.lock,
+ if (mock_object_copy_request.cond.WaitInterval(mock_object_copy_request.lock,
utime_t(10, 0)) != 0) {
return false;
}
MockImageCopyRequest::SnapMap wait_for_snap_map(MockObjectCopyRequest &mock_object_copy_request) {
Mutex::Locker locker(mock_object_copy_request.lock);
while (mock_object_copy_request.snap_map == nullptr) {
- if (mock_object_copy_request.cond.WaitInterval(m_local_image_ctx->cct,
- mock_object_copy_request.lock,
+ if (mock_object_copy_request.cond.WaitInterval(mock_object_copy_request.lock,
utime_t(10, 0)) != 0) {
return MockImageCopyRequest::SnapMap();
}
while (in_flight > max) {
utime_t dur;
dur.set_from_double(.2);
- cond.WaitInterval(g_ceph_context, lock, dur);
+ cond.WaitInterval(lock, dur);
}
}
Mutex::Locker locker(m_watch_ctx->lock);
while (!m_watch_ctx->notified) {
- if (m_watch_ctx->cond.WaitInterval(g_ceph_context, m_watch_ctx->lock,
+ if (m_watch_ctx->cond.WaitInterval(m_watch_ctx->lock,
utime_t(seconds, 0)) != 0) {
return false;
}
}
while (true) {
- utime_t now = ceph_clock_now(NULL);
+ utime_t now = ceph_clock_now();
int r = ::pwrite(fd, fn, strlen(fn), 0);
assert(r >= 0);
- utime_t lat = ceph_clock_now(NULL);
+ utime_t lat = ceph_clock_now();
lat -= now;
utime_t oldmin;
if (!latency.empty())
char aes_key[AES_KEY_LEN];
memset(aes_key, 0x77, sizeof(aes_key));
bufferptr keybuf(aes_key, sizeof(aes_key));
- CryptoKey key(CEPH_CRYPTO_AES, ceph_clock_now(g_ceph_context), keybuf);
+ CryptoKey key(CEPH_CRYPTO_AES, ceph_clock_now(), keybuf);
const char *msg="hello! this is a message\n";
char pad[16];
char aes_key[AES_KEY_LEN];
memset(aes_key, 0x77, sizeof(aes_key));
bufferptr keybuf(aes_key, sizeof(aes_key));
- CryptoKey key(CEPH_CRYPTO_AES, ceph_clock_now(g_ceph_context), keybuf);
+ CryptoKey key(CEPH_CRYPTO_AES, ceph_clock_now(), keybuf);
const char *msg="hello! this is a message\n";
char pad[16];
uint64_t total_size = 0;
uint64_t total_txs = 0;
- utime_t started_at = ceph_clock_now(g_ceph_context);
+ utime_t started_at = ceph_clock_now();
do {
int num_keys = 0;
if (num_keys > 0)
other->submit_transaction_sync(tx);
- utime_t cur_duration = ceph_clock_now(g_ceph_context) - started_at;
+ utime_t cur_duration = ceph_clock_now() - started_at;
std::cout << "ts = " << cur_duration << "s, copied " << total_keys
<< " keys so far (" << stringify(si_t(total_size)) << ")"
<< std::endl;
} while (it->valid());
- utime_t time_taken = ceph_clock_now(g_ceph_context) - started_at;
+ utime_t time_taken = ceph_clock_now() - started_at;
std::cout << "summary:" << std::endl;
std::cout << " copied " << total_keys << " keys" << std::endl;
}
::encode(ps->second, dirty_pgs);
}
- utime_t inc_stamp = ceph_clock_now(NULL);
+ utime_t inc_stamp = ceph_clock_now();
::encode(inc_stamp, trans_bl);
::encode_destructively(dirty_pgs, trans_bl);
bufferlist dirty_osds;
// the first pgmap_meta
t->put(prefix, "version", 1);
{
- auto stamp = ceph_clock_now(g_ceph_context);
+ auto stamp = ceph_clock_now();
bufferlist bl;
::encode(stamp, bl);
t->put(prefix, "stamp", bl);
// (we won't actually give the *correct* dirstat here though)
inode.inode.dirstat.nfiles = 1;
- inode.inode.ctime =
- inode.inode.mtime = ceph_clock_now(g_ceph_context);
+ inode.inode.ctime =
+ inode.inode.mtime = ceph_clock_now();
inode.inode.nlink = 1;
inode.inode.truncate_size = -1ull;
inode.inode.truncate_seq = 1;
C_SaferCond header_cond;
lock.Lock();
objecter->write_full(oid, oloc, snapc, hbl,
- ceph::real_clock::now(g_ceph_context), 0,
+ ceph::real_clock::now(), 0,
NULL, &header_cond);
lock.Unlock();
cout << "Purging " << purge_count << " objects from " << last_obj << std::endl;
lock.Lock();
filer.purge_range(ino, &h.layout, snapc, last_obj, purge_count,
- ceph::real_clock::now(g_ceph_context), 0, &purge_cond);
+ ceph::real_clock::now(), 0, &purge_cond);
lock.Unlock();
purge_cond.wait();
}
C_SaferCond write_cond;
lock.Lock();
filer.write(ino, &h.layout, snapc, pos, l, j,
- ceph::real_clock::now(g_ceph_context), 0, NULL, &write_cond);
+ ceph::real_clock::now(), 0, NULL, &write_cond);
lock.Unlock();
r = write_cond.wait();
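Objecter and Filer take their mtime as a ceph::real_clock time_point rather than a utime_t, so these sites move to ceph::real_clock::now(), which likewise drops its CephContext* argument. A hedged sketch of the write-path shape, with the argument order taken from the call above (write_header is illustrative; consult osdc/Objecter.h for the authoritative signature):

    // Illustration only: (oid, oloc, snapc, bl, mtime, flags, onack, oncommit).
    #include "common/ceph_time.h"
    #include "osdc/Objecter.h"

    void write_header(Objecter *objecter, const object_t &oid,
                      const object_locator_t &oloc, const SnapContext &snapc,
                      bufferlist &hbl, Context *oncommit) {
      objecter->write_full(oid, oloc, snapc, hbl,
                           ceph::real_clock::now(), 0,
                           nullptr /* onack */, oncommit);
    }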
if (create) {
monmap.epoch = 0;
- monmap.created = ceph_clock_now(g_ceph_context);
+ monmap.created = ceph_clock_now();
monmap.last_changed = monmap.created;
srand(getpid() + time(0));
if (g_conf->fsid.is_zero()) {
}
float time_passed() {
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
now -= start_time;
uint64_t ns = now.nsec();
float total = (float) ns / 1000000000.0;
int LoadGen::run()
{
- start_time = ceph_clock_now(g_ceph_context);
+ start_time = ceph_clock_now();
utime_t end_time = start_time;
end_time += run_length;
utime_t stamp_time = start_time;
while (1) {
lock.Lock();
utime_t one_second(1, 0);
- cond.WaitInterval(g_ceph_context, lock, one_second);
+ cond.WaitInterval(lock, one_second);
lock.Unlock();
- utime_t now = ceph_clock_now(g_ceph_context);
+ utime_t now = ceph_clock_now();
if (now > end_time)
break;
- uint64_t expected = total_expected();
+ uint64_t expected = total_expected();
lock.Lock();
uint64_t sent = total_sent;
uint64_t completed = total_completed;
while (in_flight > max) {
utime_t dur;
dur.set_from_double(.2);
- cond.WaitInterval(g_ceph_context, lock, dur);
+ cond.WaitInterval(lock, dur);
}
}
srand(time(NULL) % (unsigned long) -1);
- utime_t start = ceph_clock_now(NULL);
+ utime_t start = ceph_clock_now();
utime_t last;
unsigned ios = 0;
cur_off += io_size;
}
- utime_t now = ceph_clock_now(NULL);
+ utime_t now = ceph_clock_now();
utime_t elapsed = now - start;
if (last.is_zero()) {
last = elapsed;
<< std::endl;
}
- utime_t now = ceph_clock_now(NULL);
+ utime_t now = ceph_clock_now();
double elapsed = now - start;
printf("elapsed: %5d ops: %8d ops/sec: %8.2lf bytes/sec: %8.2lf\n",
if (!m_manual_stop) {
update_replayers(m_local_cluster_watcher->get_pool_peers());
}
- m_cond.WaitInterval(g_ceph_context, m_lock,
- utime_t(m_cct->_conf->rbd_mirror_pool_replayers_refresh_interval, 0));
+ m_cond.WaitInterval(
+ m_lock,
+ utime_t(m_cct->_conf->rbd_mirror_pool_replayers_refresh_interval, 0));
}
// stop all replayers in parallel
if (m_blacklisted) {
break;
}
- m_cond.WaitInterval(g_ceph_context, m_lock,
- utime_t(g_ceph_context->_conf->rbd_mirror_image_state_check_interval, 0));
+ m_cond.WaitInterval(m_lock,
+ utime_t(g_ceph_context->_conf
+ ->rbd_mirror_image_state_check_interval, 0));
}
ImageIds empty_sources;
if (m_image_replayers.empty()) {
break;
}
- m_cond.WaitInterval(g_ceph_context, m_lock, seconds(1));
+ m_cond.WaitInterval(m_lock, seconds(1));
}
}