int CryptoKey::set_secret(int type, bufferptr& s)
{
this->type = type;
- created = g_clock.now();
+ created = ceph_clock_now(&g_ceph_context);
CryptoHandler *h = get_crypto_handler(type);
if (!h)
int CryptoKey::create(int t)
{
type = t;
- created = g_clock.now();
+ created = ceph_clock_now(&g_ceph_context);
CryptoHandler *h = get_crypto_handler(type);
if (!h)
if (secrets.secrets.size() > 1)
++riter;
- if (riter->second.expiration < g_clock.now())
+ if (riter->second.expiration < ceph_clock_now(&g_ceph_context))
++riter; // "current" key has expired, use "next" key instead
secret_id = riter->first;
if (added) {
data.rotating_ver++;
- //data.next_rotating_time = g_clock.now();
+ //data.next_rotating_time = ceph_clock_now(&g_ceph_context);
//data.next_rotating_time += MIN(g_conf->auth_mon_ticket_ttl, g_conf->auth_service_ticket_ttl);
_dump_rotating_secrets();
return true;
{
RotatingSecrets& r = data.rotating_secrets[service_id];
int added = 0;
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
double ttl = service_id == CEPH_ENTITY_TYPE_AUTH ? g_conf->auth_mon_ticket_ttl : g_conf->auth_service_ticket_ttl;
while (r.need_new_secrets(now)) {
{
info.service_id = service_id;
info.ticket = auth_ticket_info.ticket;
- info.ticket.init_timestamps(g_clock.now(), g_conf->auth_service_ticket_ttl);
+ info.ticket.init_timestamps(ceph_clock_now(&g_ceph_context), g_conf->auth_service_ticket_ttl);
generate_secret(info.session_key);
<< " validity=" << msg_a.validity << dendl;
session_key = msg_a.session_key;
if (!msg_a.validity.is_zero()) {
- expires = g_clock.now();
+ expires = ceph_clock_now(cct);
expires += msg_a.validity;
renew_after = expires;
renew_after -= ((double)msg_a.validity.sec() / 4);
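
Reviewer note: the arithmetic above front-loads renewal into the last quarter of the ticket's lifetime. A worked example, using the default auth_service_ticket_ttl of 60*60 = 3600 seconds from the OPTION block in this patch:

  // validity = 3600s, ticket acquired at time T:
  //   expires     = T + 3600
  //   renew_after = expires - 3600/4 = T + 2700
  // need_key() starts returning true at T+2700, while have_key() stays
  // true until T+3600, leaving a 900s window to renew before expiry.
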
bool CephXTicketHandler::have_key()
{
if (have_key_flag) {
- //dout(20) << "have_key: g_clock.now()=" << g_clock.now() << " renew_after=" << renew_after << " expires=" << expires << dendl;
- have_key_flag = g_clock.now() < expires;
+ have_key_flag = ceph_clock_now(cct) < expires;
}
return have_key_flag;
bool CephXTicketHandler::need_key()
{
if (have_key_flag) {
- //dout(20) << "need_key: g_clock.now()=" << g_clock.now() << " renew_after=" << renew_after << " expires=" << expires << dendl;
- return (!expires.is_zero()) && (g_clock.now() >= renew_after);
+ return (!expires.is_zero()) && (ceph_clock_now(cct) >= renew_after);
}
return true;
}
CephXServiceTicketInfo old_ticket_info;
- if (cephx_decode_ticket(&g_ceph_context, key_server, CEPH_ENTITY_TYPE_AUTH,
+ if (cephx_decode_ticket(cct, key_server, CEPH_ENTITY_TYPE_AUTH,
req.old_ticket, old_ticket_info)) {
global_id = old_ticket_info.ticket.global_id;
ldout(cct, 10) << "decoded old_ticket with global_id=" << global_id << dendl;
should_enc_ticket = true;
}
- info.ticket.init_timestamps(g_clock.now(), cct->_conf->auth_mon_ticket_ttl);
+ info.ticket.init_timestamps(ceph_clock_now(cct), cct->_conf->auth_mon_ticket_ttl);
info.ticket.name = entity_name;
info.ticket.global_id = global_id;
info.ticket.auid = eauth.auid;
bufferlist tmp_bl;
CephXServiceTicketInfo auth_ticket_info;
- if (!cephx_verify_authorizer(&g_ceph_context, key_server, indata, auth_ticket_info, tmp_bl)) {
+ if (!cephx_verify_authorizer(cct, key_server, indata, auth_ticket_info, tmp_bl)) {
ret = -EPERM;
break;
}
bufferlist *pdirbl)
{
// time the call
- utime_t start = g_clock.now();
+ utime_t start = ceph_clock_now(&g_ceph_context);
bool nojournal = false;
int op = request->get_op();
// -- log times --
if (client_logger) {
- utime_t lat = g_clock.now();
+ utime_t lat = ceph_clock_now(&g_ceph_context);
lat -= request->sent_stamp;
dout(20) << "lat " << lat << dendl;
client_logger->favg(l_c_lat,(double)lat);
r->releases = request->cap_releases;
if (request->mds == -1) {
- request->sent_stamp = g_clock.now();
+ request->sent_stamp = ceph_clock_now(&g_ceph_context);
dout(20) << "send_request set sent_stamp to " << request->sent_stamp << dendl;
}
request->mds = mds;
void Client::release_lease(Inode *in, Dentry *dn, int mask)
{
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
assert(dn);
void Client::cap_delay_requeue(Inode *in)
{
dout(10) << "cap_delay_requeue on " << *in << dendl;
- in->hold_caps_until = g_clock.now();
+ in->hold_caps_until = ceph_clock_now(&g_ceph_context);
in->hold_caps_until += 5.0;
delayed_caps.push_back(&in->cap_item);
else
in->hold_caps_until = utime_t();
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
map<int,InodeCap*>::iterator it = in->caps.begin();
while (it != in->caps.end()) {
tick_event = new C_C_Tick(this);
timer.add_event_after(g_conf->client_tick_interval, tick_event);
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
if (mdsmap->get_epoch()) {
// renew caps?
void Client::renew_caps()
{
dout(10) << "renew_caps()" << dendl;
- last_cap_renew = g_clock.now();
+ last_cap_renew = ceph_clock_now(&g_ceph_context);
for (map<int,MDSSession*>::iterator p = mds_sessions.begin();
p != mds_sessions.end();
void Client::renew_caps(const int mds) {
dout(10) << "renew_caps mds" << mds << dendl;
MDSSession *session = mds_sessions[mds];
- session->last_cap_renew_request = g_clock.now();
+ session->last_cap_renew_request = ceph_clock_now(&g_ceph_context);
uint64_t seq = ++session->cap_renew_seq;
messenger->send_message(new MClientSession(CEPH_SESSION_REQUEST_RENEWCAPS, seq),
mdsmap->get_inst(mds));
<< dendl;
// is dn lease valid?
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
if (dn->lease_mds >= 0 &&
dn->lease_ttl > now &&
mds_sessions.count(dn->lease_mds)) {
Dentry *dn = *pdn = dir->dir->dentries[name];
// is dn lease valid?
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
if (dn->inode &&
dn->lease_mds >= 0 &&
dn->lease_ttl > now &&
if (!mask) {
// caller just needs us to bump the ctime
- in->ctime = g_clock.now();
+ in->ctime = ceph_clock_now(&g_ceph_context);
if (issued & CEPH_CAP_AUTH_EXCL)
mark_caps_dirty(in, CEPH_CAP_AUTH_EXCL);
else if (issued & CEPH_CAP_FILE_EXCL)
if (in->caps_issued_mask(CEPH_CAP_AUTH_EXCL)) {
if (mask & CEPH_SETATTR_MODE) {
- in->ctime = g_clock.now();
+ in->ctime = ceph_clock_now(&g_ceph_context);
in->mode = (in->mode & ~07777) | (attr->st_mode & 07777);
mark_caps_dirty(in, CEPH_CAP_AUTH_EXCL);
mask &= ~CEPH_SETATTR_MODE;
}
if (mask & CEPH_SETATTR_UID) {
- in->ctime = g_clock.now();
+ in->ctime = ceph_clock_now(&g_ceph_context);
in->uid = attr->st_uid;
mark_caps_dirty(in, CEPH_CAP_AUTH_EXCL);
mask &= ~CEPH_SETATTR_UID;
}
if (mask & CEPH_SETATTR_GID) {
- in->ctime = g_clock.now();
+ in->ctime = ceph_clock_now(&g_ceph_context);
in->gid = attr->st_gid;
mark_caps_dirty(in, CEPH_CAP_AUTH_EXCL);
mask &= ~CEPH_SETATTR_GID;
in->mtime = utime_t(attr->st_mtim.tv_sec, attr->st_mtim.tv_nsec);
if (mask & CEPH_SETATTR_ATIME)
in->atime = utime_t(attr->st_atim.tv_sec, attr->st_atim.tv_nsec);
- in->ctime = g_clock.now();
+ in->ctime = ceph_clock_now(&g_ceph_context);
in->time_warp_seq++;
mark_caps_dirty(in, CEPH_CAP_FILE_EXCL);
mask &= ~(CEPH_SETATTR_MTIME|CEPH_SETATTR_ATIME);
dout(10) << "cur file size is " << in->size << dendl;
// time it.
- utime_t start = g_clock.now();
+ utime_t start = ceph_clock_now(&g_ceph_context);
// copy into fresh buffer (since our write may be resub, async)
bufferptr bp;
// async, caching, non-blocking.
objectcacher->file_write(&in->oset, &in->layout, in->snaprealm->get_snap_context(),
- offset, size, bl, g_clock.now(), 0);
+ offset, size, bl, ceph_clock_now(&g_ceph_context), 0);
put_cap_ref(in, CEPH_CAP_FILE_BUFFER);
} else {
/*
// atomic, synchronous, blocking.
objectcacher->file_atomic_sync_write(in->ino, &in->layout, in->snaprealm->get_snap_context(),
- offset, size, bl, g_clock.now(), 0, client_lock);
+ offset, size, bl, ceph_clock_now(&g_ceph_context), 0, client_lock);
*/
// simple, non-atomic sync write
Mutex flock("Client::_write flock");
get_cap_ref(in, CEPH_CAP_FILE_BUFFER); // released by onsafe callback
filer->write_trunc(in->ino, &in->layout, in->snaprealm->get_snap_context(),
- offset, size, bl, g_clock.now(), filer_flags,
+ offset, size, bl, ceph_clock_now(&g_ceph_context), filer_flags,
in->truncate_size, in->truncate_seq,
onfinish, onsafe);
}
// time
- utime_t lat = g_clock.now();
+ utime_t lat = ceph_clock_now(&g_ceph_context);
lat -= start;
if (client_logger)
client_logger->favg(l_c_wrlat,(double)lat);
}
// mtime
- in->mtime = g_clock.now();
+ in->mtime = ceph_clock_now(&g_ceph_context);
mark_caps_dirty(in, CEPH_CAP_FILE_WR);
put_cap_ref(in, CEPH_CAP_FILE_WR);
Inode *diri = 0;
Inode *in = 0;
int r = 0;
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
if (inode_map.count(parent) == 0) {
dout(1) << "ll_lookup " << parent << " " << name << " -> ENOENT (parent DNE... WTF)" << dendl;
/*cout << "cap_gen " << cap->session-> cap_gen << std::endl
<< "session gen " << cap->gen << std::endl
<< "cap expire " << cap->session->cap_ttl << std::endl
- << "cur time " << g_clock.now() << std::endl;*/
+ << "cur time " << ceph_clock_now(&g_ceph_context) << std::endl;*/
if ((cap->session->cap_gen <= cap->gen)
- && (g_clock.now() < cap->session->cap_ttl)) {
+ && (ceph_clock_now(&g_ceph_context) < cap->session->cap_ttl)) {
return true;
}
//if we make it here, the capabilities aren't up-to-date
this->iargs = syn_iargs;
this->sargs = syn_sargs;
- run_start = g_clock.now();
+ run_start = ceph_clock_now(&g_ceph_context);
}
return -1;
}
- //run_start = g_clock.now();
+ //run_start = ceph_clock_now(&g_ceph_context);
run_until = utime_t(0,0);
dout(5) << "run" << dendl;
iargs.pop_front();
if (iarg1 && run_me()) {
dout(2) << "sleepuntil " << iarg1 << dendl;
- utime_t at = g_clock.now() - run_start;
+ utime_t at = ceph_clock_now(&g_ceph_context) - run_start;
if (at.sec() < iarg1)
sleep(iarg1 - at.sec());
}
if (iarg1 == 0) iarg1 = 1; // play trace at least once!
for (int i=0; i<iarg1; i++) {
- utime_t start = g_clock.now();
+ utime_t start = ceph_clock_now(&g_ceph_context);
if (time_to_stop()) break;
play_trace(t, prefix, !playdata);
if (time_to_stop()) break;
if (iarg1 > 1) clean_dir(prefix); // clean only if repeat
- utime_t lat = g_clock.now();
+ utime_t lat = ceph_clock_now(&g_ceph_context);
lat -= start;
dout(0) << " trace " << tfile << " loop " << (i+1) << "/" << iarg1 << " done in " << (double)lat << " seconds" << dendl;
char buf[1024];
char buf2[1024];
- utime_t start = g_clock.now();
+ utime_t start = ceph_clock_now(&g_ceph_context);
hash_map<int64_t, int64_t> open_files;
hash_map<int64_t, dir_result_t*> open_dirs;
bufferlist bl;
bl.push_back(bp);
SnapContext snapc;
- client->objecter->write(oid, oloc, off, len, snapc, bl, g_clock.now(), 0,
+ client->objecter->write(oid, oloc, off, len, snapc, bl, ceph_clock_now(&g_ceph_context), 0,
new C_SafeCond(&lock, &cond, &ack),
safeg->new_sub());
while (!ack) cond.Wait(lock);
object_locator_t oloc(CEPH_DATA_RULE);
lock.Lock();
SnapContext snapc;
- client->objecter->zero(oid, oloc, off, len, snapc, g_clock.now(), 0,
+ client->objecter->zero(oid, oloc, off, len, snapc, ceph_clock_now(&g_ceph_context), 0,
new C_SafeCond(&lock, &cond, &ack),
safeg->new_sub());
while (!ack) cond.Wait(lock);
dout(3) << "read_dirs " << basedir << " dirs " << dirs << " files " << files << " depth " << depth << dendl;
list<string> contents;
- utime_t s = g_clock.now();
+ utime_t s = ceph_clock_now(&g_ceph_context);
int r = client->getdir(basedir, contents);
- utime_t e = g_clock.now();
+ utime_t e = ceph_clock_now(&g_ceph_context);
e -= s;
if (r < 0) {
dout(0) << "read_dirs couldn't readdir " << basedir << ", stopping" << dendl;
for (int i=0; i<files; i++) {
snprintf(d, sizeof(d), "%s/file.%d", basedir, i);
- utime_t s = g_clock.now();
+ utime_t s = ceph_clock_now(&g_ceph_context);
if (client->lstat(d, &st) < 0) {
dout(2) << "read_dirs failed stat on " << d << ", stopping" << dendl;
return -1;
}
- utime_t e = g_clock.now();
+ utime_t e = ceph_clock_now(&g_ceph_context);
e -= s;
}
// files
struct stat st;
- utime_t start = g_clock.now();
+ utime_t start = ceph_clock_now(&g_ceph_context);
for (int c=0; c<count; c++) {
for (int n=0; n<num; n++) {
snprintf(d, sizeof(d), "dir.%d.run%d/file.client%d.%d", priv ? whoami:0, c, whoami, n);
if (time_to_stop()) return 0;
}
}
- utime_t end = g_clock.now();
+ utime_t end = ceph_clock_now(&g_ceph_context);
end -= start;
dout(0) << "makefiles time is " << end << " or " << ((double)end / (double)num) <<" per file" << dendl;
client->mkdir("orig", 0755);
client->mkdir("copy", 0755);
- utime_t start = g_clock.now();
+ utime_t start = ceph_clock_now(&g_ceph_context);
for (int i=0; i<num; i++) {
snprintf(d, sizeof(d), "orig/file.%d", i);
client->mknod(d, 0755);
}
- utime_t end = g_clock.now();
+ utime_t end = ceph_clock_now(&g_ceph_context);
end -= start;
dout(0) << "orig " << end << dendl;
// link
- start = g_clock.now();
+ start = ceph_clock_now(&g_ceph_context);
for (int i=0; i<num; i++) {
snprintf(d, sizeof(d), "orig/file.%d", i);
snprintf(e, sizeof(e), "copy/file.%d", i);
client->link(d, e);
}
- end = g_clock.now();
+ end = ceph_clock_now(&g_ceph_context);
end -= start;
dout(0) << "copy " << end << dendl;
dout(5) << "writing to " << fn << " fd " << fd << dendl;
if (fd < 0) return fd;
- utime_t from = g_clock.now();
+ utime_t from = ceph_clock_now(&g_ceph_context);
utime_t start = from;
uint64_t bytes = 0, total = 0;
bytes += wrsize;
total += wrsize;
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
if (now - from >= 1.0) {
double el = now - from;
dout(0) << "write " << (bytes / el / 1048576.0) << " MB/sec" << dendl;
client->fsync(fd, true);
- utime_t stop = g_clock.now();
+ utime_t stop = ceph_clock_now(&g_ceph_context);
double el = stop - start;
dout(0) << "write total " << (total / el / 1048576.0) << " MB/sec ("
<< total << " bytes in " << el << " seconds)" << dendl;
dout(5) << "reading from " << fn << " fd " << fd << dendl;
if (fd < 0) return fd;
- utime_t from = g_clock.now();
+ utime_t from = ceph_clock_now(&g_ceph_context);
utime_t start = from;
uint64_t bytes = 0, total = 0;
bytes += rdsize;
total += rdsize;
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
if (now - from >= 1.0) {
double el = now - from;
dout(0) << "read " << (bytes / el / 1048576.0) << " MB/sec" << dendl;
dout(0) << " + " << (bad-1) << " other bad 16-byte bits in this block" << dendl;
}
- utime_t stop = g_clock.now();
+ utime_t stop = ceph_clock_now(&g_ceph_context);
double el = stop - start;
dout(0) << "read total " << (total / el / 1048576.0) << " MB/sec ("
<< total << " bytes in " << el << " seconds)" << dendl;
}
dout(10) << "writing " << oid << dendl;
- starts.push_back(g_clock.now());
+ starts.push_back(ceph_clock_now(&g_ceph_context));
client->client_lock.Lock();
- client->objecter->write(oid, oloc, 0, osize, snapc, bl, g_clock.now(), 0,
+ client->objecter->write(oid, oloc, 0, osize, snapc, bl, ceph_clock_now(&g_ceph_context), 0,
new C_Ref(lock, cond, &unack),
new C_Ref(lock, cond, &unsafe));
client->client_lock.Unlock();
}
lock.Unlock();
- utime_t lat = g_clock.now();
+ utime_t lat = ceph_clock_now(&g_ceph_context);
lat -= starts.front();
starts.pop_front();
}
SnapContext snapc;
client->client_lock.Lock();
- utime_t start = g_clock.now();
+ utime_t start = ceph_clock_now(&g_ceph_context);
if (write) {
dout(10) << "write to " << oid << dendl;
op.op.op = CEPH_OSD_OP_STARTSYNC;
m.ops.push_back(op);
}
- client->objecter->mutate(oid, oloc, m, snapc, g_clock.now(), 0,
+ client->objecter->mutate(oid, oloc, m, snapc, ceph_clock_now(&g_ceph_context), 0,
NULL, new C_Ref(lock, cond, &unack));
/*client->objecter->write(oid, layout, 0, osize, snapc, bl, 0,
new C_Ref(lock, cond, &unack),
}
lock.Unlock();
- utime_t lat = g_clock.now();
+ utime_t lat = ceph_clock_now(&g_ceph_context);
lat -= start;
if (client_logger) {
if (write)
}
bool time_to_stop() {
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
if (0) cout << "time_to_stop .. now " << now
<< " until " << run_until
<< " start " << run_start
*/
-#include "common/config.h"
#include "common/Clock.h"
+#include "common/config.h"
+#include "include/utime.h"
#include <time.h>
ret += ((time_t)cct->_conf->clock_offset);
return ret;
}
-
-// old global clock stuff
-// TODO: remove
-Clock g_clock;
-
-Clock::Clock() {
-}
-
-Clock::~Clock() {
-}
-
-void Clock::make_timespec(utime_t& t, struct timespec *ts) {
- utime_t time = t;
-
- memset(ts, 0, sizeof(*ts));
- ts->tv_sec = time.sec();
- ts->tv_nsec = time.nsec();
-}
-
-utime_t Clock::now() {
- struct timeval tv;
- gettimeofday(&tv, NULL);
- utime_t n(&tv);
- n += g_conf->clock_offset;
- return n;
-}
-
-time_t Clock::gettime() {
- return now().sec();
-}
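
Reviewer note: a minimal sketch of the surviving entry point, assuming it mirrors the Clock::now() removed above but takes the offset from the given context's config, as the cct->_conf->clock_offset line higher up suggests:

  utime_t ceph_clock_now(CephContext *cct)
  {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    utime_t n(&tv);                  // same timeval constructor the old code used
    n += cct->_conf->clock_offset;   // per-context offset replaces g_conf/g_clock
    return n;
  }
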
#include <time.h>
-struct timespec;
-struct utime_t;
+class CephContext;
extern utime_t ceph_clock_now(CephContext *cct);
extern time_t ceph_clock_gettime(CephContext *cct);
-class Clock {
- public:
- Clock();
- ~Clock();
-
- utime_t now();
- void make_timespec(utime_t& t, struct timespec *ts);
- time_t gettime();
-};
-
-extern Clock g_clock;
-
#endif
int WaitUntil(Mutex &mutex, utime_t when) {
struct timespec ts;
- g_clock.make_timespec(when, &ts);
- //cout << "timedwait for " << ts.tv_sec << " sec " << ts.tv_nsec << " nsec" << endl;
+ when.to_timespec(&ts);
int r = pthread_cond_timedwait(&_c, &mutex._m, &ts);
return r;
}
- int WaitInterval(Mutex &mutex, utime_t interval) {
- utime_t when = g_clock.now();
+ int WaitInterval(CephContext *cct, Mutex &mutex, utime_t interval) {
+ utime_t when = ceph_clock_now(cct);
when += interval;
return WaitUntil(mutex, when);
}
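
Reviewer note: every caller must now name a CephContext explicitly, so the deadline comes from ceph_clock_now(cct) instead of the deleted global. The migration pattern, matching the MonClient, Pipe, and FileStore hunks further down:

  // old: cond.WaitInterval(mutex, interval);
  // new:
  //   utime_t interval(1, 0);                   // one second
  //   cond.WaitInterval(cct, mutex, interval);  // cct: the caller's CephContext
  // code without a context handy passes &g_ceph_context for now
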
*/
void reset() {
- reset(g_clock.now());
+ reset(ceph_clock_now(&g_ceph_context));
}
void reset(utime_t now) {
- last_decay = g_clock.now();
+ last_decay = now; // use the caller-supplied timestamp; reset() above already samples the clock
val = delta = 0;
}
dout(0) << "log " << type << " : " << s << dendl;
LogEntry e;
e.who = messenger->get_myinst();
- e.stamp = g_clock.now();
+ e.stamp = ceph_clock_now(cct);
e.seq = ++last_log;
e.type = type;
e.msg = s;
start = s;
- utime_t fromstart = g_clock.now();
+ utime_t fromstart = ceph_clock_now(&g_ceph_context);
if (fromstart < start) {
derr << "logger_tare time jumped backwards from "
<< start << " to " << fromstart << dendl;
if (logger_list.empty()) {
if (start == utime_t())
- start = g_clock.now();
+ start = ceph_clock_now(&g_ceph_context);
last_flush = 0;
}
logger_list.push_back(logger);
if (!g_conf->profiling_logger)
return;
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
utime_t fromstart = now;
if (fromstart < start) {
derr << "logger time jumped backwards from " << start << " to "
lock.Lock();
dout(10) << "timer_thread starting" << dendl;
while (!stopping) {
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
while (!schedule.empty()) {
scheduled_map_t::iterator p = schedule.begin();
{
assert(lock.is_locked());
- utime_t when = g_clock.now();
+ utime_t when = ceph_clock_now(&g_ceph_context);
when += seconds;
add_event_at(when, callback);
}
OPTION(paxos_propose_interval, OPT_DOUBLE, 1.0), // gather updates for this long before proposing a map update
OPTION(paxos_min_wait, OPT_DOUBLE, 0.05), // min time to gather updates for after period of inactivity
OPTION(paxos_observer_timeout, OPT_DOUBLE, 5*60), // gather updates for this long before proposing a map update
- OPTION(clock_offset, OPT_DOUBLE, 0), // how much to offset the system clock by with g_clock
+ OPTION(clock_offset, OPT_DOUBLE, 0), // how much to offset the system clock in Clock.cc
OPTION(auth_supported, OPT_STR, "none"),
OPTION(auth_mon_ticket_ttl, OPT_DOUBLE, 60*60*12),
OPTION(auth_service_ticket_ttl, OPT_DOUBLE, 60*60),
utime_t(const struct timeval *v) {
set_from_timeval(v);
}
-
+ void to_timespec(struct timespec *ts) const {
+ ts->tv_sec = tv.tv_sec;
+ ts->tv_nsec = tv.tv_nsec;
+ }
void set_from_double(double d) {
tv.tv_sec = (__u32)trunc(d);
tv.tv_nsec = (__u32)((d - (double)tv.tv_sec) * (double)1000000000.0);
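
Design note: to_timespec() takes over from the removed Clock::make_timespec() as a const member on utime_t, so converting an absolute deadline no longer touches any clock object. One small difference worth flagging in review: the old helper memset() the output first, while the new one only assigns the two fields pthread_cond_timedwait() reads. Usage, as in WaitUntil() above:

  utime_t when = ceph_clock_now(cct);  // absolute deadline...
  when += 5.0;                         // ...five seconds out
  struct timespec ts;
  when.to_timespec(&ts);               // ready for pthread_cond_timedwait()
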
lock.Lock();
objecter->rollback_object(oid, ctx->oloc, snapc, snapid,
- g_clock.now(), onack, NULL);
+ ceph_clock_now(cct), onack, NULL);
lock.Unlock();
mylock.Lock();
int librados::RadosClient::
create(IoCtxImpl& io, const object_t& oid, bool exclusive)
{
- utime_t ut = g_clock.now();
+ utime_t ut = ceph_clock_now(cct);
/* can't write to a snapshot */
if (io.snap_seq != CEPH_NOSNAP)
int librados::RadosClient::
write(IoCtxImpl& io, const object_t& oid, bufferlist& bl, size_t len, uint64_t off)
{
- utime_t ut = g_clock.now();
+ utime_t ut = ceph_clock_now(cct);
/* can't write to a snapshot */
if (io.snap_seq != CEPH_NOSNAP)
int librados::RadosClient::
append(IoCtxImpl& io, const object_t& oid, bufferlist& bl, size_t len)
{
- utime_t ut = g_clock.now();
+ utime_t ut = ceph_clock_now(cct);
/* can't write to a snapshot */
if (io.snap_seq != CEPH_NOSNAP)
int librados::RadosClient::
write_full(IoCtxImpl& io, const object_t& oid, bufferlist& bl)
{
- utime_t ut = g_clock.now();
+ utime_t ut = ceph_clock_now(cct);
/* can't write to a snapshot */
if (io.snap_seq != CEPH_NOSNAP)
int librados::RadosClient::
clone_range(IoCtxImpl& io, const object_t& dst_oid, uint64_t dst_offset, const object_t& src_oid, uint64_t src_offset, uint64_t len)
{
- utime_t ut = g_clock.now();
+ utime_t ut = ceph_clock_now(cct);
/* can't write to a snapshot */
if (io.snap_seq != CEPH_NOSNAP)
int librados::RadosClient::
operate(IoCtxImpl& io, const object_t& oid, ::ObjectOperation *o, bufferlist *pbl)
{
- utime_t ut = g_clock.now();
+ utime_t ut = ceph_clock_now(cct);
/* can't write to a snapshot */
if (io.snap_seq != CEPH_NOSNAP)
aio_operate(IoCtxImpl& io, const object_t& oid, ::ObjectOperation *o, AioCompletionImpl *c,
bufferlist *pbl)
{
- utime_t ut = g_clock.now();
+ utime_t ut = ceph_clock_now(cct);
Context *onack = new C_aio_Ack(c);
Context *oncommit = new C_aio_Safe(c);
aio_write(IoCtxImpl& io, const object_t &oid, AioCompletionImpl *c,
const bufferlist& bl, size_t len, uint64_t off)
{
- utime_t ut = g_clock.now();
+ utime_t ut = ceph_clock_now(cct);
/* can't write to a snapshot */
if (io.snap_seq != CEPH_NOSNAP)
aio_append(IoCtxImpl& io, const object_t &oid, AioCompletionImpl *c,
const bufferlist& bl, size_t len)
{
- utime_t ut = g_clock.now();
+ utime_t ut = ceph_clock_now(cct);
/* can't write to a snapshot */
if (io.snap_seq != CEPH_NOSNAP)
aio_write_full(IoCtxImpl& io, const object_t &oid,
AioCompletionImpl *c, const bufferlist& bl)
{
- utime_t ut = g_clock.now();
+ utime_t ut = ceph_clock_now(cct);
/* can't write to a snapshot */
if (io.snap_seq != CEPH_NOSNAP)
remove(IoCtxImpl& io, const object_t& oid)
{
::SnapContext snapc;
- utime_t ut = g_clock.now();
+ utime_t ut = ceph_clock_now(cct);
/* can't write to a snapshot */
if (io.snap_seq != CEPH_NOSNAP)
int librados::RadosClient::
trunc(IoCtxImpl& io, const object_t& oid, uint64_t size)
{
- utime_t ut = g_clock.now();
+ utime_t ut = ceph_clock_now(cct);
/* can't write to a snapshot */
if (io.snap_seq != CEPH_NOSNAP)
int librados::RadosClient::
tmap_update(IoCtxImpl& io, const object_t& oid, bufferlist& cmdbl)
{
- utime_t ut = g_clock.now();
+ utime_t ut = ceph_clock_now(cct);
/* can't write to a snapshot */
if (io.snap_seq != CEPH_NOSNAP)
exec(IoCtxImpl& io, const object_t& oid, const char *cls, const char *method,
bufferlist& inbl, bufferlist& outbl)
{
- utime_t ut = g_clock.now();
+ utime_t ut = ceph_clock_now(cct);
Mutex mylock("RadosClient::exec::mylock");
Cond cond;
int librados::RadosClient::
rmxattr(IoCtxImpl& io, const object_t& oid, const char *name)
{
- utime_t ut = g_clock.now();
+ utime_t ut = ceph_clock_now(cct);
/* can't write to a snapshot */
if (io.snap_seq != CEPH_NOSNAP)
int librados::RadosClient::
setxattr(IoCtxImpl& io, const object_t& oid, const char *name, bufferlist& bl)
{
- utime_t ut = g_clock.now();
+ utime_t ut = ceph_clock_now(cct);
/* can't write to a snapshot */
if (io.snap_seq != CEPH_NOSNAP)
watch(IoCtxImpl& io, const object_t& oid, uint64_t ver,
uint64_t *cookie, librados::WatchCtx *ctx)
{
- utime_t ut = g_clock.now();
+ utime_t ut = ceph_clock_now(cct);
::ObjectOperation rd;
Mutex mylock("RadosClient::watch::mylock");
int librados::RadosClient::
_notify_ack(IoCtxImpl& io, const object_t& oid, uint64_t notify_id, uint64_t ver)
{
- utime_t ut = g_clock.now();
+ utime_t ut = ceph_clock_now(cct);
Mutex mylock("RadosClient::watch::mylock");
Cond cond;
int librados::RadosClient::
unwatch(IoCtxImpl& io, const object_t& oid, uint64_t cookie)
{
- utime_t ut = g_clock.now();
+ utime_t ut = ceph_clock_now(cct);
bufferlist inbl, outbl;
Mutex mylock("RadosClient::watch::mylock");
int librados::RadosClient::
notify(IoCtxImpl& io, const object_t& oid, uint64_t ver)
{
- utime_t ut = g_clock.now();
+ utime_t ut = ceph_clock_now(cct);
bufferlist inbl, outbl;
Mutex mylock("RadosClient::notify::mylock");
ostream& CDentry::print_db_line_prefix(ostream& out)
{
- return out << g_clock.now() << " mds" << dir->cache->mds->get_nodeid() << ".cache.den(" << dir->ino() << " " << name << ") ";
+ return out << ceph_clock_now(&g_ceph_context) << " mds" << dir->cache->mds->get_nodeid() << ".cache.den(" << dir->ino() << " " << name << ") ";
}
boost::pool<> CDentry::pool(sizeof(CDentry));
ostream& CDir::print_db_line_prefix(ostream& out)
{
- return out << g_clock.now() << " mds" << cache->mds->get_nodeid() << ".cache.dir(" << this->dirfrag() << ") ";
+ return out << ceph_clock_now(&g_ceph_context) << " mds" << cache->mds->get_nodeid() << ".cache.dir(" << this->dirfrag() << ") ";
}
in->mark_dirty_rstat();
//in->hack_accessed = false;
- //in->hack_load_stamp = g_clock.now();
+ //in->hack_load_stamp = ceph_clock_now(&g_ceph_context);
//num_new_inodes_loaded++;
}
}
m.priority = CEPH_MSG_PRIO_LOW; // set priority lower than journal!
if (committed_dn == items.end())
- cache->mds->objecter->mutate(oid, oloc, m, snapc, g_clock.now(), 0, NULL,
+ cache->mds->objecter->mutate(oid, oloc, m, snapc, ceph_clock_now(&g_ceph_context), 0, NULL,
new C_Dir_Committed(this, get_version(),
inode->inode.last_renamed_version));
else { // send in a different Context
while (committed_dn != items.end()) {
ObjectOperation n = ObjectOperation();
committed_dn = _commit_partial(n, snaps, max_write_size, committed_dn);
- cache->mds->objecter->mutate(oid, oloc, n, snapc, g_clock.now(), 0, NULL,
+ cache->mds->objecter->mutate(oid, oloc, n, snapc, ceph_clock_now(&g_ceph_context), 0, NULL,
gather->new_sub());
}
/*
* we simply send the message containing the header off last, we cannot
* get our header into an incorrect state.
*/
- cache->mds->objecter->mutate(oid, oloc, m, snapc, g_clock.now(), 0, NULL,
+ cache->mds->objecter->mutate(oid, oloc, m, snapc, ceph_clock_now(&g_ceph_context), 0, NULL,
gather->new_sub());
}
}
//int cinode_pins[CINODE_NUM_PINS]; // counts
ostream& CInode::print_db_line_prefix(ostream& out)
{
- return out << g_clock.now() << " mds" << mdcache->mds->get_nodeid() << ".cache.ino(" << inode.ino << ") ";
+ return out << ceph_clock_now(&g_ceph_context) << " mds" << mdcache->mds->get_nodeid() << ".cache.ino(" << inode.ino << ") ";
}
/*
object_t oid = CInode::get_object_name(ino(), frag_t(), ".inode");
object_locator_t oloc(mdcache->mds->mdsmap->get_metadata_pg_pool());
- mdcache->mds->objecter->mutate(oid, oloc, m, snapc, g_clock.now(), 0,
+ mdcache->mds->objecter->mutate(oid, oloc, m, snapc, ceph_clock_now(&g_ceph_context), 0,
NULL, new C_Inode_Stored(this, get_version(), fin) );
}
object_t oid = get_object_name(ino(), frag_t(), "");
object_locator_t oloc(mdcache->mds->mdsmap->get_metadata_pg_pool());
- mdcache->mds->objecter->mutate(oid, oloc, m, snapc, g_clock.now(), 0,
+ mdcache->mds->objecter->mutate(oid, oloc, m, snapc, ceph_clock_now(&g_ceph_context), 0,
NULL, new C_Inode_StoredParent(this, inode.last_renamed_version, fin) );
}
cap->issue_norevoke(issue);
issue = cap->pending();
cap->set_last_issue();
- cap->set_last_issue_stamp(g_clock.now());
+ cap->set_last_issue_stamp(ceph_clock_now(&g_ceph_context));
e.cap.caps = issue;
e.cap.wanted = cap->wanted();
e.cap.cap_id = cap->get_cap_id();
cap->reset_seq();
}
cap->set_cap_id(icr.cap_id);
- cap->set_last_issue_stamp(g_clock.now());
+ cap->set_last_issue_stamp(ceph_clock_now(&g_ceph_context));
return cap;
}
void clear_client_caps_after_export() {
Cond cond;
cout << "writing header " << oid << std::endl;
- objecter->write_full(oid, oloc, snapc, hbl, g_clock.now(), 0,
+ objecter->write_full(oid, oloc, snapc, hbl, ceph_clock_now(&g_ceph_context), 0,
NULL,
new C_SafeCond(&lock, &cond, &done));
uint64_t l = MIN(left, 1024*1024);
j.read_fd(fd, l);
cout << " writing " << pos << "~" << l << std::endl;
- filer.write(ino, &h.layout, snapc, pos, l, j, g_clock.now(), 0, NULL, new C_SafeCond(&lock, &cond, &done));
+ filer.write(ino, &h.layout, snapc, pos, l, j, ceph_clock_now(&g_ceph_context), 0, NULL, new C_SafeCond(&lock, &cond, &done));
lock.Lock();
while (!done)
if (wanted != in->replica_caps_wanted) {
if (wanted == 0) {
- if (in->replica_caps_wanted_keep_until > g_clock.now()) {
+ if (in->replica_caps_wanted_keep_until > ceph_clock_now(&g_ceph_context)) {
// ok, release them finally!
in->replica_caps_wanted_keep_until.sec_ref() = 0;
dout(7) << "request_inode_file_caps " << ccap_string(wanted)
<< dendl;
}
else if (in->replica_caps_wanted_keep_until.sec() == 0) {
- in->replica_caps_wanted_keep_until = g_clock.now();
+ in->replica_caps_wanted_keep_until = ceph_clock_now(&g_ceph_context);
in->replica_caps_wanted_keep_until.sec_ref() += 2;
dout(7) << "request_inode_file_caps " << ccap_string(wanted)
m->h.seq = ++l->seq;
m->clear_payload();
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
now += mdcache->client_lease_durations[pool];
mdcache->touch_client_lease(l, pool, now);
<< " - already on list since " << lock->get_update_stamp() << dendl;
} else {
updated_scatterlocks.push_back(lock->get_updated_item());
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
lock->set_update_stamp(now);
dout(10) << "mark_updated_scatterlock " << *lock
<< " - added at " << now << dendl;
dout(10) << "scatter_tick" << dendl;
// updated
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
int n = updated_scatterlocks.size();
while (!updated_scatterlocks.empty()) {
ScatterLock *lock = updated_scatterlocks.front();
void MDBalancer::tick()
{
static int num_bal_times = g_conf->mds_bal_max;
- static utime_t first = g_clock.now();
- utime_t now = g_clock.now();
+ static utime_t first = ceph_clock_now(&g_ceph_context);
+ utime_t now = ceph_clock_now(&g_ceph_context);
utime_t elapsed = now;
elapsed -= first;
void MDBalancer::send_heartbeat()
{
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
if (mds->mdsmap->is_degraded()) {
dout(10) << "send_heartbeat degraded" << dendl;
} else {
int cluster_size = mds->get_mds_map()->get_num_mds();
int whoami = mds->get_nodeid();
- rebalance_time = g_clock.now();
+ rebalance_time = ceph_clock_now(&g_ceph_context);
dump_pop_map();
if (mds->mdcache->root)
iq.push_back(mds->mdcache->root);
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
while (!iq.empty()) {
CInode *in = iq.front();
iq.pop_front();
in->inode.mode = 0500 | mode;
in->inode.size = 0;
in->inode.ctime =
- in->inode.mtime = g_clock.now();
+ in->inode.mtime = ceph_clock_now(&g_ceph_context);
in->inode.nlink = 1;
in->inode.truncate_size = -1ull;
// adjust recursive pop counters
if (dir->is_auth()) {
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
CDir *p = dir->get_parent_dir();
while (p) {
p->pop_auth_subtree.sub(now, decayrate, dir->pop_auth_subtree);
// adjust popularity?
if (dir->is_auth()) {
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
CDir *p = dir->get_parent_dir();
while (p) {
p->pop_auth_subtree.add(now, decayrate, dir->pop_auth_subtree);
// declare now?
if (mut->now == utime_t())
- mut->now = g_clock.now();
+ mut->now = ceph_clock_now(&g_ceph_context);
if (in->is_base())
return;
dout(10) << "purge_prealloc_ino " << ino << " oid " << oid << dendl;
SnapContext snapc;
- mds->objecter->remove(oid, oloc, snapc, g_clock.now(), 0, 0, fin);
+ mds->objecter->remove(oid, oloc, snapc, ceph_clock_now(&g_ceph_context), 0, 0, fin);
}
mds->logger->inc("outt");
else {
mds->logger->inc("outut");
- mds->logger->favg("oututl", g_clock.now() - in->hack_load_stamp);
+ mds->logger->favg("oututl", ceph_clock_now(&g_ceph_context) - in->hack_load_stamp);
}
}
*/
void MDCache::trim_client_leases()
{
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
dout(10) << "trim_client_leases" << dendl;
void MDCache::shutdown_check()
{
- dout(0) << "shutdown_check at " << g_clock.now() << dendl;
+ dout(0) << "shutdown_check at " << ceph_clock_now(&g_ceph_context) << dendl;
// cache
int o = g_conf->debug_mds;
uint64_t num = (to + period - 1) / period;
dout(10) << "purge_stray 0~" << to << " objects 0~" << num << " snapc " << snapc << " on " << *in << dendl;
mds->filer->purge_range(in->inode.ino, &in->inode.layout, *snapc,
- 0, num, g_clock.now(), 0,
+ 0, num, ceph_clock_now(&g_ceph_context), 0,
new C_MDC_PurgeStrayPurged(this, dn));
} else {
dout(10) << "purge_stray 0 objects snapc " << snapc << " on " << *in << dendl;
le->_segment->num_events++;
le->update_segment();
- le->set_stamp(g_clock.now());
+ le->set_stamp(ceph_clock_now(&g_ceph_context));
num_events++;
assert(!capped);
if (segments.empty()) return;
// hack: only trim for a few seconds at a time
- utime_t stop = g_clock.now();
+ utime_t stop = ceph_clock_now(&g_ceph_context);
stop += 2.0;
map<uint64_t,LogSegment*>::iterator p = segments.begin();
((max_events >= 0 && left-expiring_events-expired_events > max_events) ||
(max_segments >= 0 && (int)(segments.size()-expiring_segments.size()-expired_segments.size()) > max_segments))) {
- if (stop < g_clock.now())
+ if (stop < ceph_clock_now(&g_ceph_context))
break;
if ((int)expiring_segments.size() >= g_conf->mds_log_max_expiring)
}
// log
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
mds_load_t load = balancer->get_load(now);
if (logger) {
<< " (currently " << ceph_mds_state_name(state) << ")"
<< dendl;
- beacon_seq_stamp[beacon_last_seq] = g_clock.now();
+ beacon_seq_stamp[beacon_last_seq] = ceph_clock_now(&g_ceph_context);
MMDSBeacon *beacon = new MMDSBeacon(monc->get_fsid(), monc->get_global_id(), name, mdsmap->get_epoch(),
want_state, beacon_last_seq);
if (beacon_last_acked_stamp == utime_t())
return false;
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
utime_t since = now - beacon_last_acked_stamp;
if (since > g_conf->mds_beacon_grace) {
dout(5) << "is_laggy " << since << " > " << g_conf->mds_beacon_grace
if (beacon_seq_stamp.count(seq)) {
assert(beacon_seq_stamp[seq] > beacon_last_acked_stamp);
beacon_last_acked_stamp = beacon_seq_stamp[seq];
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
utime_t rtt = now - beacon_last_acked_stamp;
dout(10) << "handle_mds_beacon " << ceph_mds_state_name(m->get_state())
// hack: thrash exports
static utime_t start;
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
if (start == utime_t())
start = now;
/*double el = now - start;
object_locator_t oloc(mds->mdsmap->get_metadata_pg_pool());
mds->objecter->write_full(oid, oloc,
snapc,
- bl, g_clock.now(), 0,
+ bl, ceph_clock_now(&g_ceph_context), 0,
NULL, new C_MT_Save(this, version));
}
cache->adjust_subtree_auth(dir, mds->get_nodeid(), dest);
// take away the popularity we're sending.
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
mds->balancer->subtract_export(dir, now);
// fill export message with cache data
// finish export (adjust local cache state)
C_Contexts *fin = new C_Contexts(&g_ceph_context);
- finish_export_dir(dir, fin->contexts, g_clock.now());
+ finish_export_dir(dir, fin->contexts, ceph_clock_now(&g_ceph_context));
dir->add_waiter(CDir::WAIT_UNFREEZE, fin);
// unfreeze
CDir *dir = cache->get_dirfrag(m->dirfrag);
assert(dir);
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
int oldauth = m->get_source().num();
dout(7) << "handle_export_dir importing " << *dir << " from " << oldauth << dendl;
assert(dir->is_auth() == false);
// timeout/stale
// (caps go stale, lease die)
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
utime_t cutoff = now;
cutoff -= g_conf->mds_session_timeout;
while (1) {
// clients will get the mdsmap and discover we're reconnecting via the monitor.
- reconnect_start = g_clock.now();
+ reconnect_start = ceph_clock_now(&g_ceph_context);
dout(1) << "reconnect_clients -- " << client_reconnect_gather.size() << " sessions" << dendl;
mds->sessionmap.dump();
}
return;
}
- utime_t delay = g_clock.now();
+ utime_t delay = ceph_clock_now(&g_ceph_context);
delay -= reconnect_start;
dout(10) << " reconnect_start " << reconnect_start << " delay " << delay << dendl;
{
utime_t reconnect_end = reconnect_start;
reconnect_end += g_conf->mds_reconnect_timeout;
- if (g_clock.now() >= reconnect_end &&
+ if (ceph_clock_now(&g_ceph_context) >= reconnect_end &&
!client_reconnect_gather.empty()) {
dout(10) << "reconnect timed out" << dendl;
for (set<client_t>::iterator p = client_reconnect_gather.begin();
mdr->did_early_reply = true;
mds->logger->inc(l_mds_reply);
- double lat = g_clock.now() - mdr->client_request->get_recv_stamp();
+ double lat = ceph_clock_now(&g_ceph_context) - mdr->client_request->get_recv_stamp();
mds->logger->favg(l_mds_replyl, lat);
dout(20) << "lat " << lat << dendl;
}
if (!did_early_reply && !is_replay) {
mds->logger->inc(l_mds_reply);
- double lat = g_clock.now() - mdr->client_request->get_recv_stamp();
+ double lat = ceph_clock_now(&g_ceph_context) - mdr->client_request->get_recv_stamp();
mds->logger->favg(l_mds_replyl, lat);
dout(20) << "lat " << lat << dendl;
bufferlist bl;
int whoami = mds->get_nodeid();
client_t client = session->get_client();
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
dout(20) << "set_trace_dist snapid " << snapid << dendl;
if (!mds->locker->acquire_locks(mdr, rdlocks, wrlocks, xlocks))
return;
- mds->balancer->hit_inode(g_clock.now(), ref, META_POP_IRD,
+ mds->balancer->hit_inode(ceph_clock_now(&g_ceph_context), ref, META_POP_IRD,
mdr->client_request->get_source().num());
// reply
}
// hit pop
- mdr->now = g_clock.now();
+ mdr->now = ceph_clock_now(&g_ceph_context);
if (cmode == CEPH_FILE_MODE_RDWR ||
cmode == CEPH_FILE_MODE_WR)
mds->balancer->hit_inode(mdr->now, cur, META_POP_IWR);
// create inode.
- mdr->now = g_clock.now();
+ mdr->now = ceph_clock_now(&g_ceph_context);
SnapRealm *realm = diri->find_snaprealm(); // use directory's realm; inode isn't attached yet.
snapid_t follows = realm->get_newest_seq();
dir->verify_fragstat();
#endif
- mdr->now = g_clock.now();
+ mdr->now = ceph_clock_now(&g_ceph_context);
snapid_t snapid = mdr->snapid;
<< " complete=" << (int)complete << dendl;
// bump popularity. NOTE: this doesn't quite capture it.
- mds->balancer->hit_dir(g_clock.now(), dir, META_POP_IRD, -1, numfiles);
+ mds->balancer->hit_dir(ceph_clock_now(&g_ceph_context), dir, META_POP_IRD, -1, numfiles);
// reply
reply_request(mdr, reply, diri);
pi = cur->project_inode();
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
if (mask & CEPH_SETATTR_MODE)
pi->mode = (pi->mode & ~07777) | (req->head.args.setattr.mode & 07777);
// prepare
inode_t *pi = in->project_inode();
- pi->mtime = pi->ctime = g_clock.now();
+ pi->mtime = pi->ctime = ceph_clock_now(&g_ceph_context);
pi->version = in->pre_dirty();
pi->truncate_from = pi->size;
inode_t *pi = cur->project_inode();
pi->layout = layout;
pi->version = cur->pre_dirty();
- pi->ctime = g_clock.now();
+ pi->ctime = ceph_clock_now(&g_ceph_context);
// log + wait
mdr->ls = mdlog->get_current_segment();
map<string,bufferptr> *px = new map<string,bufferptr>;
inode_t *pi = cur->project_inode(px);
pi->version = cur->pre_dirty();
- pi->ctime = g_clock.now();
+ pi->ctime = ceph_clock_now(&g_ceph_context);
pi->xattr_version++;
px->erase(name);
(*px)[name] = buffer::create(len);
map<string,bufferptr> *px = new map<string,bufferptr>;
inode_t *pi = cur->project_inode(px);
pi->version = cur->pre_dirty();
- pi->ctime = g_clock.now();
+ pi->ctime = ceph_clock_now(&g_ceph_context);
pi->xattr_version++;
px->erase(name);
SnapRealm *realm = dn->get_dir()->inode->find_snaprealm();
snapid_t follows = realm->get_newest_seq();
- mdr->now = g_clock.now();
+ mdr->now = ceph_clock_now(&g_ceph_context);
CInode *newi = prepare_new_inode(mdr, dn->get_dir(), inodeno_t(req->head.ino),
req->head.args.mknod.mode, &layout);
// new inode
SnapRealm *realm = dn->get_dir()->inode->find_snaprealm();
snapid_t follows = realm->get_newest_seq();
- mdr->now = g_clock.now();
+ mdr->now = ceph_clock_now(&g_ceph_context);
unsigned mode = req->head.args.mkdir.mode;
mode &= ~S_IFMT;
if (!mds->locker->acquire_locks(mdr, rdlocks, wrlocks, xlocks))
return;
- mdr->now = g_clock.now();
+ mdr->now = ceph_clock_now(&g_ceph_context);
snapid_t follows = dn->get_dir()->inode->find_snaprealm()->get_newest_seq();
unsigned mode = S_IFLNK | 0777;
// pick mtime
if (mdr->now == utime_t())
- mdr->now = g_clock.now();
+ mdr->now = ceph_clock_now(&g_ceph_context);
// does the target need an anchor?
if (targeti->is_auth()) {
// yay!
if (mdr->now == utime_t())
- mdr->now = g_clock.now();
+ mdr->now = ceph_clock_now(&g_ceph_context);
// NOTE: this is non-optimal. we create an anchor at the old
// location, and then change it. we can do better, but it's more
// -- declare now --
if (mdr->now == utime_t())
- mdr->now = g_clock.now();
+ mdr->now = ceph_clock_now(&g_ceph_context);
// -- prepare witnesses --
map<snapid_t,SnapInfo*> infomap;
realm->get_snap_info(infomap, diri->get_oldest_snap());
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
__u32 num = 0;
bufferlist dnbl;
for (map<snapid_t,SnapInfo*>::iterator p = infomap.begin();
}
if (mdr->now == utime_t())
- mdr->now = g_clock.now();
+ mdr->now = ceph_clock_now(&g_ceph_context);
// anchor
if (!diri->is_anchored()) {
// journal
inode_t *pi = diri->project_inode();
- pi->ctime = g_clock.now();
+ pi->ctime = ceph_clock_now(&g_ceph_context);
pi->version = diri->pre_dirty();
mdr->ls = mdlog->get_current_segment();
mds->objecter->write_full(oid, oloc,
snapc,
- bl, g_clock.now(), 0,
+ bl, ceph_clock_now(&g_ceph_context), 0,
NULL, new C_SM_Save(this, version));
}
void SessionMap::decode(bufferlist::iterator& p)
{
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
uint64_t pre;
::decode(pre, p);
if (pre == (uint64_t)-1) {
else
s = session_map[i.name] = new Session;
s->inst = i;
- s->last_cap_renew = g_clock.now();
+ s->last_cap_renew = ceph_clock_now(&g_ceph_context);
return s;
}
void add_session(Session *s) {
void touch_session(Session *session) {
if (session->item_session_list.is_on_list()) {
by_state[session->state].push_back(&session->item_session_list);
- session->last_cap_renew = g_clock.now();
+ session->last_cap_renew = ceph_clock_now(&g_ceph_context);
} else {
assert(0); // hrm, should happen?
}
inline ostream& operator<<(ostream& out, dirfrag_load_vec_t& dl)
{
// ugliness!
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
DecayRate rate(g_conf->mds_decay_halflife);
return out << "[" << dl.vec[0].get(now, rate) << "," << dl.vec[1].get(now, rate)
<< " " << dl.meta_load(now, rate)
op(o), machine_id(mid),
first_committed(0), last_committed(0), pn_from(0), pn(0), uncommitted_pn(0),
latest_version(0) {
- sent_timestamp = g_clock.now();
+ sent_timestamp = ceph_clock_now(&g_ceph_context);
}
private:
// start by trying to elect me
if (epoch % 2 == 0)
bump_epoch(epoch+1); // odd == election cycle
- start_stamp = g_clock.now();
+ start_stamp = ceph_clock_now(&g_ceph_context);
electing_me = true;
acked_me.insert(mon->rank);
// ack them
leader_acked = who;
- ack_stamp = g_clock.now();
+ ack_stamp = ceph_clock_now(&g_ceph_context);
mon->messenger->send_message(new MMonElection(MMonElection::OP_ACK, epoch, mon->monmap),
mon->monmap->get_inst(who));
dout(10) << "create_initial -- creating initial map" << dendl;
LogEntry e;
memset(&e.who, 0, sizeof(e.who));
- e.stamp = g_clock.now();
+ e.stamp = ceph_clock_now(&g_ceph_context);
e.type = CLOG_INFO;
std::stringstream ss;
ss << "mkfs " << mon->monmap->get_fsid();
void MDSMonitor::create_new_fs(MDSMap &m, int metadata_pool, int data_pool)
{
m.max_mds = g_conf->max_mds;
- m.created = g_clock.now();
+ m.created = ceph_clock_now(&g_ceph_context);
m.data_pg_pools.push_back(data_pool);
m.metadata_pg_pool = metadata_pool;
m.cas_pg_pool = -1;
{
dout(10) << "encode_pending e" << pending_mdsmap.epoch << dendl;
- pending_mdsmap.modified = g_clock.now();
+ pending_mdsmap.modified = ceph_clock_now(&g_ceph_context);
//print_map(pending_mdsmap);
version_t seq = m->get_seq();
dout(15) << "_note_beacon " << *m << " noting time" << dendl;
- last_beacon[gid].stamp = g_clock.now();
+ last_beacon[gid].stamp = ceph_clock_now(&g_ceph_context);
last_beacon[gid].seq = seq;
}
}
// initialize the beacon timer
- last_beacon[gid].stamp = g_clock.now();
+ last_beacon[gid].stamp = ceph_clock_now(&g_ceph_context);
last_beacon[gid].seq = seq;
// new incompat?
if (pending_mdsmap.up.count(w)) {
uint64_t gid = pending_mdsmap.up[w];
if (pending_mdsmap.mds_info.count(gid)) {
- utime_t until = g_clock.now();
+ utime_t until = ceph_clock_now(&g_ceph_context);
until += g_conf->mds_blacklist_interval;
MDSMap::mds_info_t& info = pending_mdsmap.mds_info[pending_mdsmap.up[w]];
pending_mdsmap.last_failure_osd_epoch = mon->osdmon()->blacklist(info.addr, until);
// --- reset the cluster map ---
if (pending_mdsmap.mds_info.size()) {
// blacklist all old mds's
- utime_t until = g_clock.now();
+ utime_t until = ceph_clock_now(&g_ceph_context);
until += g_conf->mds_blacklist_interval;
for (map<int32_t,uint64_t>::iterator p = pending_mdsmap.up.begin();
p != pending_mdsmap.up.end();
}
// check beacon timestamps
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
utime_t cutoff = now;
cutoff -= g_conf->mds_beacon_grace;
dout(10) << " adding " << p->second.addr << " mds" << info.rank << "." << info.inc
<< " " << ceph_mds_state_name(info.state)
<< " to last_beacon" << dendl;
- last_beacon[p->first].stamp = g_clock.now();
+ last_beacon[p->first].stamp = ceph_clock_now(&g_ceph_context);
last_beacon[p->first].seq = 0;
}
}
case MDSMap::STATE_CLIENTREPLAY:
// BUG: hrm, if this is the case, the STOPPING guys won't be able to stop, will they?
{
- utime_t until = g_clock.now();
+ utime_t until = ceph_clock_now(&g_ceph_context);
until += g_conf->mds_blacklist_interval;
pending_mdsmap.last_failure_osd_epoch = mon->osdmon()->blacklist(info.addr, until);
propose_osdmap = true;
break;
utime_t interval(1, 0);
- map_cond.WaitInterval(monc_lock, interval);
+ map_cond.WaitInterval(cct, monc_lock, interval);
if (monmap.epoch == 0) {
messenger->mark_down(cur_con); // nope, clean that connection up
if (cur_mon.empty())
_reopen_session();
- utime_t until = g_clock.now();
+ utime_t until = ceph_clock_now(cct);
until += timeout;
if (timeout > 0.0)
dout(10) << "authenticate will time out at " << until << dendl;
if (state == MC_STATE_NEGOTIATING) {
if (!auth || (int)m->protocol != auth->get_protocol()) {
delete auth;
- auth = get_auth_client_handler(&g_ceph_context, m->protocol, rotating_secrets);
+ auth = get_auth_client_handler(cct, m->protocol, rotating_secrets);
if (!auth) {
m->put();
return;
_reopen_session();
} else if (!cur_mon.empty()) {
// just renew as needed
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(cct);
if (now > sub_renew_after)
_renew_subs();
_reopen_session();
else {
if (sub_renew_sent == utime_t())
- sub_renew_sent = g_clock.now();
+ sub_renew_sent = ceph_clock_now(cct);
MMonSubscribe *m = new MMonSubscribe;
m->what = sub_have;
return 0;
}
- utime_t cutoff = g_clock.now();
+ utime_t cutoff = ceph_clock_now(cct);
cutoff -= MIN(30.0, g_conf->auth_service_ticket_ttl / 4.0);
if (!rotating_secrets->need_new_secrets(cutoff)) {
dout(10) << "_check_auth_rotating have uptodate secrets (they expire after " << cutoff << ")" << dendl;
int MonClient::wait_auth_rotating(double timeout)
{
Mutex::Locker l(monc_lock);
- utime_t until = g_clock.now();
+ utime_t until = ceph_clock_now(cct);
until += timeout;
if (auth->get_protocol() == CEPH_AUTH_NONE)
while (auth_principal_needs_rotating_keys(entity_name) &&
rotating_secrets->need_new_secrets()) {
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(cct);
if (now >= until) {
dout(0) << "wait_auth_rotating timed out after " << timeout << dendl;
return -ETIMEDOUT;
MonMap() : epoch(0) {
memset(&fsid, 0, sizeof(fsid));
- last_changed = created = g_clock.now();
+ last_changed = created = ceph_clock_now(&g_ceph_context);
}
ceph_fsid_t& get_fsid() { return fsid; }
void Monitor::win_election(epoch_t epoch, set<int>& active)
{
state = STATE_LEADER;
- leader_since = g_clock.now();
+ leader_since = ceph_clock_now(&g_ceph_context);
leader = rank;
quorum = active;
dout(10) << "win_election, epoch " << epoch << " quorum is " << quorum << dendl;
dout(10) << "setting timeout on session" << dendl;
// set an initial timeout here, so we will trim this session even if they don't
// do anything.
- s->until = g_clock.now();
+ s->until = ceph_clock_now(&g_ceph_context);
s->until += g_conf->mon_subscribe_interval;
} else {
//give it monitor caps; the peer type has been authenticated
return;
}
- s->until = g_clock.now();
+ s->until = ceph_clock_now(&g_ceph_context);
s->until += g_conf->mon_subscribe_interval;
for (map<string,ceph_mon_subscribe_item>::iterator p = m->what.begin();
p != m->what.end();
(*p)->tick();
// trim sessions
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
xlist<MonSession*>::iterator p = session_map.sessions.begin();
while (!p.end()) {
MonSession *s = *p;
{
pending_map = *mon->monmap;
pending_map.epoch++;
- pending_map.last_changed = g_clock.now();
+ pending_map.last_changed = ceph_clock_now(&g_ceph_context);
dout(10) << "create_pending monmap epoch " << pending_map.epoch << dendl;
}
}
pending_map.add(name, addr);
- pending_map.last_changed = g_clock.now();
+ pending_map.last_changed = ceph_clock_now(&g_ceph_context);
ss << "added mon." << name << " at " << addr;
getline(ss, rs);
paxos->wait_for_commit(new Monitor::C_Command(mon, m, 0, rs, paxos->get_version()));
entity_addr_t addr = pending_map.get_addr(name);
pending_map.remove(name);
- pending_map.last_changed = g_clock.now();
+ pending_map.last_changed = ceph_clock_now(&g_ceph_context);
ss << "removed mon." << name << " at " << addr << ", there are now " << pending_map.size() << " monitors" ;
getline(ss, rs);
// send reply immediately in case we get removed
newmap.decode(bl);
newmap.set_epoch(1);
newmap.set_fsid(mon->monmap->fsid);
- newmap.created = newmap.modified = g_clock.now();
+ newmap.created = newmap.modified = ceph_clock_now(&g_ceph_context);
// encode into pending incremental
newmap.encode(pending_inc.fullmap);
if (osdmap.is_down(o) && osdmap.is_in(o) &&
down_pending_out.count(o) == 0) {
dout(10) << " adding osd" << o << " to down_pending_out map" << dendl;
- down_pending_out[o] = g_clock.now();
+ down_pending_out[o] = ceph_clock_now(&g_ceph_context);
}
if (mon->is_leader()) {
<< dendl;
// finalize up pending_inc
- pending_inc.modified = g_clock.now();
+ pending_inc.modified = ceph_clock_now(&g_ceph_context);
// tell me about it
for (map<int32_t,uint8_t>::iterator i = pending_inc.new_state.begin();
bool do_propose = false;
// mark down osds out?
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
map<int,utime_t>::iterator i = down_pending_out.begin();
while (i != down_pending_out.end()) {
int o = i->first;
ss << "unable to parse address " << m->cmd[3];
else if (m->cmd[2] == "add") {
- utime_t expires = g_clock.now();
+ utime_t expires = ceph_clock_now(&g_ceph_context);
double d = 60*60; // 1 hour default
if (m->cmd.size() > 4)
d = atof(m->cmd[4].c_str());
pp = &pending_inc.new_pools[pool];
*pp = *p;
}
- pp->add_snap(snapname.c_str(), g_clock.now());
+ pp->add_snap(snapname.c_str(), ceph_clock_now(&g_ceph_context));
pp->set_snap_epoch(pending_inc.epoch);
ss << "created pool " << m->cmd[3] << " snap " << snapname;
getline(ss, rs);
switch (m->op) {
case POOL_OP_CREATE_SNAP:
- pp->add_snap(m->name.c_str(), g_clock.now());
+ pp->add_snap(m->name.c_str(), ceph_clock_now(&g_ceph_context));
dout(10) << "create snap in pool " << m->pool << " " << m->name << " seq " << pp->get_snap_epoch() << dendl;
break;
}
// walk through incrementals
- utime_t now(g_clock.now());
+ utime_t now(ceph_clock_now(&g_ceph_context));
while (paxosv > pg_map.version) {
bufferlist bl;
bool success = paxos->read(pg_map.version+1, bl);
{
if (!mon->is_leader())
return;
- utime_t now(g_clock.now());
+ utime_t now(ceph_clock_now(&g_ceph_context));
utime_t timeo(g_conf->mon_osd_report_timeout, 0);
if (now - mon->get_leader_since() < timeo) {
// We haven't been the leader for long enough to consider OSD timeouts
return false;
}
- last_osd_report[from] = g_clock.now();
+ last_osd_report[from] = ceph_clock_now(&g_ceph_context);
if (!stats->get_orig_source().is_osd() ||
!mon->osdmon()->osdmap.is_up(from) ||
dout(10) << "send_pg_creates to " << pg_map.creating_pgs.size() << " pgs" << dendl;
map<int, MOSDPGCreate*> msg;
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
OSDMap *osdmap = &mon->osdmon()->osdmap;
int max = MIN(osdmap->get_max_osd(), osdmap->crush.get_max_devices());
p++) {
dout(10) << "sending pg_create to osd" << p->first << dendl;
mon->messenger->send_message(p->second, mon->osdmon()->osdmap.get_inst(p->first));
- last_sent_pg_create[p->first] = g_clock.now();
+ last_sent_pg_create[p->first] = ceph_clock_now(&g_ceph_context);
}
}
// commit locally
last_committed++;
- last_commit_time = g_clock.now();
+ last_commit_time = ceph_clock_now(&g_ceph_context);
mon->store->put_int(last_committed, machine_name, "last_committed");
// tell everyone
assert(mon->is_leader());
assert(is_active());
- lease_expire = g_clock.now();
+ lease_expire = ceph_clock_now(&g_ceph_context);
lease_expire += g_conf->mon_lease;
acked_lease.clear();
acked_lease.insert(mon->rank);
void Paxos::warn_on_future_time(utime_t t, entity_name_t from)
{
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
if (t > now) {
utime_t diff = t - now;
if (diff > g_conf->mon_clock_drift_allowed) {
pow(g_conf->mon_clock_drift_warn_backoff, clock_drift_warned)) {
mon->clog.warn() << "message from " << from << " was stamped " << diff
<< "s in the future, clocks not synchronized";
- last_clock_drift_warn = g_clock.now();
+ last_clock_drift_warn = ceph_clock_now(&g_ceph_context);
++clock_drift_warned;
}
}
MMonPaxos *ack = new MMonPaxos(mon->get_epoch(), MMonPaxos::OP_LEASE_ACK, machine_id);
ack->last_committed = last_committed;
ack->first_committed = first_committed;
- ack->lease_timestamp = g_clock.now();
+ ack->lease_timestamp = ceph_clock_now(&g_ceph_context);
mon->messenger->send_message(ack, lease->get_source_inst());
// (re)set timeout event.
observers[inst] = observer = new Observer(inst, v);
}
- utime_t timeout = g_clock.now();
+ utime_t timeout = ceph_clock_now(&g_ceph_context);
timeout += g_conf->paxos_observer_timeout;
observer->timeout = timeout;
Observer *observer = iter->second;
// timed out?
- if (g_clock.now() > observer->timeout) {
+ if (ceph_clock_now(&g_ceph_context) > observer->timeout) {
delete observer;
observers.erase(iter++);
continue;
bool Paxos::is_readable(version_t v)
{
- dout(1) << "is_readable now=" << g_clock.now() << " lease_expire=" << lease_expire
+ dout(1) << "is_readable now=" << ceph_clock_now(&g_ceph_context) << " lease_expire=" << lease_expire
<< " has v" << v << " lc " << last_committed << dendl;
if (v > last_committed)
return false;
(is_active() || is_updating()) &&
last_committed > 0 && // must have a value
(mon->get_quorum().size() == 1 || // alone, or
- g_clock.now() < lease_expire); // have lease
+ ceph_clock_now(&g_ceph_context) < lease_expire); // have lease
}
bool Paxos::read(version_t v, bufferlist &bl)
return
mon->is_leader() &&
is_active() &&
- g_clock.now() < lease_expire;
+ ceph_clock_now(&g_ceph_context) < lease_expire;
}
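
Reviewer note: both checks gate on the lease set a few hunks above (lease_expire = ceph_clock_now(...) + g_conf->mon_lease), which is what makes the warn_on_future_time() drift warning operationally important:

  // leader extends the lease: lease_expire = now + mon_lease (5s by default,
  // value not shown in this patch); reads and writes stay valid only while
  // ceph_clock_now(...) < lease_expire, so clocks drifting beyond
  // mon_clock_drift_allowed can stall readability until the next lease.
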
bool Paxos::propose_new_value(bufferlist& bl, Context *oncommit)
if (paxos->last_committed <= 1)
delay = 0.0;
else {
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
if ((now - paxos->last_commit_time) > g_conf->paxos_propose_interval)
delay = (double)g_conf->paxos_min_wait;
else
virtual bool ms_verify_authorizer(Connection *con, int peer_type,
int protocol, bufferlist& authorizer, bufferlist& authorizer_reply,
bool& isvalid) { return false; };
-private:
+protected:
CephContext *cct;
};
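
Loosening cct from private to protected is what lets subclasses of this dispatcher interface reach the context for calls like ceph_clock_now(cct), instead of the removed global clock. A hedged sketch; the subclass name is hypothetical and the other virtuals and constructor are elided:

    // Hypothetical subclass illustrating why cct must be protected.
    class TimingDispatcher : public Dispatcher {
      // (remaining Dispatcher virtuals and constructor elided)
      utime_t stamp() {
        return ceph_clock_now(cct);  // reachable only with protected cct
      }
    };
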
backoff.set_from_double(g_conf->ms_initial_backoff);
} else {
dout(10) << "fault waiting " << backoff << dendl;
- cond.WaitInterval(pipe_lock, backoff);
+ cond.WaitInterval(&g_ceph_context, pipe_lock, backoff);
backoff += backoff;
if (backoff > g_conf->ms_max_backoff)
backoff.set_from_double(g_conf->ms_max_backoff);
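
The fault path above is a classic exponential backoff: wait backoff, double it, clamp at ms_max_backoff. Note that WaitInterval now takes the CephContext first, presumably so the wait deadline comes from the same context-aware clock. A sketch under those assumptions (reconnect condition elided; pipe_lock is held around the wait, per the surrounding code):

    utime_t backoff;
    backoff.set_from_double(g_conf->ms_initial_backoff);
    while (/* not yet reconnected */ true) {
      cond.WaitInterval(&g_ceph_context, pipe_lock, backoff); // timed sleep
      backoff += backoff;                                     // double it
      if (backoff > g_conf->ms_max_backoff)
        backoff.set_from_double(g_conf->ms_max_backoff);      // clamp at max
    }
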
void queue_received(Message *m, int priority);
void queue_received(Message *m) {
- m->set_recv_stamp(g_clock.now());
+ m->set_recv_stamp(ceph_clock_now(&g_ceph_context));
// this is just to make sure that a changeset is working
// properly; if you start using the refcounting more and have
<< (hbp.length() ? " + header":"")
<< dendl;
- utime_t from = g_clock.now();
+ utime_t from = ceph_clock_now(&g_ceph_context);
// entry
off64_t pos = write_pos;
#endif
}
- utime_t lat = g_clock.now() - from;
+ utime_t lat = ceph_clock_now(&g_ceph_context) - from;
dout(20) << "do_write latency " << lat << dendl;
write_lock.Lock();
utime_t min_interval;
min_interval.set_from_double(g_conf->filestore_min_sync_interval);
- utime_t startwait = g_clock.now();
+ utime_t startwait = ceph_clock_now(&g_ceph_context);
if (!force_sync) {
dout(20) << "sync_entry waiting for max_interval " << max_interval << dendl;
- sync_cond.WaitInterval(lock, max_interval);
+ sync_cond.WaitInterval(&g_ceph_context, lock, max_interval);
} else {
dout(20) << "sync_entry not waiting, force_sync set" << dendl;
}
force_sync = false;
} else {
// wait for at least the min interval
- utime_t woke = g_clock.now();
+ utime_t woke = ceph_clock_now(&g_ceph_context);
woke -= startwait;
dout(20) << "sync_entry woke after " << woke << dendl;
if (woke < min_interval) {
t -= woke;
dout(20) << "sync_entry waiting for another " << t
<< " to reach min interval " << min_interval << dendl;
- sync_cond.WaitInterval(lock, t);
+ sync_cond.WaitInterval(&g_ceph_context, lock, t);
}
}
lock.Unlock();
if (commit_start()) {
- utime_t start = g_clock.now();
+ utime_t start = ceph_clock_now(&g_ceph_context);
uint64_t cp = committing_seq;
SyncEntryTimeout *sync_entry_timeo = new SyncEntryTimeout();
}
}
- utime_t done = g_clock.now();
+ utime_t done = ceph_clock_now(&g_ceph_context);
done -= start;
dout(10) << "sync_entry commit took " << done << dendl;
commit_finish();
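
The sync thread enforces both bounds: it sleeps up to the max interval waiting for work, then, if woken early, sleeps again until at least filestore_min_sync_interval has elapsed, and finally times the commit itself. A compressed restatement of that flow, using only the calls visible in the hunks above:

    utime_t startwait = ceph_clock_now(&g_ceph_context);
    sync_cond.WaitInterval(&g_ceph_context, lock, max_interval); // await work
    utime_t woke = ceph_clock_now(&g_ceph_context);
    woke -= startwait;                  // how long we actually slept
    if (woke < min_interval) {
      utime_t t = min_interval;
      t -= woke;                        // remainder to reach the floor
      sync_cond.WaitInterval(&g_ceph_context, lock, t);
    }
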
bl.push_back(bp);
uint64_t wrote = 0;
while (1) {
- if (g_clock.now() > until) break;
+ if (ceph_clock_now(&g_ceph_context) > until) break;
struct statfs st;
store->statfs(&st);
store->_fake_writes(true);
srand(0);
- utime_t start = g_clock.now();
+ utime_t start = ceph_clock_now(&g_ceph_context);
utime_t until = start;
until.sec_ref() += time;
uint64_t wrote = 0;
for (int c=1; c<=count; c++) {
- if (g_clock.now() > until) break;
+ if (ceph_clock_now(&g_ceph_context) > until) break;
//if (c == 7) start_debug = true;
// dump freelist?
/*
- if (g_clock.now() > nextfl) {
+ if (ceph_clock_now(&g_ceph_context) > nextfl) {
elapsed += freelist_inc;
save_freelist(elapsed);
nextfl.sec_ref() += freelist_inc;
bp.zero();
bl.push_back(bp);
dout(0) << "testing disk bandwidth..." << dendl;
- utime_t start = g_clock.now();
+ utime_t start = ceph_clock_now(&g_ceph_context);
object_t oid("disk_bw_test");
for (int i=0; i<1000; i++) {
ObjectStore::Transaction *t = new ObjectStore::Transaction;
store->queue_transaction(NULL, t);
}
store->sync();
- utime_t end = g_clock.now();
+ utime_t end = ceph_clock_now(&g_ceph_context);
end -= start;
dout(0) << "measured " << (1000.0 / (double)end) << " mb/sec" << dendl;
ObjectStore::Transaction tr;
old_from.swap(heartbeat_from);
old_con.swap(heartbeat_con);
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
heartbeat_epoch = osdmap->get_epoch();
if (locked && !is_booting())
_share_map_outgoing(osdmap->get_cluster_inst(from));
- heartbeat_from_stamp[from] = g_clock.now(); // don't let _my_ lag interfere.
+ heartbeat_from_stamp[from] = ceph_clock_now(&g_ceph_context); // don't let _my_ lag interfere.
// remove from failure lists if needed
if (failure_pending.count(from)) {
utime_t w;
w.set_from_double(wait);
dout(30) << "heartbeat_entry sleeping for " << wait << dendl;
- heartbeat_cond.WaitInterval(heartbeat_lock, w);
+ heartbeat_cond.WaitInterval(&g_ceph_context, heartbeat_lock, w);
dout(30) << "heartbeat_entry woke up" << dendl;
}
heartbeat_lock.Unlock();
// we should also have map_lock rdlocked.
// check for incoming heartbeats (move me elsewhere?)
- utime_t grace = g_clock.now();
+ utime_t grace = ceph_clock_now(&g_ceph_context);
grace -= g_conf->osd_heartbeat_grace;
for (map<int, epoch_t>::iterator p = heartbeat_from.begin();
p != heartbeat_from.end();
void OSD::heartbeat()
{
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
dout(30) << "heartbeat" << dendl;
check_replay_queue();
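
The liveness check works backwards: rather than asking whether each peer's last stamp is older than osd_heartbeat_grace seconds, it computes a single cutoff (now minus grace) once and compares stamps against it. A sketch of that pattern; the map type is assumed from the heartbeat_from_stamp assignment above:

    utime_t cutoff = ceph_clock_now(&g_ceph_context);
    cutoff -= g_conf->osd_heartbeat_grace;       // oldest acceptable stamp
    for (map<int, utime_t>::iterator p = heartbeat_from_stamp.begin();
         p != heartbeat_from_stamp.end(); ++p) {
      if (p->second < cutoff) {
        // peer p->first missed its heartbeat window; report failure
      }
    }
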
// mon report?
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
if (now - last_pg_stats_sent > g_conf->osd_mon_report_interval_max) {
osd_stat_updated = true;
do_mon_report();
{
dout(7) << "do_mon_report" << dendl;
- utime_t now(g_clock.now());
+ utime_t now(ceph_clock_now(&g_ceph_context));
last_mon_report = now;
// do any pending reports
send_alive();
send_pg_temp();
send_failures();
- send_pg_stats(g_clock.now());
+ send_pg_stats(ceph_clock_now(&g_ceph_context));
}
}
<< " from " << obc->obs.oi << dendl;
entity_name_t entity = witer->first;
watch_info_t& w = obc->obs.oi.watchers[entity];
- utime_t expire = g_clock.now();
+ utime_t expire = ceph_clock_now(&g_ceph_context);
expire += w.timeout_seconds;
obc->unconnected_watchers[entity] = expire;
dout(10) << " disconnected watch " << w << " by " << entity << " session " << session
if (witer != obc->watchers.end()) {
watch_info_t& w = obc->obs.oi.watchers[notif_iter->first];
obc->watchers.erase(witer); // FIXME: hmm? notify timeout may be different than watch timeout?
- utime_t expire = g_clock.now();
+ utime_t expire = ceph_clock_now(&g_ceph_context);
expire += w.timeout_seconds;
obc->unconnected_watchers[notif_iter->first] = expire;
}
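
On disconnect a watcher is not dropped immediately; it is parked in unconnected_watchers with a deadline of now plus its own timeout_seconds, so a quick reconnect can reclaim it. The matching reap side (the scan appears further down in this diff) looks like this, using the erase(iter++) idiom seen earlier:

    utime_t now = ceph_clock_now(&g_ceph_context);
    map<entity_name_t, utime_t>::iterator q = obc->unconnected_watchers.begin();
    while (q != obc->unconnected_watchers.end()) {
      if (now > q->second)
        obc->unconnected_watchers.erase(q++);  // deadline passed: drop watch
      else
        ++q;
    }
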
up_thru_wanted = want;
// expedite, a bit. WARNING this will somewhat delay other mon queries.
- last_mon_report = g_clock.now();
+ last_mon_report = ceph_clock_now(&g_ceph_context);
send_alive();
} else {
dout(10) << "queue_want_up_thru want " << want << " <= queued " << up_thru_wanted
ObjectStore::Transaction *cleanupt = new ObjectStore::Transaction;
store->sync_and_flush();
- utime_t start = g_clock.now();
+ utime_t start = ceph_clock_now(&g_ceph_context);
for (uint64_t pos = 0; pos < count; pos += bsize) {
char nm[30];
snprintf(nm, sizeof(nm), "disk_bw_test_%lld", (long long)pos);
cleanupt->remove(coll_t::META_COLL, soid);
}
store->sync_and_flush();
- utime_t end = g_clock.now();
+ utime_t end = ceph_clock_now(&g_ceph_context);
// clean up
store->queue_transaction(NULL, cleanupt);
clog.info() << "kicking recovery queue. set osd_recovery_delay_start "
<< "to " << g_conf->osd_recovery_delay_start << "\n";
- defer_recovery_until = g_clock.now();
+ defer_recovery_until = ceph_clock_now(&g_ceph_context);
defer_recovery_until += g_conf->osd_recovery_delay_start;
recovery_wq.kick();
}
dout(20) << "sched_scrub" << dendl;
pair<utime_t,pg_t> pos;
- utime_t max = g_clock.now();
+ utime_t max = ceph_clock_now(&g_ceph_context);
max -= g_conf->osd_scrub_max_interval;
sched_scrub_lock.Lock();
superblock.current_epoch = cur;
advance_map(t);
- had_map_since = g_clock.now();
+ had_map_since = ceph_clock_now(&g_ceph_context);
}
C_Contexts *fin = new C_Contexts(&g_ceph_context);
void OSD::check_replay_queue()
{
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
list< pair<pg_t,utime_t> > pgids;
replay_queue_lock.Lock();
while (!replay_queue.empty() &&
<< " >= max " << g_conf->osd_recovery_max_active << dendl;
return false;
}
- if (g_clock.now() < defer_recovery_until) {
+ if (ceph_clock_now(&g_ceph_context) < defer_recovery_until) {
dout(15) << "_recover_now defer until " << defer_recovery_until << dendl;
return false;
}
// ...
throttle_op_queue();
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
int r = init_op_flags(op);
if (r) {
osd->recovery_queue.push_back(&pg->recovery_item);
if (g_conf->osd_recovery_delay_start > 0) {
- osd->defer_recovery_until = g_clock.now();
+ osd->defer_recovery_until = ceph_clock_now(&g_ceph_context);
osd->defer_recovery_until += g_conf->osd_recovery_delay_start;
}
return true;
<< lpg_bits << " lpg bits" << dendl;
epoch = e;
set_fsid(fsid);
- created = modified = g_clock.now();
+ created = modified = ceph_clock_now(&g_ceph_context);
set_max_osd(nosd);
log.print(*_dout);
*_dout << dendl;
- utime_t mtime = g_clock.now();
+ utime_t mtime = ceph_clock_now(&g_ceph_context);
eversion_t old_last_update = info.last_update;
info.last_update.epoch = osd->osdmap->get_epoch();
map<sobject_t, Missing::item>::iterator m = missing.missing.begin();
assert(!is_active());
// -- crash recovery?
if (is_crashed()) {
- replay_until = g_clock.now();
+ replay_until = ceph_clock_now(&g_ceph_context);
replay_until += g_conf->osd_replay_window;
dout(10) << "crashed, allowing op replay for " << g_conf->osd_replay_window
<< " until " << replay_until << dendl;
}
// just scrubbed?
- if (info.history.last_scrub_stamp + g_conf->osd_scrub_min_interval > g_clock.now()) {
+ if (info.history.last_scrub_stamp + g_conf->osd_scrub_min_interval > ceph_clock_now(&g_ceph_context)) {
dout(20) << "sched_scrub: just scrubbed, skipping" << dendl;
return true;
}
// finish up
osd->unreg_last_pg_scrub(info.pgid, info.history.last_scrub_stamp);
info.history.last_scrub = info.last_update;
- info.history.last_scrub_stamp = g_clock.now();
+ info.history.last_scrub_stamp = ceph_clock_now(&g_ceph_context);
osd->reg_last_pg_scrub(info.pgid, info.history.last_scrub_stamp);
{
void PG::RecoveryState::RecoveryMachine::log_exit(const char *state_name, utime_t enter_time)
{
- utime_t dur = g_clock.now() - enter_time;
+ utime_t dur = ceph_clock_now(&g_ceph_context) - enter_time;
dout(20) << "exit " << state_name << " " << dur << " " << event_count << " " << event_time << dendl;
- pg->osd->pg_recovery_stats.log_exit(state_name, g_clock.now() - enter_time,
+ pg->osd->pg_recovery_stats.log_exit(state_name, ceph_clock_now(&g_ceph_context) - enter_time,
event_count, event_time);
event_count = 0;
event_time = utime_t();
assert(!rctx);
rctx = new_ctx;
if (rctx)
- rctx->start_time = g_clock.now();
+ rctx->start_time = ceph_clock_now(&g_ceph_context);
}
void end_handle() {
if (rctx) {
- utime_t dur = g_clock.now() - rctx->start_time;
+ utime_t dur = ceph_clock_now(&g_ceph_context) - rctx->start_time;
machine.event_time += dur;
}
machine.event_count++;
const char *state_name;
utime_t enter_time;
const char *get_state_name() { return state_name; }
- NamedState() : enter_time(g_clock.now()) {}
+ NamedState() : enter_time(ceph_clock_now(&g_ceph_context)) {}
virtual ~NamedState() {}
};
}
// note my stats
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
// note some basic context for op replication that prepare_transaction may clobber
eversion_t old_last_update = log.head;
osd->logger->inc(l_osd_c_rd);
osd->logger->inc(l_osd_c_rdb, ctx->outdata.length());
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
utime_t diff = now;
diff -= ctx->op->get_recv_stamp();
//dout(20) << "do_op " << ctx->reqid << " total op latency " << diff << dendl;
tid_t rep_tid = osd->get_tid();
osd_reqid_t reqid(osd->cluster_messenger->get_myname(), 0, rep_tid);
OpContext *ctx = new OpContext(NULL, reqid, ops, &obc->obs, this);
- ctx->mtime = g_clock.now();
+ ctx->mtime = ceph_clock_now(&g_ceph_context);
ctx->at_version.epoch = osd->osdmap->get_epoch();
ctx->at_version.version = log.head.version + 1;
}
// unconnected
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
for (map<entity_name_t, utime_t>::iterator q = obc->unconnected_watchers.begin();
q != obc->unconnected_watchers.end();
q++) {
repop->sent_ack = true;
}
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
now -= repop->start;
osd->logger->finc(l_osd_rlsum, now);
osd->logger->inc(l_osd_rlnum, 1);
repop->waitfor_disk.insert(osd);
}
- repop->start = g_clock.now();
+ repop->start = ceph_clock_now(&g_ceph_context);
repop_queue.push_back(&repop->queue_item);
repop_map[repop->rep_tid] = repop;
if (!obc->obs.oi.watchers.empty()) {
// populate unconnected_watchers
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
for (map<entity_name_t, watch_info_t>::iterator p = obc->obs.oi.watchers.begin();
p != obc->obs.oi.watchers.end();
p++) {
if (!rm->committed) {
// send ack to acker only if we haven't sent a commit already
MOSDSubOpReply *ack = new MOSDSubOpReply(rm->op, 0, osd->osdmap->get_epoch(), CEPH_OSD_FLAG_ACK);
- ack->set_peer_stat(osd->get_my_stat_for(g_clock.now(), rm->ackerosd));
+ ack->set_peer_stat(osd->get_my_stat_for(ceph_clock_now(&g_ceph_context), rm->ackerosd));
ack->set_priority(CEPH_MSG_PRIO_HIGH); // this better match commit priority!
osd->cluster_messenger->
send_message(ack, osd->osdmap->get_cluster_inst(rm->ackerosd));
MOSDSubOpReply *commit = new MOSDSubOpReply(rm->op, 0, osd->osdmap->get_epoch(), CEPH_OSD_FLAG_ONDISK);
commit->set_last_complete_ondisk(rm->last_complete);
commit->set_priority(CEPH_MSG_PRIO_HIGH); // this better match ack priority!
- commit->set_peer_stat(osd->get_my_stat_for(g_clock.now(), rm->ackerosd));
+ commit->set_peer_stat(osd->get_my_stat_for(ceph_clock_now(&g_ceph_context), rm->ackerosd));
osd->cluster_messenger->
send_message(commit, osd->osdmap->get_cluster_inst(rm->ackerosd));
}
last_written.write_pos = safe_pos;
dout(10) << "write_head " << last_written << dendl;
- last_wrote_head = g_clock.now();
+ last_wrote_head = ceph_clock_now(&g_ceph_context);
bufferlist bl;
::encode(last_written, bl);
object_t oid = file_object_t(ino, 0);
object_locator_t oloc(pg_pool);
- objecter->write_full(oid, oloc, snapc, bl, g_clock.now(), 0,
+ objecter->write_full(oid, oloc, snapc, bl, ceph_clock_now(&g_ceph_context), 0,
NULL,
new C_WriteHead(this, last_written, oncommit));
}
// calc latency?
if (logger) {
- utime_t lat = g_clock.now();
+ utime_t lat = ceph_clock_now(&g_ceph_context);
lat -= stamp;
logger->favg(logger_key_lat, lat);
}
// submit write for anything pending
// flush _start_ pos to _finish_flush
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
SnapContext snapc;
Context *onsafe = new C_Flush(this, flush_pos, now); // on COMMIT
}
filer.write(ino, &layout, snapc,
- flush_pos, len, write_bl, g_clock.now(),
+ flush_pos, len, write_bl, ceph_clock_now(&g_ceph_context),
0,
NULL, onsafe);
}
// write head?
- if (last_wrote_head.sec() + g_conf->journaler_write_head_interval < g_clock.now().sec()) {
+ if (last_wrote_head.sec() + g_conf->journaler_write_head_interval < ceph_clock_now(&g_ceph_context).sec()) {
write_head();
}
}
}
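
Head rewrites are rate-limited by comparing whole seconds: the journal head is rewritten only once journaler_write_head_interval seconds have elapsed since last_wrote_head, with sub-second precision deliberately discarded by .sec() on both sides. Sketch of the predicate:

    // At most one head write per journaler_write_head_interval seconds
    // (give or take the sub-second truncation of .sec()).
    if (last_wrote_head.sec() + g_conf->journaler_write_head_interval
          < ceph_clock_now(&g_ceph_context).sec())
      write_head();  // write_head() also refreshes last_wrote_head
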
SnapContext snapc;
Context *c = new C_Journaler_Prezero(this, prezeroing_pos, len);
- filer.zero(ino, &layout, snapc, prezeroing_pos, len, g_clock.now(), 0, NULL, c);
+ filer.zero(ino, &layout, snapc, prezeroing_pos, len, ceph_clock_now(&g_ceph_context), 0, NULL, c);
prezeroing_pos += len;
}
}
uint64_t first = trimming_pos / period;
uint64_t num = (trim_to - trimming_pos) / period;
SnapContext snapc;
- filer.purge_range(ino, &layout, snapc, first, num, g_clock.now(), 0,
+ filer.purge_range(ino, &layout, snapc, first, num, ceph_clock_now(&g_ceph_context), 0,
new C_Trim(this, trim_to));
trimming_pos = trim_to;
}
void ObjectCacher::flush(loff_t amount)
{
- utime_t cutoff = g_clock.now();
+ utime_t cutoff = ceph_clock_now(&g_ceph_context);
//cutoff.sec_ref() -= g_conf->client_oc_max_dirty_age;
dout(10) << "flush " << amount << dendl;
int ObjectCacher::writex(OSDWrite *wr, ObjectSet *oset)
{
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
for (vector<ObjectExtent>::iterator ex_it = wr->extents.begin();
ex_it != wr->extents.end();
}
else {
// check tail of lru for old dirty items
- utime_t cutoff = g_clock.now();
+ utime_t cutoff = ceph_clock_now(&g_ceph_context);
cutoff.sec_ref()--;
BufferHead *bh = 0;
while ((bh = (BufferHead*)lru_dirty.lru_get_next_expire()) != 0 &&
}
}
if (flusher_stop) break;
- flusher_cond.WaitInterval(lock, utime_t(1,0));
+ flusher_cond.WaitInterval(&g_ceph_context, lock, utime_t(1,0));
}
lock.Unlock();
dout(10) << "flusher finish" << dendl;
void mark_dirty(BufferHead *bh) {
bh_set_state(bh, BufferHead::STATE_DIRTY);
lru_dirty.lru_touch(bh);
- //bh->set_dirty_stamp(g_clock.now());
+ //bh->set_dirty_stamp(ceph_clock_now(&g_ceph_context));
};
void bh_add(Object *ob, BufferHead *bh) {
set<OSDSession*> toping;
// look for laggy requests
- utime_t cutoff = g_clock.now();
+ utime_t cutoff = ceph_clock_now(&g_ceph_context);
cutoff -= g_conf->objecter_timeout; // timeout
for (hash_map<tid_t,Op*>::iterator p = ops.begin();
void Objecter::resend_mon_ops()
{
- utime_t cutoff = g_clock.now();
+ utime_t cutoff = ceph_clock_now(&g_ceph_context);
cutoff -= g_conf->objecter_mon_retry_interval;
op->paused = false;
op->incarnation = op->session->incarnation;
- op->stamp = g_clock.now();
+ op->stamp = ceph_clock_now(&g_ceph_context);
MOSDOp *m = new MOSDOp(client_inc, op->tid,
op->oid, op->oloc, op->pgid, osdmap->get_epoch(),
if (op->snapid) m->snapid = op->snapid;
if (op->crush_rule) m->crush_rule = op->crush_rule;
monc->send_mon_message(m);
- op->last_submit = g_clock.now();
+ op->last_submit = ceph_clock_now(&g_ceph_context);
}
/**
{
dout(10) << "poolstat_submit " << op->tid << dendl;
monc->send_mon_message(new MGetPoolStats(monc->get_fsid(), op->tid, op->pools, last_seen_pgmap_version));
- op->last_submit = g_clock.now();
+ op->last_submit = ceph_clock_now(&g_ceph_context);
}
void Objecter::handle_get_pool_stats_reply(MGetPoolStatsReply *m)
{
dout(10) << "fs_stats_submit" << op->tid << dendl;
monc->send_mon_message(new MStatfs(monc->get_fsid(), op->tid, last_seen_pgmap_version));
- op->last_submit = g_clock.now();
+ op->last_submit = ceph_clock_now(&g_ceph_context);
}
void Objecter::handle_fs_stats_reply(MStatfsReply *m) {
pthread_create(&print_thread, NULL, status_printer, (void *)data);
dataLock.Lock();
- data->start_time = g_clock.now();
+ data->start_time = ceph_clock_now(&g_ceph_context);
dataLock.Unlock();
for (int i = 0; i<concurrentios; ++i) {
- start_times[i] = g_clock.now();
+ start_times[i] = ceph_clock_now(&g_ceph_context);
completions[i] = rados.aio_create_completion((void *) &cond, 0,
&_aio_cb);
r = io_ctx.aio_write(name[i], completions[i], *contents[i], data->object_size, 0);
runtime.set_from_double(secondsToRun);
stopTime = data->start_time + runtime;
- while( g_clock.now() < stopTime ) {
+ while( ceph_clock_now(&g_ceph_context) < stopTime ) {
dataLock.Lock();
while (1) {
for (slot = 0; slot < concurrentios; ++slot) {
dataLock.Unlock();
goto ERR;
}
- data->cur_latency = g_clock.now() - start_times[slot];
+ data->cur_latency = ceph_clock_now(&g_ceph_context) - start_times[slot];
total_latency += data->cur_latency;
if( data->cur_latency > data->max_latency) data->max_latency = data->cur_latency;
if (data->cur_latency < data->min_latency) data->min_latency = data->cur_latency;
dataLock.Unlock();
completions[slot]->release();
completions[slot] = 0;
- timePassed = g_clock.now() - data->start_time;
+ timePassed = ceph_clock_now(&g_ceph_context) - data->start_time;
//write new stuff to rados, then delete old stuff
//and save locations of new stuff for later deletion
- start_times[slot] = g_clock.now();
+ start_times[slot] = ceph_clock_now(&g_ceph_context);
completions[slot] = rados.aio_create_completion((void *) &cond, 0, &_aio_cb);
r = io_ctx.aio_write(newName, completions[slot], *newContents, data->object_size, 0);
if (r < 0) {//naughty; doesn't clean up heap space.
dataLock.Unlock();
goto ERR;
}
- data->cur_latency = g_clock.now() - start_times[slot];
+ data->cur_latency = ceph_clock_now(&g_ceph_context) - start_times[slot];
total_latency += data->cur_latency;
if (data->cur_latency > data->max_latency) data->max_latency = data->cur_latency;
if (data->cur_latency < data->min_latency) data->min_latency = data->cur_latency;
delete contents[slot];
}
- timePassed = g_clock.now() - data->start_time;
+ timePassed = ceph_clock_now(&g_ceph_context) - data->start_time;
dataLock.Lock();
data->done = true;
dataLock.Unlock();
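
Throughout the bench, each in-flight slot carries its own start stamp; completion latency is now minus start_times[slot], folded into a running total plus min/max so the printer thread can report averages. A sketch of the per-op accounting, slot bookkeeping elided:

    start_times[slot] = ceph_clock_now(&g_ceph_context);   // op issued
    // ... wait for completions[slot] to fire ...
    data->cur_latency = ceph_clock_now(&g_ceph_context) - start_times[slot];
    total_latency += data->cur_latency;
    if (data->cur_latency > data->max_latency) data->max_latency = data->cur_latency;
    if (data->cur_latency < data->min_latency) data->min_latency = data->cur_latency;
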
pthread_create(&print_thread, NULL, status_printer, (void *)data);
dataLock.Lock();
- data->start_time = g_clock.now();
+ data->start_time = ceph_clock_now(&g_ceph_context);
dataLock.Unlock();
utime_t finish_time = data->start_time + time_to_run;
//start initial reads
for (int i = 0; i < concurrentios; ++i) {
index[i] = i;
- start_times[i] = g_clock.now();
+ start_times[i] = ceph_clock_now(&g_ceph_context);
completions[i] = rados.aio_create_completion((void *) &cond, &_aio_cb, 0);
r = io_ctx.aio_read(name[i], completions[i], contents[i], data->object_size, 0);
if (r < 0) { //naughty, doesn't clean up heap -- oh, or handle the print thread!
char* newName;
bufferlist *cur_contents;
- while (seconds_to_run && (g_clock.now() < finish_time) &&
+ while (seconds_to_run && (ceph_clock_now(&g_ceph_context) < finish_time) &&
write_data->finished > data->started) {
dataLock.Lock();
while (1) {
dataLock.Unlock();
goto ERR;
}
- data->cur_latency = g_clock.now() - start_times[slot];
+ data->cur_latency = ceph_clock_now(&g_ceph_context) - start_times[slot];
total_latency += data->cur_latency;
if( data->cur_latency > data->max_latency) data->max_latency = data->cur_latency;
if (data->cur_latency < data->min_latency) data->min_latency = data->cur_latency;
cur_contents = contents[slot];
//start new read and check data if requested
- start_times[slot] = g_clock.now();
+ start_times[slot] = ceph_clock_now(&g_ceph_context);
contents[slot] = new bufferlist();
completions[slot] = rados.aio_create_completion((void *) &cond, &_aio_cb, 0);
r = io_ctx.aio_read(newName, completions[slot], contents[slot], data->object_size, 0);
dataLock.Unlock();
goto ERR;
}
- data->cur_latency = g_clock.now() - start_times[slot];
+ data->cur_latency = ceph_clock_now(&g_ceph_context) - start_times[slot];
total_latency += data->cur_latency;
if (data->cur_latency > data->max_latency) data->max_latency = data->cur_latency;
if (data->cur_latency < data->min_latency) data->min_latency = data->cur_latency;
delete contents[slot];
}
- runtime = g_clock.now() - data->start_time;
+ runtime = ceph_clock_now(&g_ceph_context) - data->start_time;
dataLock.Lock();
data->done = true;
dataLock.Unlock();
/ (1024*1024)
/ cycleSinceChange;
avg_bandwidth = (double) (data->trans_size) * (data->finished)
- / (double)(g_clock.now() - data->start_time) / (1024*1024);
+ / (double)(ceph_clock_now(&g_ceph_context) - data->start_time) / (1024*1024);
if (previous_writes != data->finished) {
previous_writes = data->finished;
cycleSinceChange = 0;
}
++i;
++cycleSinceChange;
- cond.WaitInterval(dataLock, ONE_SECOND);
+ cond.WaitInterval(&g_ceph_context, dataLock, ONE_SECOND);
}
dataLock.Unlock();
return NULL;
entry.owner = s->acl->get_owner().get_id();
entry.time = s->time;
- entry.total_time = g_clock.now() - s->time;
+ entry.total_time = ceph_clock_now(&g_ceph_context) - s->time;
entry.bytes_sent = s->bytes_sent;
entry.bytes_received = s->bytes_received;
if (s->err.http_ret) {
info.num = atoi(part_num.c_str());
info.etag = etag;
info.size = s->obj_size;
- info.modified = g_clock.now();
+ info.modified = ceph_clock_now(&g_ceph_context);
::encode(info, bl);
RGW_LOG(0) << "JJJ name=" << p << "bl.length()=" << bl.length() << dendl;
meta_attrs[p] = bl;
s->os_auth_token = NULL;
s->os_user = NULL;
s->os_groups = NULL;
- s->time = g_clock.now();
+ s->time = ceph_clock_now(&g_ceph_context);
s->user.clear();
s->perm_mask = 0;
}
if (ret < 0)
return ret;
- utime_t expiration = g_clock.now();
+ utime_t expiration = ceph_clock_now(&g_ceph_context);
expiration += RGW_OS_TOKEN_EXPIRATION; // 15 minutes
ret = build_token(os_user, key, nonce, expiration, bl);
RGW_LOG(0) << "failed to decode token" << dendl;
return -EINVAL;
}
- if (expiration < g_clock.now()) {
- RGW_LOG(0) << "old timed out token was used now=" << g_clock.now() << " token.expiration=" << expiration << dendl;
+ if (expiration < ceph_clock_now(&g_ceph_context)) {
+ RGW_LOG(0) << "old timed out token was used now=" << ceph_clock_now(&g_ceph_context) << " token.expiration=" << expiration << dendl;
return -EPERM;
}
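
Token validity is a plain timestamp comparison: mint with expiration = now + RGW_OS_TOKEN_EXPIRATION, reject once expiration < now. Both halves in one sketch, per the hunks above:

    // Mint side:
    utime_t expiration = ceph_clock_now(&g_ceph_context);
    expiration += RGW_OS_TOKEN_EXPIRATION;  // the 15-minute constant above
    // (token is encoded, handed to the client, and later decoded)
    // Verify side:
    if (expiration < ceph_clock_now(&g_ceph_context))
      return -EPERM;                        // stale token, reject
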
off_t off;
C_Ack(off_t o) : off(o) {}
void finish(int r) {
- set_ack(off, g_clock.now());
+ set_ack(off, ceph_clock_now(&g_ceph_context));
}
};
struct C_Commit : public Context {
off_t off;
C_Commit(off_t o) : off(o) {}
void finish(int r) {
- set_commit(off, g_clock.now());
+ set_commit(off, ceph_clock_now(&g_ceph_context));
}
};
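
These two Contexts record when the journal acks versus when data reaches disk; pairing them on one queue_transaction call, as in the streamtest hunk below, is what lets the test plot ack-vs-commit latency per offset:

    // Usage pattern: stamp the submit time, then let the two Contexts
    // stamp journal-ack and on-disk commit for the same offset.
    set_start(pos, ceph_clock_now(&g_ceph_context));
    fs->queue_transaction(NULL, t, new C_Ack(pos), new C_Commit(pos));
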
ft.create_collection(coll_t());
fs->apply_transaction(ft);
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
utime_t end = now;
end += seconds;
off_t pos = 0;
while (now < end) {
sobject_t poid(object_t("streamtest"), 0);
- set_start(pos, g_clock.now());
+ set_start(pos, ceph_clock_now(&g_ceph_context));
ObjectStore::Transaction *t = new ObjectStore::Transaction;
t->write(coll_t(), poid, pos, bytes, bl);
fs->queue_transaction(NULL, t, new C_Ack(pos), new C_Commit(pos));
if (lock)
lock->Lock();
utime_t inc(2 * i, 0);
- utime_t t = g_clock.now() + inc;
+ utime_t t = ceph_clock_now(&g_ceph_context) + inc;
timer.add_event_at(t, test_contexts[i]);
if (lock)
lock->Unlock();
{
utime_t inc(100, 0);
- utime_t t = g_clock.now() + inc;
+ utime_t t = ceph_clock_now(&g_ceph_context) + inc;
lock->Lock();
timer.add_event_at(t, test_contexts[0]);
lock->Unlock();
{
utime_t inc(2, 0);
- utime_t t = g_clock.now() + inc;
+ utime_t t = ceph_clock_now(&g_ceph_context) + inc;
lock->Lock();
timer.add_event_at(t, test_contexts[1]);
lock->Unlock();
safe_timer_lock.Lock();
for (int i = 0; i < MAX_TEST_CONTEXTS; ++i) {
utime_t inc(4 * i, 0);
- utime_t t = g_clock.now() + inc;
+ utime_t t = ceph_clock_now(&g_ceph_context) + inc;
safe_timer.add_event_at(t, test_contexts[i]);
}
safe_timer_lock.Unlock();
safe_timer_lock.Lock();
for (int i = 0; i < MAX_TEST_CONTEXTS; ++i) {
utime_t inc(4 * i, 0);
- utime_t t = g_clock.now() + inc;
+ utime_t t = ceph_clock_now(&g_ceph_context) + inc;
safe_timer.add_event_at(t, test_contexts[i]);
}
safe_timer_lock.Unlock();
}
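
Scheduling in these timer tests is absolute: events are registered at now + inc via add_event_at, spreading the callbacks several seconds apart. The idiom, with SomeTestContext standing in for the test's Context subclass:

    safe_timer_lock.Lock();
    utime_t t = ceph_clock_now(&g_ceph_context) + utime_t(4, 0);
    safe_timer.add_event_at(t, new SomeTestContext);  // hypothetical Context
    safe_timer_lock.Unlock();
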
- utime_t start = g_clock.now();
+ utime_t start = ceph_clock_now(&g_ceph_context);
while (loop++ < count) {
ret = safe_write(fd, buf, bsize);
if (ret)
}
::fsync(fd);
::close(fd);
- utime_t end = g_clock.now();
+ utime_t end = ceph_clock_now(&g_ceph_context);
end -= start;
int s = blocks*4096;
- utime_t start = g_clock.now();
+ utime_t start = ceph_clock_now(&g_ceph_context);
for (int i=0; i<count; i++) {
off64_t so, o = (lrand48() % numblocks) * 4096;
//cout << "s = " << s << " o = " << o << endl;
//int r = ::read(fd, buf, s);
if (r < 0) cout << "r = " << r << " " << strerror(errno) << endl;
}
- utime_t end = g_clock.now();
+ utime_t end = ceph_clock_now(&g_ceph_context);
double timeper = end - start;
timeper /= count;
so = o + 4096*((rand() % range) );//- range/2);
//cout << o << " " << so << " " << (so-o) << endl;
- utime_t start = g_clock.now();
+ utime_t start = ceph_clock_now(&g_ceph_context);
lseek64(fd, so, SEEK_SET);
r = ::read(fd, buf, blocks*4096);
- utime_t end = g_clock.now();
+ utime_t end = ceph_clock_now(&g_ceph_context);
timeper += (end-start);
}
DecayCounter dc(hl);
RealCounter rc;
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
for (int ms=0; ms < 300*1000; ms++) {
if (ms % 30000 == 0) {
char aes_key[AES_KEY_LEN];
memset(aes_key, 0x77, sizeof(aes_key));
bufferptr keybuf(aes_key, sizeof(aes_key));
- CryptoKey key(CEPH_CRYPTO_AES, g_clock.now(), keybuf);
+ CryptoKey key(CEPH_CRYPTO_AES, ceph_clock_now(&g_ceph_context), keybuf);
const char *msg="hello! this is a message\n";
char pad[16];
char aes_key[AES_KEY_LEN];
memset(aes_key, 0x77, sizeof(aes_key));
bufferptr keybuf(aes_key, sizeof(aes_key));
- CryptoKey key(CEPH_CRYPTO_AES, g_clock.now(), keybuf);
+ CryptoKey key(CEPH_CRYPTO_AES, ceph_clock_now(&g_ceph_context), keybuf);
const char *msg="hello! this is a message\n";
char pad[16];
static void handle_notify(CephToolCtx *ctx, MMonObserveNotify *notify)
{
- utime_t now = g_clock.now();
+ utime_t now = ceph_clock_now(&g_ceph_context);
dout(1) << notify->get_source() << " -> " << get_paxos_name(notify->machine_id)
<< " v" << notify->ver
m->set_data(pending_bl);
if (!ctx->concise)
- *ctx->log << g_clock.now() << " mon" << " <- " << pending_cmd << std::endl;
+ *ctx->log << ceph_clock_now(&g_ceph_context) << " mon" << " <- " << pending_cmd << std::endl;
ctx->mc.send_mon_message(m);
}
rs = reply_rs;
rbl = reply_bl;
if (!ctx->concise)
- *ctx->log << g_clock.now() << " "
+ *ctx->log << ceph_clock_now(&g_ceph_context) << " "
<< reply_from.name << " -> '"
<< reply_rs << "' (" << reply_rc << ")"
<< std::endl;
CephToolCtx *ctx = gui->ctx;
ctx->lock.Lock();
while (true) {
- utime_t t(g_clock.now());
+ utime_t t(ceph_clock_now(&g_ceph_context));
t += 3.0;
ctx->gui_cond.WaitUntil(ctx->lock, t);
if (shutting_down) {