From 46548c51255518e422d544f0d0776068051fdacc Mon Sep 17 00:00:00 2001
From: "Adam C. Emerson"
Date: Mon, 14 Nov 2016 19:33:56 -0500
Subject: [PATCH] common: Unskew clock

In preparation for deglobalizing CephContext, remove from
ceph_clock_now() and ceph::real_clock::now() the CephContext*
parameter that carried a configurable offset.

Signed-off-by: Adam C. Emerson

(cherry picked from commit 750ad8340c827d2f8896e1251e45f921dddb9f30)

Conflicts:
	src/mon/PGMonitor.cc
---
 qa/suites/rados/multimon/clusters/3.yaml | 3 +-
 .../multimon/tasks/mon_clock_with_skews.yaml | 8 +-
 src/auth/Crypto.cc | 2 +-
 src/auth/cephx/CephxKeyServer.cc | 10 +--
 src/auth/cephx/CephxProtocol.cc | 6 +-
 src/auth/cephx/CephxServiceHandler.cc | 2 +-
 src/client/Client.cc | 76 +++++++++---------
 src/client/Inode.cc | 2 +-
 src/client/SyntheticClient.cc | 68 ++++++++--------
 src/client/SyntheticClient.h | 10 +--
 src/cls/lock/cls_lock.cc | 4 +-
 src/cls/rbd/cls_rbd.cc | 2 +-
 src/common/Clock.cc | 17 ++--
 src/common/Clock.h | 12 ++-
 src/common/Cond.h | 8 +-
 src/common/Finisher.cc | 4 +-
 src/common/LogClient.cc | 2 +-
 src/common/Mutex.cc | 4 +-
 src/common/Throttle.cc | 4 +-
 src/common/Timer.cc | 6 +-
 src/common/TrackedOp.cc | 12 +--
 src/common/TrackedOp.h | 4 +-
 src/common/WorkQueue.cc | 6 +-
 src/common/assert.cc | 4 +-
 src/common/ceph_context.cc | 2 +-
 src/common/ceph_time.cc | 19 +----
 src/common/ceph_time.h | 7 +-
 src/common/obj_bencher.cc | 54 ++++++-------
 src/key_value_store/kv_flat_btree_async.cc | 16 ++--
 src/kv/LevelDBStore.cc | 16 ++--
 src/kv/RocksDBStore.cc | 16 ++--
 src/librados/IoCtxImpl.cc | 20 ++---
 src/librados/RadosClient.cc | 6 +-
 src/librbd/AioCompletion.cc | 4 +-
 src/librbd/internal.cc | 4 +-
 src/log/Log.cc | 12 +--
 src/log/test.cc | 20 ++---
 src/mds/Beacon.cc | 12 +--
 src/mds/CDentry.cc | 2 +-
 src/mds/CDir.cc | 20 ++---
 src/mds/CInode.cc | 22 +++---
 src/mds/CInode.h | 2 +-
 src/mds/DamageTable.h | 2 +-
 src/mds/JournalPointer.cc | 4 +-
 src/mds/Locker.cc | 14 ++--
 src/mds/MDBalancer.cc | 16 ++--
 src/mds/MDCache.cc | 28 +++----
 src/mds/MDLog.cc | 6 +-
 src/mds/MDSRank.cc | 4 +-
 src/mds/MDSTable.cc | 2 +-
 src/mds/Migrator.cc | 10 +--
 src/mds/Server.cc | 22 +++---
 src/mds/SessionMap.cc | 17 ++--
 src/mds/SessionMap.h | 2 +-
 src/mds/StrayManager.cc | 12 +--
 src/mds/mdstypes.h | 4 +-
 src/messages/MHeartbeat.h | 2 +-
 src/mgr/DaemonState.cc | 2 +-
 src/mon/DataHealthService.cc | 4 +-
 src/mon/Elector.cc | 4 +-
 src/mon/LogMonitor.cc | 4 +-
 src/mon/MDSMonitor.cc | 22 +++---
 src/mon/MgrMonitor.cc | 6 +-
 src/mon/MonClient.cc | 14 ++--
 src/mon/MonClient.h | 2 +-
 src/mon/MonMap.cc | 4 +-
 src/mon/Monitor.cc | 32 ++++----
 src/mon/Monitor.h | 2 +-
 src/mon/MonmapMonitor.cc | 8 +-
 src/mon/OSDMonitor.cc | 24 +++---
 src/mon/PGMap.cc | 2 +-
 src/mon/PGMonitor.cc | 20 ++---
 src/mon/Paxos.cc | 60 +++++++-------
 src/mon/Paxos.h | 6 +-
 src/mon/PaxosService.cc | 2 +-
 src/mon/Session.h | 2 +-
 src/msg/DispatchQueue.cc | 2 +-
 src/msg/Messenger.h | 4 +-
 src/msg/async/AsyncConnection.cc | 14 ++--
 src/msg/async/dpdk/IP.cc | 4 +-
 src/msg/async/dpdk/IP.h | 2 +-
 src/msg/simple/Pipe.cc | 16 ++--
 src/msg/xio/XioConnection.cc | 12 +--
 src/os/bluestore/BlueFS.cc | 14 ++--
 src/os/bluestore/BlueStore.cc | 26 +++---
 src/os/bluestore/BlueStore.h | 4 +-
 src/os/bluestore/KernelDevice.cc | 6 +-
 src/os/filestore/FileJournal.cc | 8 +-
 src/os/filestore/FileStore.cc | 24 +++---
 src/os/kstore/KStore.cc | 4 +-
 src/os/kstore/KStore.h | 6 +-
 src/osd/OSD.cc | 60 +++++++-------
 src/osd/OSD.h | 6 +-
 src/osd/OSDMap.cc | 2 +-
 src/osd/PG.cc | 78 +++++++++----------
 src/osd/PG.h | 2 +-
 src/osd/PGBackend.cc | 2 +-
 src/osd/PrimaryLogPG.cc | 40 +++++-----
 src/osd/ReplicatedBackend.cc | 2 +-
 src/osd/Watch.cc | 4 +-
 src/osdc/Journaler.cc | 21 ++---
 src/osdc/ObjectCacher.cc | 2 +-
 src/rgw/librgw.cc | 4 +-
 src/rgw/rgw_admin.cc | 2 +-
 src/rgw/rgw_bucket.cc | 2 +-
 src/rgw/rgw_common.cc | 2 +-
 src/rgw/rgw_coroutine.cc | 2 +-
 src/rgw/rgw_cr_rados.h | 2 +-
 src/rgw/rgw_file.cc | 2 +-
 src/rgw/rgw_gc.cc | 10 +--
 src/rgw/rgw_keystone.cc | 4 +-
 src/rgw/rgw_keystone.h | 2 +-
 src/rgw/rgw_lc.cc | 14 ++--
 src/rgw/rgw_loadgen_process.cc | 2 +-
 src/rgw/rgw_log.cc | 6 +-
 src/rgw/rgw_object_expirer_core.cc | 10 +--
 src/rgw/rgw_op.cc | 22 +++---
 src/rgw/rgw_orphan.cc | 2 +-
 src/rgw/rgw_policy_s3.cc | 2 +-
 src/rgw/rgw_quota.cc | 11 +--
 src/rgw/rgw_rados.cc | 12 +--
 src/rgw/rgw_request.cc | 4 +-
 src/rgw/rgw_rest_client.cc | 12 +--
 src/rgw/rgw_rest_s3.cc | 2 +-
 src/rgw/rgw_rest_swift.cc | 4 +-
 src/rgw/rgw_swift_auth.cc | 6 +-
 src/test/TestTimers.cc | 10 +--
 src/test/bench_log.cc | 6 +-
 src/test/bufferlist.cc | 60 +++++++-------
 src/test/cls_lock/test_cls_lock.cc | 4 +-
 src/test/cls_log/test_cls_log.cc | 6 +-
 src/test/cls_statelog/test_cls_statelog.cc | 2 +-
 src/test/common/test_crc32c.cc | 20 ++---
 src/test/crypto.cc | 6 +-
 .../ceph_erasure_code_benchmark.cc | 8 +-
 src/test/journal/RadosTestFixture.cc | 3 +-
 src/test/journal/test_JournalPlayer.cc | 2 -
 src/test/journal/test_ObjectRecorder.cc | 2 -
 src/test/kv_store_bench.cc | 2 +-
 src/test/kv_store_bench.h | 4 +-
 src/test/librados/tier.cc | 16 ++--
 .../librados_test_stub/TestMemIoCtxImpl.cc | 2 +-
 src/test/librbd/journal/test_Entries.cc | 6 +-
 src/test/librbd/test_ImageWatcher.cc | 6 +-
 src/test/librbd/test_librbd.cc | 6 +-
 src/test/librbd/test_mock_ObjectWatcher.cc | 3 +-
 src/test/mon/test-mon-msg.cc | 6 +-
 src/test/mon/test_mon_workloadgen.cc | 10 +--
 src/test/msgr/test_async_networkstack.cc | 4 +-
 src/test/msgr/test_msgr.cc | 4 +-
 src/test/objectstore/test_kv.cc | 4 +-
 src/test/objectstore/test_transaction.cc | 4 +-
 src/test/objectstore/workload_generator.cc | 6 +-
 src/test/old/test_disk_bw.cc | 4 +-
 src/test/old/test_seek_read.c | 4 +-
 src/test/old/test_short_seek_read.c | 4 +-
 src/test/old/testcounter.cc | 2 +-
 src/test/omap_bench.cc | 4 +-
 src/test/perf_local.cc | 2 +-
 .../image_sync/test_mock_ImageCopyRequest.cc | 6 +-
 src/test/rbd_mirror/random_write.cc | 2 +-
 src/test/rbd_mirror/test_ImageReplayer.cc | 2 +-
 src/test/test_rewrite_latency.cc | 4 +-
 src/test/testcrypto.cc | 2 +-
 src/test/testkeys.cc | 2 +-
 src/tools/ceph_kvstore_tool.cc | 6 +-
 src/tools/ceph_monstore_tool.cc | 4 +-
 src/tools/cephfs/DataScan.cc | 4 +-
 src/tools/cephfs/Dumper.cc | 6 +-
 src/tools/monmaptool.cc | 2 +-
 src/tools/rados/rados.cc | 10 +--
 src/tools/rbd/action/Bench.cc | 8 +-
 src/tools/rbd_mirror/Mirror.cc | 5 +-
 src/tools/rbd_mirror/Replayer.cc | 7 +-
 174 files changed, 834 insertions(+), 869 deletions(-)

diff --git a/qa/suites/rados/multimon/clusters/3.yaml b/qa/suites/rados/multimon/clusters/3.yaml
index e30dc76f381d8..e663324b5d61c 100644
--- a/qa/suites/rados/multimon/clusters/3.yaml
+++ b/qa/suites/rados/multimon/clusters/3.yaml
@@ -1,5 +1,6 @@
 roles:
-- [mon.a, mon.b, mon.c, osd.0, osd.1]
+- [mon.a, mon.c, osd.0]
+- [mon.b, osd.1]
 openstack:
 - volumes: # attached to each instance
     count: 2
diff --git a/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml b/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml
index 2953e0d6dc2cd..1c6c1538b800e 100644
--- a/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml
+++ b/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml
@@ -1,10 +1,8 @@
-overrides:
-  ceph:
-    conf:
-      mon.b:
-        clock offset: 10
 tasks:
 - install:
+- exec:
+    mon.b:
+    - date -u -s @$(expr $(date -u +%s) + 10)
 - ceph:
     wait-for-healthy: false
     log-whitelist:
diff --git a/src/auth/Crypto.cc b/src/auth/Crypto.cc
index 032dc2d494fa0..3746975ca9ddc 100644
--- a/src/auth/Crypto.cc
+++ b/src/auth/Crypto.cc
@@ -446,7 +446,7 @@ int CryptoKey::create(CephContext *cct, int t)
   r = _set_secret(t, s);
   if (r < 0)
     return r;
-  created = ceph_clock_now(cct);
+  created = ceph_clock_now();
   return r;
 }
diff --git a/src/auth/cephx/CephxKeyServer.cc b/src/auth/cephx/CephxKeyServer.cc
index 6524e616fdeb7..e06de6602eba2 100644
--- a/src/auth/cephx/CephxKeyServer.cc
+++ b/src/auth/cephx/CephxKeyServer.cc
@@ -34,12 +34,12 @@ bool KeyServerData::get_service_secret(CephContext *cct, uint32_t service_id,
   const RotatingSecrets& secrets = iter->second;
   // second to oldest, unless it's expired
-  map::const_iterator riter =
+  map::const_iterator riter =
     secrets.secrets.begin();
   if (secrets.secrets.size() > 1)
     ++riter;
-  if (riter->second.expiration < ceph_clock_now(cct))
+  if (riter->second.expiration < ceph_clock_now())
     ++riter;   // "current" key has expired, use "next" key instead
   secret_id = riter->first;
@@ -189,7 +189,7 @@ int KeyServer::_rotate_secret(uint32_t service_id)
 {
   RotatingSecrets& r = data.rotating_secrets[service_id];
   int added = 0;
-  utime_t now = ceph_clock_now(cct);
+  utime_t now = ceph_clock_now();
   double ttl = service_id == CEPH_ENTITY_TYPE_AUTH ? cct->_conf->auth_mon_ticket_ttl : cct->_conf->auth_service_ticket_ttl;
   while (r.need_new_secrets(now)) {
@@ -267,7 +267,7 @@ bool KeyServer::generate_secret(CryptoKey& secret)
   if (crypto->create(bp) < 0)
     return false;
-  secret.set_secret(CEPH_CRYPTO_AES, bp, ceph_clock_now(NULL));
+  secret.set_secret(CEPH_CRYPTO_AES, bp, ceph_clock_now());
   return true;
 }
@@ -426,7 +426,7 @@ int KeyServer::_build_session_auth_info(uint32_t service_id, CephXServiceTicketI
 {
   info.service_id = service_id;
   info.ticket = auth_ticket_info.ticket;
-  info.ticket.init_timestamps(ceph_clock_now(cct), cct->_conf->auth_service_ticket_ttl);
+  info.ticket.init_timestamps(ceph_clock_now(), cct->_conf->auth_service_ticket_ttl);
   generate_secret(info.session_key);
diff --git a/src/auth/cephx/CephxProtocol.cc b/src/auth/cephx/CephxProtocol.cc
index 0e668c67cf5e2..ae61d09dde9ad 100644
--- a/src/auth/cephx/CephxProtocol.cc
+++ b/src/auth/cephx/CephxProtocol.cc
@@ -179,7 +179,7 @@ bool CephXTicketHandler::verify_service_ticket_reply(CryptoKey& secret,
 	   << " validity=" << msg_a.validity << dendl;
   session_key = msg_a.session_key;
   if (!msg_a.validity.is_zero()) {
-    expires = ceph_clock_now(cct);
+    expires = ceph_clock_now();
     expires += msg_a.validity;
     renew_after = expires;
     renew_after -= ((double)msg_a.validity.sec() / 4);
@@ -193,7 +193,7 @@ bool CephXTicketHandler::verify_service_ticket_reply(CryptoKey& secret,
 bool CephXTicketHandler::have_key()
 {
   if (have_key_flag) {
-    have_key_flag = ceph_clock_now(cct) < expires;
+    have_key_flag = ceph_clock_now() < expires;
   }
   return have_key_flag;
@@ -202,7 +202,7 @@ bool CephXTicketHandler::have_key()
 bool CephXTicketHandler::need_key() const
 {
   if (have_key_flag) {
-    return (!expires.is_zero()) && (ceph_clock_now(cct) >= renew_after);
+    return (!expires.is_zero()) && (ceph_clock_now() >= renew_after);
   }
   return true;
diff --git a/src/auth/cephx/CephxServiceHandler.cc b/src/auth/cephx/CephxServiceHandler.cc
index 15d27f540c767..3184835a14dcf 100644
---
a/src/auth/cephx/CephxServiceHandler.cc +++ b/src/auth/cephx/CephxServiceHandler.cc @@ -106,7 +106,7 @@ int CephxServiceHandler::handle_request(bufferlist::iterator& indata, bufferlist should_enc_ticket = true; } - info.ticket.init_timestamps(ceph_clock_now(cct), cct->_conf->auth_mon_ticket_ttl); + info.ticket.init_timestamps(ceph_clock_now(), cct->_conf->auth_mon_ticket_ttl); info.ticket.name = entity_name; info.ticket.global_id = global_id; info.ticket.auid = eauth.auid; diff --git a/src/client/Client.cc b/src/client/Client.cc index 2263f87863a9e..81be35b983560 100644 --- a/src/client/Client.cc +++ b/src/client/Client.cc @@ -1628,7 +1628,7 @@ int Client::make_request(MetaRequest *request, request->set_tid(tid); // and timestamp - request->op_stamp = ceph_clock_now(NULL); + request->op_stamp = ceph_clock_now(); // make note mds_requests[tid] = request->get(); @@ -1745,7 +1745,7 @@ int Client::make_request(MetaRequest *request, pdirbl->claim(reply->get_extra_bl()); // -- log times -- - utime_t lat = ceph_clock_now(cct); + utime_t lat = ceph_clock_now(); lat -= request->sent_stamp; ldout(cct, 20) << "lat " << lat << dendl; logger->tinc(l_c_lat, lat); @@ -2138,7 +2138,7 @@ void Client::send_request(MetaRequest *request, MetaSession *session, } if (request->mds == -1) { - request->sent_stamp = ceph_clock_now(cct); + request->sent_stamp = ceph_clock_now(); ldout(cct, 20) << "send_request set sent_stamp to " << request->sent_stamp << dendl; } request->mds = mds; @@ -3158,7 +3158,7 @@ int Client::get_caps_used(Inode *in) void Client::cap_delay_requeue(Inode *in) { ldout(cct, 10) << "cap_delay_requeue on " << *in << dendl; - in->hold_caps_until = ceph_clock_now(cct); + in->hold_caps_until = ceph_clock_now(); in->hold_caps_until += cct->_conf->client_caps_release_delay; delayed_caps.push_back(&in->cap_item); } @@ -3334,7 +3334,7 @@ void Client::check_caps(Inode *in, unsigned flags) else cap_delay_requeue(in); - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); map::iterator it = in->caps.begin(); while (it != in->caps.end()) { @@ -5783,13 +5783,13 @@ void Client::unmount() lru.lru_set_max(0); trim_cache(); - while (lru.lru_get_size() > 0 || + while (lru.lru_get_size() > 0 || !inode_map.empty()) { - ldout(cct, 2) << "cache still has " << lru.lru_get_size() - << "+" << inode_map.size() << " items" + ldout(cct, 2) << "cache still has " << lru.lru_get_size() + << "+" << inode_map.size() << " items" << ", waiting (for caps to release?)" << dendl; - utime_t until = ceph_clock_now(cct) + utime_t(5, 0); + utime_t until = ceph_clock_now() + utime_t(5, 0); int r = mount_cond.WaitUntil(client_lock, until); if (r == ETIMEDOUT) { dump_cache(NULL); @@ -5855,7 +5855,7 @@ void Client::tick() tick_event = new C_C_Tick(this); timer.add_event_after(cct->_conf->client_tick_interval, tick_event); - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); if (!mounted && !mds_requests.empty()) { MetaRequest *req = mds_requests.begin()->second; @@ -5900,8 +5900,8 @@ void Client::tick() void Client::renew_caps() { ldout(cct, 10) << "renew_caps()" << dendl; - last_cap_renew = ceph_clock_now(cct); - + last_cap_renew = ceph_clock_now(); + for (map::iterator p = mds_sessions.begin(); p != mds_sessions.end(); ++p) { @@ -5914,7 +5914,7 @@ void Client::renew_caps() void Client::renew_caps(MetaSession *session) { ldout(cct, 10) << "renew_caps mds." 
<< session->mds_num << dendl; - session->last_cap_renew_request = ceph_clock_now(cct); + session->last_cap_renew_request = ceph_clock_now(); uint64_t seq = ++session->cap_renew_seq; session->con->send_message(new MClientSession(CEPH_SESSION_REQUEST_RENEWCAPS, seq)); } @@ -5989,7 +5989,7 @@ int Client::_lookup(Inode *dir, const string& dname, int mask, InodeRef *target, if (!dn->inode || dn->inode->caps_issued_mask(mask)) { // is dn lease valid? - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); if (dn->lease_mds >= 0 && dn->lease_ttl > now && mds_sessions.count(dn->lease_mds)) { @@ -6056,9 +6056,9 @@ int Client::get_or_create(Inode *dir, const char* name, Dentry *dn = dir->dir->dentries[name]; // is dn lease valid? - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); if (dn->inode && - dn->lease_mds >= 0 && + dn->lease_mds >= 0 && dn->lease_ttl > now && mds_sessions.count(dn->lease_mds)) { MetaSession *s = mds_sessions[dn->lease_mds]; @@ -6513,7 +6513,7 @@ int Client::_do_setattr(Inode *in, struct ceph_statx *stx, int mask, if (!mask) { // caller just needs us to bump the ctime - in->ctime = ceph_clock_now(cct); + in->ctime = ceph_clock_now(); in->cap_dirtier_uid = perms.uid(); in->cap_dirtier_gid = perms.gid(); if (issued & CEPH_CAP_AUTH_EXCL) @@ -6532,7 +6532,7 @@ int Client::_do_setattr(Inode *in, struct ceph_statx *stx, int mask, mask &= ~CEPH_SETATTR_KILL_SGUID; if (mask & CEPH_SETATTR_UID) { - in->ctime = ceph_clock_now(cct); + in->ctime = ceph_clock_now(); in->cap_dirtier_uid = perms.uid(); in->cap_dirtier_gid = perms.gid(); in->uid = stx->stx_uid; @@ -6542,7 +6542,7 @@ int Client::_do_setattr(Inode *in, struct ceph_statx *stx, int mask, ldout(cct,10) << "changing uid to " << stx->stx_uid << dendl; } if (mask & CEPH_SETATTR_GID) { - in->ctime = ceph_clock_now(cct); + in->ctime = ceph_clock_now(); in->cap_dirtier_uid = perms.uid(); in->cap_dirtier_gid = perms.gid(); in->gid = stx->stx_gid; @@ -6553,7 +6553,7 @@ int Client::_do_setattr(Inode *in, struct ceph_statx *stx, int mask, } if (mask & CEPH_SETATTR_MODE) { - in->ctime = ceph_clock_now(cct); + in->ctime = ceph_clock_now(); in->cap_dirtier_uid = perms.uid(); in->cap_dirtier_gid = perms.gid(); in->mode = (in->mode & ~07777) | (stx->stx_mode & 07777); @@ -6569,7 +6569,7 @@ int Client::_do_setattr(Inode *in, struct ceph_statx *stx, int mask, } if (mask & CEPH_SETATTR_BTIME) { - in->ctime = ceph_clock_now(cct); + in->ctime = ceph_clock_now(); in->cap_dirtier_uid = perms.uid(); in->cap_dirtier_gid = perms.gid(); in->btime = utime_t(stx->stx_btime); @@ -6588,7 +6588,7 @@ int Client::_do_setattr(Inode *in, struct ceph_statx *stx, int mask, in->mtime = utime_t(stx->stx_mtime); if (mask & CEPH_SETATTR_ATIME) in->atime = utime_t(stx->stx_atime); - in->ctime = ceph_clock_now(cct); + in->ctime = ceph_clock_now(); in->cap_dirtier_uid = perms.uid(); in->cap_dirtier_gid = perms.gid(); in->time_warp_seq++; @@ -8339,7 +8339,7 @@ int Client::uninline_data(Inode *in, Context *onfinish) OSDMap::file_to_object_locator(in->layout), create_ops, in->snaprealm->get_snap_context(), - ceph::real_clock::now(cct), + ceph::real_clock::now(), 0, NULL, NULL); @@ -8360,7 +8360,7 @@ int Client::uninline_data(Inode *in, Context *onfinish) OSDMap::file_to_object_locator(in->layout), uninline_ops, in->snaprealm->get_snap_context(), - ceph::real_clock::now(cct), + ceph::real_clock::now(), 0, NULL, onfinish); @@ -8845,7 +8845,7 @@ int Client::_write(Fh *f, int64_t offset, uint64_t size, const char *buf, ldout(cct, 10) << "cur file 
size is " << in->size << dendl; // time it. - utime_t start = ceph_clock_now(cct); + utime_t start = ceph_clock_now(); if (in->inline_version == 0) { int r = _getattr(in, CEPH_STAT_CAP_INLINE_DATA, f->actor_perms, true); @@ -8940,7 +8940,7 @@ int Client::_write(Fh *f, int64_t offset, uint64_t size, const char *buf, // async, caching, non-blocking. r = objectcacher->file_write(&in->oset, &in->layout, in->snaprealm->get_snap_context(), - offset, size, bl, ceph::real_clock::now(cct), + offset, size, bl, ceph::real_clock::now(), 0); put_cap_ref(in, CEPH_CAP_FILE_BUFFER); @@ -8968,7 +8968,7 @@ int Client::_write(Fh *f, int64_t offset, uint64_t size, const char *buf, get_cap_ref(in, CEPH_CAP_FILE_BUFFER); // released by onsafe callback filer->write_trunc(in->ino, &in->layout, in->snaprealm->get_snap_context(), - offset, size, bl, ceph::real_clock::now(cct), 0, + offset, size, bl, ceph::real_clock::now(), 0, in->truncate_size, in->truncate_seq, onfinish, new C_OnFinisher(onsafe, &objecter_finisher)); client_lock.Unlock(); @@ -8983,7 +8983,7 @@ int Client::_write(Fh *f, int64_t offset, uint64_t size, const char *buf, // if we get here, write was successful, update client metadata success: // time - lat = ceph_clock_now(cct); + lat = ceph_clock_now(); lat -= start; logger->tinc(l_c_wrlat, lat); @@ -9009,7 +9009,7 @@ success: } // mtime - in->mtime = ceph_clock_now(cct); + in->mtime = ceph_clock_now(); in->change_attr++; mark_caps_dirty(in, CEPH_CAP_FILE_WR); @@ -12152,7 +12152,7 @@ int Client::ll_write_block(Inode *in, uint64_t blockid, length, fakesnap, bl, - ceph::real_clock::now(cct), + ceph::real_clock::now(), 0, onack, onsafe); @@ -12296,7 +12296,7 @@ int Client::_fallocate(Fh *fh, int mode, int64_t offset, int64_t length) in->inline_data = bl; in->inline_version++; } - in->mtime = ceph_clock_now(cct); + in->mtime = ceph_clock_now(); in->change_attr++; mark_caps_dirty(in, CEPH_CAP_FILE_WR); } else { @@ -12321,10 +12321,10 @@ int Client::_fallocate(Fh *fh, int mode, int64_t offset, int64_t length) filer->zero(in->ino, &in->layout, in->snaprealm->get_snap_context(), offset, length, - ceph::real_clock::now(cct), + ceph::real_clock::now(), 0, true, onfinish, new C_OnFinisher(onsafe, &objecter_finisher)); - in->mtime = ceph_clock_now(cct); + in->mtime = ceph_clock_now(); in->change_attr++; mark_caps_dirty(in, CEPH_CAP_FILE_WR); @@ -12339,7 +12339,7 @@ int Client::_fallocate(Fh *fh, int mode, int64_t offset, int64_t length) uint64_t size = offset + length; if (size > in->size) { in->size = size; - in->mtime = ceph_clock_now(cct); + in->mtime = ceph_clock_now(); in->change_attr++; mark_caps_dirty(in, CEPH_CAP_FILE_WR); @@ -12757,7 +12757,7 @@ Inode *Client::get_quota_root(Inode *in, const UserPerm& perms) return NULL; Inode *cur = in; - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); while (cur) { if (cur != in && cur->quota.is_enable()) @@ -12809,7 +12809,7 @@ Inode *Client::get_quota_root(Inode *in, const UserPerm& perms) break; } - now = ceph_clock_now(cct); + now = ceph_clock_now(); if (cur == in) cur = parent_ref.get(); else @@ -12936,14 +12936,14 @@ int Client::check_pool_perm(Inode *in, int need) rd_op.stat(NULL, (ceph::real_time*)nullptr, NULL); objecter->mutate(oid, OSDMap::file_to_object_locator(in->layout), rd_op, - nullsnapc, ceph::real_clock::now(cct), 0, &rd_cond, NULL); + nullsnapc, ceph::real_clock::now(), 0, &rd_cond, NULL); C_SaferCond wr_cond; ObjectOperation wr_op; wr_op.create(true); objecter->mutate(oid, OSDMap::file_to_object_locator(in->layout), wr_op, - 
nullsnapc, ceph::real_clock::now(cct), 0, &wr_cond, NULL); + nullsnapc, ceph::real_clock::now(), 0, &wr_cond, NULL); client_lock.Unlock(); int rd_ret = rd_cond.wait(); diff --git a/src/client/Inode.cc b/src/client/Inode.cc index 34376a3d5f7b5..f410306805390 100644 --- a/src/client/Inode.cc +++ b/src/client/Inode.cc @@ -175,7 +175,7 @@ bool Inode::cap_is_valid(Cap* cap) const << "cap expire " << cap->session->cap_ttl << std::endl << "cur time " << ceph_clock_now(cct) << std::endl;*/ if ((cap->session->cap_gen <= cap->gen) - && (ceph_clock_now(client->cct) < cap->session->cap_ttl)) { + && (ceph_clock_now() < cap->session->cap_ttl)) { return true; } return false; diff --git a/src/client/SyntheticClient.cc b/src/client/SyntheticClient.cc index 5e1ad2d0352d8..9fab5214c4d41 100644 --- a/src/client/SyntheticClient.cc +++ b/src/client/SyntheticClient.cc @@ -275,7 +275,7 @@ SyntheticClient::SyntheticClient(Client *client, int w) this->iargs = syn_iargs; this->sargs = syn_sargs; - run_start = ceph_clock_now(client->cct); + run_start = ceph_clock_now(); } @@ -434,8 +434,8 @@ int SyntheticClient::run() iargs.pop_front(); if (iarg1 && run_me()) { dout(2) << "sleepuntil " << iarg1 << dendl; - utime_t at = ceph_clock_now(client->cct) - run_start; - if (at.sec() < iarg1) + utime_t at = ceph_clock_now() - run_start; + if (at.sec() < iarg1) sleep(iarg1 - at.sec()); } did_run_me(); @@ -789,14 +789,14 @@ int SyntheticClient::run() if (iarg1 == 0) iarg1 = 1; // play trace at least once! for (int i=0; icct); + utime_t start = ceph_clock_now(); if (time_to_stop()) break; play_trace(t, prefix, !playdata); if (time_to_stop()) break; if (iarg1 > 1) clean_dir(prefix); // clean only if repeat - utime_t lat = ceph_clock_now(client->cct); + utime_t lat = ceph_clock_now(); lat -= start; dout(0) << " trace " << tfile << " loop " << (i+1) << "/" << iarg1 << " done in " << (double)lat << " seconds" << dendl; @@ -1007,7 +1007,7 @@ int SyntheticClient::play_trace(Trace& t, string& prefix, bool metadata_only) char buf[1024]; char buf2[1024]; - utime_t start = ceph_clock_now(client->cct); + utime_t start = ceph_clock_now(); ceph::unordered_map open_files; ceph::unordered_map open_dirs; @@ -1483,7 +1483,7 @@ int SyntheticClient::play_trace(Trace& t, string& prefix, bool metadata_only) bl.push_back(bp); SnapContext snapc; client->objecter->write(oid, oloc, off, len, snapc, bl, - ceph::real_clock::now(client->cct), 0, + ceph::real_clock::now(), 0, new C_SafeCond(&lock, &cond, &ack), safeg.new_sub()); safeg.activate(); @@ -1500,7 +1500,7 @@ int SyntheticClient::play_trace(Trace& t, string& prefix, bool metadata_only) lock.Lock(); SnapContext snapc; client->objecter->zero(oid, oloc, off, len, snapc, - ceph::real_clock::now(client->cct), 0, + ceph::real_clock::now(), 0, new C_SafeCond(&lock, &cond, &ack), safeg.new_sub()); safeg.activate(); @@ -1817,9 +1817,9 @@ int SyntheticClient::read_dirs(const char *basedir, int dirs, int files, int dep list contents; UserPerm perms = client->pick_my_perms(); - utime_t s = ceph_clock_now(client->cct); + utime_t s = ceph_clock_now(); int r = client->getdir(basedir, contents, perms); - utime_t e = ceph_clock_now(client->cct); + utime_t e = ceph_clock_now(); e -= s; if (r < 0) { dout(0) << "getdir couldn't readdir " << basedir << ", stopping" << dendl; @@ -1828,12 +1828,12 @@ int SyntheticClient::read_dirs(const char *basedir, int dirs, int files, int dep for (int i=0; icct); + utime_t s = ceph_clock_now(); if (client->lstat(d, &st, perms) < 0) { dout(2) << "read_dirs failed stat on " << d << ", 
stopping" << dendl; return -1; } - utime_t e = ceph_clock_now(client->cct); + utime_t e = ceph_clock_now(); e -= s; } @@ -1872,7 +1872,7 @@ int SyntheticClient::make_files(int num, int count, int priv, bool more) // files struct stat st; - utime_t start = ceph_clock_now(client->cct); + utime_t start = ceph_clock_now(); for (int c=0; ccct); + utime_t end = ceph_clock_now(); end -= start; dout(0) << "makefiles time is " << end << " or " << ((double)end / (double)num) <<" per file" << dendl; - + return 0; } @@ -1909,24 +1909,24 @@ int SyntheticClient::link_test() client->mkdir("orig", 0755, perms); client->mkdir("copy", 0755, perms); - utime_t start = ceph_clock_now(client->cct); + utime_t start = ceph_clock_now(); for (int i=0; imknod(d, 0755, perms); } - utime_t end = ceph_clock_now(client->cct); + utime_t end = ceph_clock_now(); end -= start; dout(0) << "orig " << end << dendl; // link - start = ceph_clock_now(client->cct); + start = ceph_clock_now(); for (int i=0; ilink(d, e, perms); } - end = ceph_clock_now(client->cct); + end = ceph_clock_now(); end -= start; dout(0) << "copy " << end << dendl; @@ -2046,8 +2046,8 @@ int SyntheticClient::write_file(string& fn, int size, loff_t wrsize) // size i delete[] buf; return fd; } - - utime_t from = ceph_clock_now(client->cct); + + utime_t from = ceph_clock_now(); utime_t start = from; uint64_t bytes = 0, total = 0; @@ -2075,7 +2075,7 @@ int SyntheticClient::write_file(string& fn, int size, loff_t wrsize) // size i bytes += wrsize; total += wrsize; - utime_t now = ceph_clock_now(client->cct); + utime_t now = ceph_clock_now(); if (now - from >= 1.0) { double el = now - from; dout(0) << "write " << (bytes / el / 1048576.0) << " MB/sec" << dendl; @@ -2085,8 +2085,8 @@ int SyntheticClient::write_file(string& fn, int size, loff_t wrsize) // size i } client->fsync(fd, true); - - utime_t stop = ceph_clock_now(client->cct); + + utime_t stop = ceph_clock_now(); double el = stop - start; dout(0) << "write total " << (total / el / 1048576.0) << " MB/sec (" << total << " bytes in " << el << " seconds)" << dendl; @@ -2164,7 +2164,7 @@ int SyntheticClient::read_file(const std::string& fn, int size, return fd; } - utime_t from = ceph_clock_now(client->cct); + utime_t from = ceph_clock_now(); utime_t start = from; uint64_t bytes = 0, total = 0; @@ -2180,7 +2180,7 @@ int SyntheticClient::read_file(const std::string& fn, int size, bytes += rdsize; total += rdsize; - utime_t now = ceph_clock_now(client->cct); + utime_t now = ceph_clock_now(); if (now - from >= 1.0) { double el = now - from; dout(0) << "read " << (bytes / el / 1048576.0) << " MB/sec" << dendl; @@ -2210,7 +2210,7 @@ int SyntheticClient::read_file(const std::string& fn, int size, dout(0) << " + " << (bad-1) << " other bad 16-byte bits in this block" << dendl; } - utime_t stop = ceph_clock_now(client->cct); + utime_t stop = ceph_clock_now(); double el = stop - start; dout(0) << "read total " << (total / el / 1048576.0) << " MB/sec (" << total << " bytes in " << el << " seconds)" << dendl; @@ -2290,10 +2290,10 @@ int SyntheticClient::create_objects(int nobj, int osize, int inflight) } dout(10) << "writing " << oid << dendl; - starts.push_back(ceph_clock_now(client->cct)); + starts.push_back(ceph_clock_now()); client->client_lock.Lock(); client->objecter->write(oid, oloc, 0, osize, snapc, bl, - ceph::real_clock::now(client->cct), 0, + ceph::real_clock::now(), 0, new C_Ref(lock, cond, &unack), new C_Ref(lock, cond, &unsafe)); client->client_lock.Unlock(); @@ -2304,8 +2304,8 @@ int 
SyntheticClient::create_objects(int nobj, int osize, int inflight) cond.Wait(lock); } lock.Unlock(); - - utime_t lat = ceph_clock_now(client->cct); + + utime_t lat = ceph_clock_now(); lat -= starts.front(); starts.pop_front(); } @@ -2385,7 +2385,7 @@ int SyntheticClient::object_rw(int nobj, int osize, int wrpc, SnapContext snapc; client->client_lock.Lock(); - utime_t start = ceph_clock_now(client->cct); + utime_t start = ceph_clock_now(); if (write) { dout(10) << "write to " << oid << dendl; @@ -2397,7 +2397,7 @@ int SyntheticClient::object_rw(int nobj, int osize, int wrpc, op.indata = bl; m.ops.push_back(op); client->objecter->mutate(oid, oloc, m, snapc, - ceph::real_clock::now(client->cct), 0, + ceph::real_clock::now(), 0, NULL, new C_Ref(lock, cond, &unack)); } else { dout(10) << "read from " << oid << dendl; @@ -2414,7 +2414,7 @@ int SyntheticClient::object_rw(int nobj, int osize, int wrpc, } lock.Unlock(); - utime_t lat = ceph_clock_now(client->cct); + utime_t lat = ceph_clock_now(); lat -= start; } diff --git a/src/client/SyntheticClient.h b/src/client/SyntheticClient.h index 755f5e0ef093f..8330e617dd1a1 100644 --- a/src/client/SyntheticClient.h +++ b/src/client/SyntheticClient.h @@ -207,12 +207,12 @@ class SyntheticClient { } bool time_to_stop() { - utime_t now = ceph_clock_now(client->cct); - if (0) cout << "time_to_stop .. now " << now - << " until " << run_until - << " start " << run_start + utime_t now = ceph_clock_now(); + if (0) cout << "time_to_stop .. now " << now + << " until " << run_until + << " start " << run_start << std::endl; - if (run_until.sec() && now > run_until) + if (run_until.sec() && now > run_until) return true; else return false; diff --git a/src/cls/lock/cls_lock.cc b/src/cls/lock/cls_lock.cc index 14e3b0dcdd420..01f691b043c60 100644 --- a/src/cls/lock/cls_lock.cc +++ b/src/cls/lock/cls_lock.cc @@ -96,7 +96,7 @@ static int read_lock(cls_method_context_t hctx, const string& name, lock_info_t /* now trim expired locks */ - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); map::iterator iter = lock->lockers.begin(); @@ -217,7 +217,7 @@ static int lock_obj(cls_method_context_t hctx, linfo.tag = tag; utime_t expiration; if (!duration.is_zero()) { - expiration = ceph_clock_now(g_ceph_context); + expiration = ceph_clock_now(); expiration += duration; } diff --git a/src/cls/rbd/cls_rbd.cc b/src/cls/rbd/cls_rbd.cc index c9850fea1d055..4cd079cc73da3 100644 --- a/src/cls/rbd/cls_rbd.cc +++ b/src/cls/rbd/cls_rbd.cc @@ -3318,7 +3318,7 @@ int image_status_set(cls_method_context_t hctx, const string &global_image_id, const cls::rbd::MirrorImageStatus &status) { MirrorImageStatusOnDisk ondisk_status(status); ondisk_status.up = false; - ondisk_status.last_update = ceph_clock_now(g_ceph_context); + ondisk_status.last_update = ceph_clock_now(); int r = cls_get_request_origin(hctx, &ondisk_status.origin); assert(r == 0); diff --git a/src/common/Clock.cc b/src/common/Clock.cc index 54b6a55ed0668..fc8fcb7c919bb 100644 --- a/src/common/Clock.cc +++ b/src/common/Clock.cc @@ -1,4 +1,4 @@ -// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system @@ -7,9 +7,9 @@ * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public - * License version 2.1, as published by the Free Software + * License version 2.1, as published by the Free Software * 
Foundation. See file COPYING.
- *
+ *
  */
@@ -20,7 +20,7 @@
 #include
-utime_t ceph_clock_now(CephContext *cct)
+utime_t ceph_clock_now()
 {
 #if defined(__linux__)
   struct timespec tp;
@@ -31,15 +31,10 @@ utime_t ceph_clock_now(CephContext *cct)
   gettimeofday(&tv, NULL);
   utime_t n(&tv);
 #endif
-  if (cct)
-    n += cct->_conf->clock_offset;
   return n;
 }
-time_t ceph_clock_gettime(CephContext *cct)
+time_t ceph_clock_gettime()
 {
-  time_t ret = time(NULL);
-  if (cct)
-    ret += ((time_t)cct->_conf->clock_offset);
-  return ret;
+  return time(NULL);
 }
diff --git a/src/common/Clock.h b/src/common/Clock.h
index bdcb0d1553014..12923f1be8382 100644
--- a/src/common/Clock.h
+++ b/src/common/Clock.h
@@ -1,4 +1,4 @@
-// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
 // vim: ts=8 sw=2 smarttab
 /*
  * Ceph - scalable distributed file system
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
- * License version 2.1, as published by the Free Software
+ * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
- *
+ *
 */
 #ifndef CEPH_CLOCK_H
@@ -19,9 +19,7 @@
 #include
-class CephContext;
-
-extern utime_t ceph_clock_now(CephContext *cct);
-extern time_t ceph_clock_gettime(CephContext *cct);
+extern utime_t ceph_clock_now();
+extern time_t ceph_clock_gettime();
 #endif
diff --git a/src/common/Cond.h b/src/common/Cond.h
index c0301ea8ac9c7..de625429273f1 100644
--- a/src/common/Cond.h
+++ b/src/common/Cond.h
@@ -75,15 +75,15 @@ class Cond {
     return r;
   }
-  int WaitInterval(CephContext *cct, Mutex &mutex, utime_t interval) {
-    utime_t when = ceph_clock_now(cct);
+  int WaitInterval(Mutex &mutex, utime_t interval) {
+    utime_t when = ceph_clock_now();
     when += interval;
     return WaitUntil(mutex, when);
   }
   template
-  int WaitInterval(CephContext *cct, Mutex &mutex, Duration interval) {
-    ceph::real_time when(ceph::real_clock::now(cct));
+  int WaitInterval(Mutex &mutex, Duration interval) {
+    ceph::real_time when(ceph::real_clock::now());
     when += interval;
     struct timespec ts = ceph::real_clock::to_timespec(when);
diff --git a/src/common/Finisher.cc b/src/common/Finisher.cc
index 16ebea5fbfbae..16e283e7deb32 100644
--- a/src/common/Finisher.cc
+++ b/src/common/Finisher.cc
@@ -59,7 +59,7 @@ void *Finisher::finisher_thread_entry()
     ldout(cct, 10) << "finisher_thread doing " << ls << dendl;
     if (logger)
-      start = ceph_clock_now(cct);
+      start = ceph_clock_now();
     // Now actually process the contexts.
     for (vector::iterator p = ls.begin();
@@ -79,7 +79,7 @@ void *Finisher::finisher_thread_entry()
     }
     if (logger) {
       logger->dec(l_finisher_queue_len);
-      end = ceph_clock_now(cct);
+      end = ceph_clock_now();
       logger->tinc(l_finisher_complete_lat, end - start);
       start = end;
     }
diff --git a/src/common/LogClient.cc b/src/common/LogClient.cc
index 353f94ba07bc7..f52912f7fccc4 100644
--- a/src/common/LogClient.cc
+++ b/src/common/LogClient.cc
@@ -238,7 +238,7 @@ void LogChannel::do_log(clog_type prio, const std::string& s)
   int lvl = (prio == CLOG_ERROR ?
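
[The Clock.cc and Clock.h hunks above are the heart of the patch: the wall-clock sampler no longer consults cct->_conf->clock_offset, and the Cond.h hunk drops the CephContext argument from WaitInterval() in the same way. A minimal before/after sketch of a typical caller, illustrative only; it assumes common/Clock.h and the same utime_t arithmetic the surrounding hunks already use:

  #include "common/Clock.h"

  utime_t elapsed_demo()
  {
    // before: utime_t start = ceph_clock_now(cct);  // could add clock_offset
    utime_t start = ceph_clock_now();                // after: one unskewed sample
    // ... do some work ...
    utime_t lat = ceph_clock_now();
    lat -= start;              // same "lat -= start" idiom as the hunks above
    return lat;
  }

Waiters change just as mechanically: cond.WaitInterval(cct, lock, interval) becomes cond.WaitInterval(lock, interval). The patch text continues below.]
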
-1 : 0); ldout(cct,lvl) << "log " << prio << " : " << s << dendl; LogEntry e; - e.stamp = ceph_clock_now(cct); + e.stamp = ceph_clock_now(); // seq and who should be set for syslog/graylog/log_to_mon e.who = parent->get_myinst(); e.seq = parent->get_next_seq(); diff --git a/src/common/Mutex.cc b/src/common/Mutex.cc index 05059b969917b..ef2a0f6f7f270 100644 --- a/src/common/Mutex.cc +++ b/src/common/Mutex.cc @@ -97,7 +97,7 @@ void Mutex::Lock(bool no_lockdep) { if (logger && cct && cct->_conf->mutex_perf_counter) { utime_t start; // instrumented mutex enabled - start = ceph_clock_now(cct); + start = ceph_clock_now(); if (TryLock()) { goto out; } @@ -105,7 +105,7 @@ void Mutex::Lock(bool no_lockdep) { r = pthread_mutex_lock(&_m); logger->tinc(l_mutex_wait, - ceph_clock_now(cct) - start); + ceph_clock_now() - start); } else { r = pthread_mutex_lock(&_m); } diff --git a/src/common/Throttle.cc b/src/common/Throttle.cc index 343c67a08448f..64aec5925618f 100644 --- a/src/common/Throttle.cc +++ b/src/common/Throttle.cc @@ -102,7 +102,7 @@ bool Throttle::_wait(int64_t c) waited = true; ldout(cct, 2) << "_wait waiting..." << dendl; if (logger) - start = ceph_clock_now(cct); + start = ceph_clock_now(); do { cv->Wait(lock); @@ -110,7 +110,7 @@ bool Throttle::_wait(int64_t c) ldout(cct, 2) << "_wait finished waiting" << dendl; if (logger) { - utime_t dur = ceph_clock_now(cct) - start; + utime_t dur = ceph_clock_now() - start; logger->tinc(l_throttle_wait, dur); } diff --git a/src/common/Timer.cc b/src/common/Timer.cc index 4f48e5443cd76..20545e12481b3 100644 --- a/src/common/Timer.cc +++ b/src/common/Timer.cc @@ -86,8 +86,8 @@ void SafeTimer::timer_thread() lock.Lock(); ldout(cct,10) << "timer_thread starting" << dendl; while (!stopping) { - utime_t now = ceph_clock_now(cct); - + utime_t now = ceph_clock_now(); + while (!schedule.empty()) { scheduled_map_t::iterator p = schedule.begin(); @@ -126,7 +126,7 @@ void SafeTimer::add_event_after(double seconds, Context *callback) { assert(lock.is_locked()); - utime_t when = ceph_clock_now(cct); + utime_t when = ceph_clock_now(); when += seconds; add_event_at(when, callback); } diff --git a/src/common/TrackedOp.cc b/src/common/TrackedOp.cc index 8826b72052906..030b9a2231989 100644 --- a/src/common/TrackedOp.cc +++ b/src/common/TrackedOp.cc @@ -122,7 +122,7 @@ bool OpTracker::dump_historic_ops(Formatter *f) if (!tracking_enabled) return false; - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); history.dump_ops(now, f); return true; } @@ -136,7 +136,7 @@ bool OpTracker::dump_ops_in_flight(Formatter *f, bool print_only_blocked) f->open_object_section("ops_in_flight"); // overall dump uint64_t total_ops_in_flight = 0; f->open_array_section("ops"); // list of TrackedOps - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); for (uint32_t i = 0; i < num_optracker_shards; i++) { ShardedTrackingData* sdata = sharded_in_flight_list[i]; assert(NULL != sdata); @@ -197,7 +197,7 @@ void OpTracker::unregister_inflight_op(TrackedOp *i) if (!tracking_enabled) delete i; else { - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); history.insert(now, TrackedOpRef(i)); } } @@ -208,7 +208,7 @@ bool OpTracker::check_ops_in_flight(std::vector &warning_vector, int *sl if (!tracking_enabled) return false; - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); utime_t too_old = now; too_old -= complaint_time; utime_t oldest_op = now; @@ -296,7 +296,7 @@ bool OpTracker::check_ops_in_flight(std::vector &warning_vector, 
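
[The Timer.cc hunk above keeps the scheduling idiom every SafeTimer user relies on: take one clock sample, add the relative delay, schedule at the absolute time. A sketch of that expansion; the helper name is hypothetical, and only ceph_clock_now() and the utime_t += double arithmetic come from the hunk:

  #include "common/Clock.h"

  // Hypothetical helper mirroring SafeTimer::add_event_after():
  // a relative delay in seconds becomes an absolute, unskewed deadline.
  utime_t deadline_after(double seconds)
  {
    utime_t when = ceph_clock_now();  // was: ceph_clock_now(cct)
    when += seconds;
    return when;
  }

The patch text continues below.]
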
int *sl void OpTracker::get_age_ms_histogram(pow2_hist_t *h) { h->clear(); - utime_t now = ceph_clock_now(NULL); + utime_t now = ceph_clock_now(); for (uint32_t iter = 0; iter < num_optracker_shards; iter++) { ShardedTrackingData* sdata = sharded_in_flight_list[iter]; @@ -347,7 +347,7 @@ void TrackedOp::mark_event(const string &event) if (!is_tracked) return; - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); { Mutex::Locker l(lock); events.push_back(make_pair(now, event)); diff --git a/src/common/TrackedOp.h b/src/common/TrackedOp.h index edfe5c7639e6c..5b21c5aee1f2e 100644 --- a/src/common/TrackedOp.h +++ b/src/common/TrackedOp.h @@ -104,7 +104,7 @@ public: */ bool check_ops_in_flight(std::vector &warning_strings, int *slow = NULL); void mark_event(TrackedOp *op, const string &evt, - utime_t time = ceph_clock_now(g_ceph_context)); + utime_t time = ceph_clock_now()); void on_shutdown() { history.on_shutdown(); @@ -169,7 +169,7 @@ public: if (!events.empty() && events.rbegin()->second.compare("done") == 0) return events.rbegin()->first - get_initiated(); else - return ceph_clock_now(NULL) - get_initiated(); + return ceph_clock_now() - get_initiated(); } void mark_event(const string &event); diff --git a/src/common/WorkQueue.cc b/src/common/WorkQueue.cc index f37d125b5d720..33bd1eb0397cb 100644 --- a/src/common/WorkQueue.cc +++ b/src/common/WorkQueue.cc @@ -150,7 +150,7 @@ void ThreadPool::worker(WorkThread *wt) hb, cct->_conf->threadpool_default_timeout, 0); - _cond.WaitInterval(cct, _lock, + _cond.WaitInterval(_lock, utime_t( cct->_conf->threadpool_empty_queue_max_wait, 0)); } @@ -325,7 +325,7 @@ void ShardedThreadPool::shardedthreadpool_worker(uint32_t thread_index) cct->get_heartbeat_map()->reset_timeout( hb, wq->timeout_interval, wq->suicide_interval); - shardedpool_cond.WaitInterval(cct, shardedpool_lock, + shardedpool_cond.WaitInterval(shardedpool_lock, utime_t( cct->_conf->threadpool_empty_queue_max_wait, 0)); } @@ -341,7 +341,7 @@ void ShardedThreadPool::shardedthreadpool_worker(uint32_t thread_index) cct->get_heartbeat_map()->reset_timeout( hb, wq->timeout_interval, wq->suicide_interval); - shardedpool_cond.WaitInterval(cct, shardedpool_lock, + shardedpool_cond.WaitInterval(shardedpool_lock, utime_t( cct->_conf->threadpool_empty_queue_max_wait, 0)); } diff --git a/src/common/assert.cc b/src/common/assert.cc index 5802e8f714b20..340391742e8ea 100644 --- a/src/common/assert.cc +++ b/src/common/assert.cc @@ -47,7 +47,7 @@ namespace ceph { const char *func) { ostringstream tss; - tss << ceph_clock_now(g_assert_context); + tss << ceph_clock_now(); char buf[8096]; BackTrace *bt = new BackTrace(1); @@ -82,7 +82,7 @@ namespace ceph { const char *func, const char* msg, ...) 
 {
   ostringstream tss;
-  tss << ceph_clock_now(g_assert_context);
+  tss << ceph_clock_now();
   char buf[8096];
   BackTrace *bt = new BackTrace(1);
@@ -82,7 +82,7 @@ namespace ceph {
 			 const char *func, const char* msg, ...)
   {
     ostringstream tss;
-    tss << ceph_clock_now(g_assert_context);
+    tss << ceph_clock_now();
     class BufAppender {
     public:
diff --git a/src/common/ceph_context.cc b/src/common/ceph_context.cc
index 75946839169aa..76902c151d303 100644
--- a/src/common/ceph_context.cc
+++ b/src/common/ceph_context.cc
@@ -146,7 +146,7 @@ public:
       if (_cct->_conf->heartbeat_interval) {
         utime_t interval(_cct->_conf->heartbeat_interval, 0);
-        _cond.WaitInterval(_cct, _lock, interval);
+        _cond.WaitInterval(_lock, interval);
       } else
         _cond.Wait(_lock);
diff --git a/src/common/ceph_time.cc b/src/common/ceph_time.cc
index 742481d9f4188..552b45a4eabcd 100644
--- a/src/common/ceph_time.cc
+++ b/src/common/ceph_time.cc
@@ -14,8 +14,6 @@
 // For ceph_timespec
 #include "include/types.h"
-
-#include "ceph_context.h"
 #include "ceph_time.h"
 #include "config.h"
@@ -47,13 +45,6 @@ int clock_gettime(int clk_id, struct timespec *tp)
 namespace ceph {
   namespace time_detail {
-    real_clock::time_point real_clock::now(const CephContext* cct) noexcept {
-      auto t = now();
-      if (cct)
-        t += make_timespan(cct->_conf->clock_offset);
-      return t;
-    }
-
     void real_clock::to_ceph_timespec(const time_point& t,
                                       struct ceph_timespec& ts) {
       ts.tv_sec = to_time_t(t);
@@ -69,14 +60,6 @@ namespace ceph {
       return time_point(seconds(ts.tv_sec) + nanoseconds(ts.tv_nsec));
     }
-    coarse_real_clock::time_point coarse_real_clock::now(
-      const CephContext* cct) noexcept {
-      auto t = now();
-      if (cct)
-        t += make_timespan(cct->_conf->clock_offset);
-      return t;
-    }
-
     void coarse_real_clock::to_ceph_timespec(const time_point& t,
                                              struct ceph_timespec& ts) {
       ts.tv_sec = to_time_t(t);
@@ -92,7 +75,7 @@ namespace ceph {
       const struct ceph_timespec& ts) {
       return time_point(seconds(ts.tv_sec) + nanoseconds(ts.tv_nsec));
     }
-  };
+  }
   using std::chrono::duration_cast;
   using std::chrono::seconds;
diff --git a/src/common/ceph_time.h b/src/common/ceph_time.h
index 5fb867bc83e2a..c4937111bc3d2 100644
--- a/src/common/ceph_time.h
+++ b/src/common/ceph_time.h
@@ -33,7 +33,6 @@ int clock_gettime(int clk_id, struct timespec *tp);
 #endif
-class CephContext;
 struct ceph_timespec;
 namespace ceph {
@@ -84,12 +83,9 @@ namespace ceph {
       clock_gettime(CLOCK_REALTIME, &ts);
       return from_timespec(ts);
     }
-    // We need a version of 'now' that can take a CephContext for
-    // introducing configurable clock skew.
- static time_point now(const CephContext* cct) noexcept; static bool is_zero(const time_point& t) { - return (t == time_point::min()); + return (t == time_point::min()); } // Allow conversion to/from any clock with the same interface as @@ -182,7 +178,6 @@ namespace ceph { #endif return from_timespec(ts); } - static time_point now(const CephContext* cct) noexcept; static time_t to_time_t(const time_point& t) noexcept { return duration_cast(t.time_since_epoch()).count(); diff --git a/src/common/obj_bencher.cc b/src/common/obj_bencher.cc index 0b0df3a8ddb18..701a4b5ad0ee9 100644 --- a/src/common/obj_bencher.cc +++ b/src/common/obj_bencher.cc @@ -78,7 +78,7 @@ ostream& ObjBencher::out(ostream& os, utime_t& t) ostream& ObjBencher::out(ostream& os) { - utime_t cur_time = ceph_clock_now(cct); + utime_t cur_time = ceph_clock_now(); return out(os, cur_time); } @@ -99,7 +99,7 @@ void *ObjBencher::status_printer(void *_bencher) { if (formatter) formatter->open_array_section("datas"); while(!data.done) { - utime_t cur_time = ceph_clock_now(bencher->cct); + utime_t cur_time = ceph_clock_now(); if (i % 20 == 0 && !formatter) { if (i > 0) @@ -208,7 +208,7 @@ void *ObjBencher::status_printer(void *_bencher) { } ++i; ++cycleSinceChange; - cond.WaitInterval(bencher->cct, bencher->lock, ONE_SECOND); + cond.WaitInterval(bencher->lock, ONE_SECOND); } if (formatter) formatter->close_section(); //datas @@ -292,13 +292,13 @@ int ObjBencher::aio_bench( goto out; } - data.start_time = ceph_clock_now(cct); + data.start_time = ceph_clock_now(); out(cout) << "Cleaning up (deleting benchmark objects)" << std::endl; r = clean_up(num_objects, prevPid, concurrentios); if (r != 0) goto out; - runtime = ceph_clock_now(cct) - data.start_time; + runtime = ceph_clock_now() - data.start_time; out(cout) << "Clean up completed and total clean up time :" << runtime << std::endl; // lastrun file @@ -442,10 +442,10 @@ int ObjBencher::write_bench(int secondsToRun, ceph_pthread_setname(print_thread, "write_stat"); lock.Lock(); data.finished = 0; - data.start_time = ceph_clock_now(cct); + data.start_time = ceph_clock_now(); lock.Unlock(); for (int i = 0; i data.max_latency) data.max_latency = data.cur_latency; @@ -513,10 +513,10 @@ int ObjBencher::write_bench(int secondsToRun, --data.in_flight; lock.Unlock(); release_completion(slot); - timePassed = ceph_clock_now(cct) - data.start_time; + timePassed = ceph_clock_now() - data.start_time; //write new stuff to backend - start_times[slot] = ceph_clock_now(cct); + start_times[slot] = ceph_clock_now(); r = create_completion(slot, _aio_cb, &lc); if (r < 0) goto ERR; @@ -545,7 +545,7 @@ int ObjBencher::write_bench(int secondsToRun, lock.Unlock(); goto ERR; } - data.cur_latency = ceph_clock_now(cct) - start_times[slot]; + data.cur_latency = ceph_clock_now() - start_times[slot]; data.history.latency.push_back(data.cur_latency); total_latency += data.cur_latency; if (data.cur_latency > data.max_latency) data.max_latency = data.cur_latency; @@ -559,7 +559,7 @@ int ObjBencher::write_bench(int secondsToRun, contents[slot] = 0; } - timePassed = ceph_clock_now(cct) - data.start_time; + timePassed = ceph_clock_now() - data.start_time; lock.Lock(); data.done = true; lock.Unlock(); @@ -670,7 +670,7 @@ int ObjBencher::seq_read_bench(int seconds_to_run, int num_objects, int concurre lock.Lock(); data.finished = 0; - data.start_time = ceph_clock_now(cct); + data.start_time = ceph_clock_now(); lock.Unlock(); pthread_t print_thread; @@ -681,7 +681,7 @@ int ObjBencher::seq_read_bench(int seconds_to_run, int 
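
[The ceph_time.cc and ceph_time.h hunks above delete the now(const CephContext*) overloads, so ceph::real_clock and ceph::coarse_real_clock keep only the zero-argument now(). A sketch of the surviving interface, assuming common/ceph_time.h as modified by this patch:

  #include "common/ceph_time.h"

  void clock_api_demo()
  {
    ceph::real_time t = ceph::real_clock::now();   // was: now(cct)
    auto coarse = ceph::coarse_real_clock::now();  // likewise unskewed
    (void)t; (void)coarse;
  }

The patch text continues below.]
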
num_objects, int concurre //start initial reads for (int i = 0; i < concurrentios; ++i) { index[i] = i; - start_times[i] = ceph_clock_now(cct); + start_times[i] = ceph_clock_now(); create_completion(i, _aio_cb, (void *)&lc); r = aio_read(name[i], i, contents[i], data.op_size, data.op_size * (i % writes_per_object)); @@ -700,7 +700,7 @@ int ObjBencher::seq_read_bench(int seconds_to_run, int num_objects, int concurre bufferlist *cur_contents; slot = 0; - while ((!seconds_to_run || ceph_clock_now(cct) < finish_time) && + while ((!seconds_to_run || ceph_clock_now() < finish_time) && num_objects > data.started) { lock.Lock(); int old_slot = slot; @@ -723,7 +723,7 @@ int ObjBencher::seq_read_bench(int seconds_to_run, int num_objects, int concurre } // calculate latency here, so memcmp doesn't inflate it - data.cur_latency = ceph_clock_now(cct) - start_times[slot]; + data.cur_latency = ceph_clock_now() - start_times[slot]; cur_contents = contents[slot]; int current_index = index[slot]; @@ -761,7 +761,7 @@ int ObjBencher::seq_read_bench(int seconds_to_run, int num_objects, int concurre release_completion(slot); //start new read and check data if requested - start_times[slot] = ceph_clock_now(cct); + start_times[slot] = ceph_clock_now(); create_completion(slot, _aio_cb, (void *)&lc); r = aio_read(newName, slot, contents[slot], data.op_size, data.op_size * (data.started % writes_per_object)); @@ -786,7 +786,7 @@ int ObjBencher::seq_read_bench(int seconds_to_run, int num_objects, int concurre lock.Unlock(); goto ERR; } - data.cur_latency = ceph_clock_now(cct) - start_times[slot]; + data.cur_latency = ceph_clock_now() - start_times[slot]; total_latency += data.cur_latency; if (data.cur_latency > data.max_latency) data.max_latency = data.cur_latency; if (data.cur_latency < data.min_latency) data.min_latency = data.cur_latency; @@ -808,7 +808,7 @@ int ObjBencher::seq_read_bench(int seconds_to_run, int num_objects, int concurre delete contents[slot]; } - runtime = ceph_clock_now(cct) - data.start_time; + runtime = ceph_clock_now() - data.start_time; lock.Lock(); data.done = true; lock.Unlock(); @@ -899,7 +899,7 @@ int ObjBencher::rand_read_bench(int seconds_to_run, int num_objects, int concurr lock.Lock(); data.finished = 0; - data.start_time = ceph_clock_now(g_ceph_context); + data.start_time = ceph_clock_now(); lock.Unlock(); pthread_t print_thread; @@ -910,7 +910,7 @@ int ObjBencher::rand_read_bench(int seconds_to_run, int num_objects, int concurr //start initial reads for (int i = 0; i < concurrentios; ++i) { index[i] = i; - start_times[i] = ceph_clock_now(g_ceph_context); + start_times[i] = ceph_clock_now(); create_completion(i, _aio_cb, (void *)&lc); r = aio_read(name[i], i, contents[i], data.op_size, data.op_size * (i % writes_per_object)); @@ -930,7 +930,7 @@ int ObjBencher::rand_read_bench(int seconds_to_run, int num_objects, int concurr int rand_id; slot = 0; - while ((!seconds_to_run || ceph_clock_now(g_ceph_context) < finish_time)) { + while ((!seconds_to_run || ceph_clock_now() < finish_time)) { lock.Lock(); int old_slot = slot; bool found = false; @@ -952,7 +952,7 @@ int ObjBencher::rand_read_bench(int seconds_to_run, int num_objects, int concurr } // calculate latency here, so memcmp doesn't inflate it - data.cur_latency = ceph_clock_now(cct) - start_times[slot]; + data.cur_latency = ceph_clock_now() - start_times[slot]; lock.Unlock(); @@ -993,7 +993,7 @@ int ObjBencher::rand_read_bench(int seconds_to_run, int num_objects, int concurr cur_contents->invalidate_crc(); //start new read and 
check data if requested - start_times[slot] = ceph_clock_now(g_ceph_context); + start_times[slot] = ceph_clock_now(); create_completion(slot, _aio_cb, (void *)&lc); r = aio_read(newName, slot, contents[slot], data.op_size, data.op_size * (rand_id % writes_per_object)); @@ -1019,7 +1019,7 @@ int ObjBencher::rand_read_bench(int seconds_to_run, int num_objects, int concurr lock.Unlock(); goto ERR; } - data.cur_latency = ceph_clock_now(g_ceph_context) - start_times[slot]; + data.cur_latency = ceph_clock_now() - start_times[slot]; total_latency += data.cur_latency; if (data.cur_latency > data.max_latency) data.max_latency = data.cur_latency; if (data.cur_latency < data.min_latency) data.min_latency = data.cur_latency; @@ -1041,7 +1041,7 @@ int ObjBencher::rand_read_bench(int seconds_to_run, int num_objects, int concurr delete contents[slot]; } - runtime = ceph_clock_now(g_ceph_context) - data.start_time; + runtime = ceph_clock_now() - data.start_time; lock.Lock(); data.done = true; lock.Unlock(); diff --git a/src/key_value_store/kv_flat_btree_async.cc b/src/key_value_store/kv_flat_btree_async.cc index c0acd4dc924bb..ec65e787e6794 100644 --- a/src/key_value_store/kv_flat_btree_async.cc +++ b/src/key_value_store/kv_flat_btree_async.cc @@ -61,7 +61,7 @@ void IndexCache::push(const string &key, const index_data &idata) { utime_t old_time = new_it->second.second; t2kmap.erase(old_time); } - utime_t time = ceph_clock_now(g_ceph_context); + utime_t time = ceph_clock_now(); k2itmap[idata.kdata] = make_pair(idata, time); t2kmap[time] = idata.kdata; if ((int)k2itmap.size() > cache_size) { @@ -79,7 +79,7 @@ void IndexCache::push(const index_data &idata) { t2kmap.erase(old_time); k2itmap.erase(idata.kdata); } - utime_t time = ceph_clock_now(g_ceph_context); + utime_t time = ceph_clock_now(); k2itmap[idata.kdata] = make_pair(idata, time); t2kmap[time] = idata.kdata; if ((int)k2itmap.size() > cache_size) { @@ -190,7 +190,7 @@ int KvFlatBtreeAsync::next(const index_data &idata, index_data * out_data) out_data->kdata.parse(kvs.begin()->first); bufferlist::iterator b = kvs.begin()->second.begin(); out_data->decode(b); - if (idata.is_timed_out(ceph_clock_now(g_ceph_context),timeout)) { + if (idata.is_timed_out(ceph_clock_now(), timeout)) { if (verbose) cout << client_name << " THINKS THE OTHER CLIENT DIED." << std::endl; //the client died after deleting the object. clean up. @@ -217,7 +217,7 @@ int KvFlatBtreeAsync::prev(const index_data &idata, index_data * out_data) if (verbose) cout << "\t\t\t" << client_name << "-prev: getting index failed with error " << err << std::endl; - if (idata.is_timed_out(ceph_clock_now(g_ceph_context),timeout)) { + if (idata.is_timed_out(ceph_clock_now(), timeout)) { if (verbose) cout << client_name << " THINKS THE OTHER CLIENT DIED." << std::endl; //the client died after deleting the object. clean up. @@ -276,7 +276,7 @@ int KvFlatBtreeAsync::read_index(const string &key, index_data * idata, (cache_size / cache_refresh >= 2? 
cache_size / cache_refresh: 2), &kvmap,&err); err = io_ctx.operate(index_name, &oro, NULL); - utime_t mytime = ceph_clock_now(g_ceph_context); + utime_t mytime = ceph_clock_now(); if (err < 0){ cerr << "\t" << client_name << "-read_index: getting keys failed with " @@ -711,7 +711,7 @@ void KvFlatBtreeAsync::set_up_prefix_index( std::map > assertions; map to_insert; idata->prefix = "1"; - idata->ts = ceph_clock_now(g_ceph_context); + idata->ts = ceph_clock_now(); for(vector::const_iterator it = to_create.begin(); it != to_create.end(); ++it) { @@ -1682,7 +1682,7 @@ int KvFlatBtreeAsync::get(const string &key, bufferlist *val) { return -ESUICIDE; } err = read_index(key, &idata, NULL, false); - mytime = ceph_clock_now(g_ceph_context); + mytime = ceph_clock_now(); if (err < 0) { if (verbose) cout << "getting oid failed with code " << err << std::endl; return err; @@ -2081,7 +2081,7 @@ bool KvFlatBtreeAsync::is_consistent() { io_ctx.aio_operate(dit->obj, aioc, &oro, NULL); aioc->wait_for_safe(); err = aioc->get_return_value(); - if (ceph_clock_now(g_ceph_context) - idata.ts > timeout) { + if (ceph_clock_now() - idata.ts > timeout) { if (err < 0) { aioc->release(); if (err == -ENOENT) { diff --git a/src/kv/LevelDBStore.cc b/src/kv/LevelDBStore.cc index 3c3b4993817c4..9d8e1e402d589 100644 --- a/src/kv/LevelDBStore.cc +++ b/src/kv/LevelDBStore.cc @@ -168,11 +168,11 @@ void LevelDBStore::close() int LevelDBStore::submit_transaction(KeyValueDB::Transaction t) { - utime_t start = ceph_clock_now(g_ceph_context); + utime_t start = ceph_clock_now(); LevelDBTransactionImpl * _t = static_cast(t.get()); leveldb::Status s = db->Write(leveldb::WriteOptions(), &(_t->bat)); - utime_t lat = ceph_clock_now(g_ceph_context) - start; + utime_t lat = ceph_clock_now() - start; logger->inc(l_leveldb_txns); logger->tinc(l_leveldb_submit_latency, lat); return s.ok() ? 0 : -1; @@ -180,13 +180,13 @@ int LevelDBStore::submit_transaction(KeyValueDB::Transaction t) int LevelDBStore::submit_transaction_sync(KeyValueDB::Transaction t) { - utime_t start = ceph_clock_now(g_ceph_context); + utime_t start = ceph_clock_now(); LevelDBTransactionImpl * _t = static_cast(t.get()); leveldb::WriteOptions options; options.sync = true; leveldb::Status s = db->Write(options, &(_t->bat)); - utime_t lat = ceph_clock_now(g_ceph_context) - start; + utime_t lat = ceph_clock_now() - start; logger->inc(l_leveldb_txns); logger->tinc(l_leveldb_submit_sync_latency, lat); return s.ok() ? 
0 : -1; @@ -246,7 +246,7 @@ int LevelDBStore::get( const std::set &keys, std::map *out) { - utime_t start = ceph_clock_now(g_ceph_context); + utime_t start = ceph_clock_now(); for (std::set::const_iterator i = keys.begin(); i != keys.end(); ++i) { std::string value; @@ -255,7 +255,7 @@ int LevelDBStore::get( if (status.ok()) (*out)[*i].append(value); } - utime_t lat = ceph_clock_now(g_ceph_context) - start; + utime_t lat = ceph_clock_now() - start; logger->inc(l_leveldb_gets); logger->tinc(l_leveldb_get_latency, lat); return 0; @@ -266,7 +266,7 @@ int LevelDBStore::get(const string &prefix, bufferlist *out) { assert(out && (out->length() == 0)); - utime_t start = ceph_clock_now(g_ceph_context); + utime_t start = ceph_clock_now(); int r = 0; string value, k; leveldb::Status s; @@ -277,7 +277,7 @@ int LevelDBStore::get(const string &prefix, } else { r = -ENOENT; } - utime_t lat = ceph_clock_now(g_ceph_context) - start; + utime_t lat = ceph_clock_now() - start; logger->inc(l_leveldb_gets); logger->tinc(l_leveldb_get_latency, lat); return r; diff --git a/src/kv/RocksDBStore.cc b/src/kv/RocksDBStore.cc index a4cd50c76f6e3..41c7221902e10 100644 --- a/src/kv/RocksDBStore.cc +++ b/src/kv/RocksDBStore.cc @@ -412,7 +412,7 @@ void RocksDBStore::get_statistics(Formatter *f) int RocksDBStore::submit_transaction(KeyValueDB::Transaction t) { - utime_t start = ceph_clock_now(g_ceph_context); + utime_t start = ceph_clock_now(); // enable rocksdb breakdown // considering performance overhead, default is disabled if (g_conf->rocksdb_perf) { @@ -436,7 +436,7 @@ int RocksDBStore::submit_transaction(KeyValueDB::Transaction t) derr << __func__ << " error: " << s.ToString() << " code = " << s.code() << " Rocksdb transaction: " << rocks_txc.seen << dendl; } - utime_t lat = ceph_clock_now(g_ceph_context) - start; + utime_t lat = ceph_clock_now() - start; if (g_conf->rocksdb_perf) { utime_t write_memtable_time; @@ -465,7 +465,7 @@ int RocksDBStore::submit_transaction(KeyValueDB::Transaction t) int RocksDBStore::submit_transaction_sync(KeyValueDB::Transaction t) { - utime_t start = ceph_clock_now(g_ceph_context); + utime_t start = ceph_clock_now(); // enable rocksdb breakdown // considering performance overhead, default is disabled if (g_conf->rocksdb_perf) { @@ -490,7 +490,7 @@ int RocksDBStore::submit_transaction_sync(KeyValueDB::Transaction t) derr << __func__ << " error: " << s.ToString() << " code = " << s.code() << " Rocksdb transaction: " << rocks_txc.seen << dendl; } - utime_t lat = ceph_clock_now(g_ceph_context) - start; + utime_t lat = ceph_clock_now() - start; if (g_conf->rocksdb_perf) { utime_t write_memtable_time; @@ -590,7 +590,7 @@ int RocksDBStore::get( const std::set &keys, std::map *out) { - utime_t start = ceph_clock_now(g_ceph_context); + utime_t start = ceph_clock_now(); for (std::set::const_iterator i = keys.begin(); i != keys.end(); ++i) { std::string value; @@ -599,7 +599,7 @@ int RocksDBStore::get( if (status.ok()) (*out)[*i].append(value); } - utime_t lat = ceph_clock_now(g_ceph_context) - start; + utime_t lat = ceph_clock_now() - start; logger->inc(l_rocksdb_gets); logger->tinc(l_rocksdb_get_latency, lat); return 0; @@ -611,7 +611,7 @@ int RocksDBStore::get( bufferlist *out) { assert(out && (out->length() == 0)); - utime_t start = ceph_clock_now(g_ceph_context); + utime_t start = ceph_clock_now(); int r = 0; string value, k; rocksdb::Status s; @@ -622,7 +622,7 @@ int RocksDBStore::get( } else { r = -ENOENT; } - utime_t lat = ceph_clock_now(g_ceph_context) - start; + utime_t lat = 
ceph_clock_now() - start; logger->inc(l_rocksdb_gets); logger->tinc(l_rocksdb_get_latency, lat); return r; diff --git a/src/librados/IoCtxImpl.cc b/src/librados/IoCtxImpl.cc index 75f4dee420ffe..8416d31e11107 100644 --- a/src/librados/IoCtxImpl.cc +++ b/src/librados/IoCtxImpl.cc @@ -408,7 +408,7 @@ int librados::IoCtxImpl::selfmanaged_snap_rollback_object(const object_t& oid, prepare_assert_ops(&op); op.rollback(snapid); objecter->mutate(oid, oloc, - op, snapc, ceph::real_clock::now(client->cct), 0, + op, snapc, ceph::real_clock::now(), 0, onack, NULL, NULL); mylock.Lock(); @@ -667,7 +667,7 @@ int librados::IoCtxImpl::operate(const object_t& oid, ::ObjectOperation *o, ceph::real_time *pmtime, int flags) { ceph::real_time ut = (pmtime ? *pmtime : - ceph::real_clock::now(client->cct)); + ceph::real_clock::now()); /* can't write to a snapshot */ if (snap_seq != CEPH_NOSNAP) @@ -761,7 +761,7 @@ int librados::IoCtxImpl::aio_operate(const object_t& oid, ::ObjectOperation *o, AioCompletionImpl *c, const SnapContext& snap_context, int flags) { - auto ut = ceph::real_clock::now(client->cct); + auto ut = ceph::real_clock::now(); /* can't write to a snapshot */ if (snap_seq != CEPH_NOSNAP) return -EROFS; @@ -865,7 +865,7 @@ int librados::IoCtxImpl::aio_write(const object_t &oid, AioCompletionImpl *c, const bufferlist& bl, size_t len, uint64_t off) { - auto ut = ceph::real_clock::now(client->cct); + auto ut = ceph::real_clock::now(); ldout(client->cct, 20) << "aio_write " << oid << " " << off << "~" << len << " snapc=" << snapc << " snap_seq=" << snap_seq << dendl; if (len > UINT_MAX/2) @@ -892,7 +892,7 @@ int librados::IoCtxImpl::aio_write(const object_t &oid, AioCompletionImpl *c, int librados::IoCtxImpl::aio_append(const object_t &oid, AioCompletionImpl *c, const bufferlist& bl, size_t len) { - auto ut = ceph::real_clock::now(client->cct); + auto ut = ceph::real_clock::now(); if (len > UINT_MAX/2) return -E2BIG; @@ -919,7 +919,7 @@ int librados::IoCtxImpl::aio_write_full(const object_t &oid, AioCompletionImpl *c, const bufferlist& bl) { - auto ut = ceph::real_clock::now(client->cct); + auto ut = ceph::real_clock::now(); if (bl.length() > UINT_MAX/2) return -E2BIG; @@ -948,7 +948,7 @@ int librados::IoCtxImpl::aio_writesame(const object_t &oid, size_t write_len, uint64_t off) { - auto ut = ceph::real_clock::now(client->cct); + auto ut = ceph::real_clock::now(); if ((bl.length() > UINT_MAX/2) || (write_len > UINT_MAX/2)) return -E2BIG; @@ -976,7 +976,7 @@ int librados::IoCtxImpl::aio_writesame(const object_t &oid, int librados::IoCtxImpl::aio_remove(const object_t &oid, AioCompletionImpl *c, int flags) { - auto ut = ceph::real_clock::now(client->cct); + auto ut = ceph::real_clock::now(); /* can't write to a snapshot */ if (snap_seq != CEPH_NOSNAP) @@ -1610,7 +1610,7 @@ int librados::IoCtxImpl::unwatch(uint64_t cookie) prepare_assert_ops(&wr); wr.watch(cookie, CEPH_OSD_WATCH_OP_UNWATCH); objecter->mutate(linger_op->target.base_oid, oloc, wr, - snapc, ceph::real_clock::now(client->cct), 0, NULL, + snapc, ceph::real_clock::now(), 0, NULL, &onfinish, &ver); objecter->linger_cancel(linger_op); @@ -1629,7 +1629,7 @@ int librados::IoCtxImpl::aio_unwatch(uint64_t cookie, AioCompletionImpl *c) prepare_assert_ops(&wr); wr.watch(cookie, CEPH_OSD_WATCH_OP_UNWATCH); objecter->mutate(linger_op->target.base_oid, oloc, wr, - snapc, ceph::real_clock::now(client->cct), 0, NULL, + snapc, ceph::real_clock::now(), 0, NULL, oncomplete, &c->objver); return 0; } diff --git a/src/librados/RadosClient.cc 
b/src/librados/RadosClient.cc index e0fc6f507b769..a683e52c0be01 100644 --- a/src/librados/RadosClient.cc +++ b/src/librados/RadosClient.cc @@ -543,13 +543,13 @@ int librados::RadosClient::wait_for_osdmap() if (objecter->with_osdmap(std::mem_fn(&OSDMap::get_epoch)) == 0) { ldout(cct, 10) << __func__ << " waiting" << dendl; - utime_t start = ceph_clock_now(cct); + utime_t start = ceph_clock_now(); while (objecter->with_osdmap(std::mem_fn(&OSDMap::get_epoch)) == 0) { if (timeout.is_zero()) { cond.Wait(lock); } else { - cond.WaitInterval(cct, lock, timeout); - utime_t elapsed = ceph_clock_now(cct) - start; + cond.WaitInterval(lock, timeout); + utime_t elapsed = ceph_clock_now() - start; if (elapsed > timeout) { lderr(cct) << "timed out waiting for first osdmap from monitors" << dendl; diff --git a/src/librbd/AioCompletion.cc b/src/librbd/AioCompletion.cc index e4044fa82a544..e42754b2b9e65 100644 --- a/src/librbd/AioCompletion.cc +++ b/src/librbd/AioCompletion.cc @@ -77,7 +77,7 @@ void AioCompletion::complete() { tracepoint(librbd, aio_complete_enter, this, rval); utime_t elapsed; - elapsed = ceph_clock_now(cct) - start_time; + elapsed = ceph_clock_now() - start_time; switch (aio_type) { case AIO_TYPE_OPEN: case AIO_TYPE_CLOSE: @@ -130,7 +130,7 @@ void AioCompletion::init_time(ImageCtx *i, aio_type_t t) { if (ictx == nullptr) { ictx = i; aio_type = t; - start_time = ceph_clock_now(ictx->cct); + start_time = ceph_clock_now(); } } diff --git a/src/librbd/internal.cc b/src/librbd/internal.cc index 8512e6f4999cc..6be69ff87d8bf 100644 --- a/src/librbd/internal.cc +++ b/src/librbd/internal.cc @@ -2260,7 +2260,7 @@ int mirror_image_disable_internal(ImageCtx *ictx, bool force, uint64_t left = mylen; RWLock::RLocker owner_locker(ictx->owner_lock); - start_time = ceph_clock_now(ictx->cct); + start_time = ceph_clock_now(); while (left > 0) { uint64_t period_off = off - (off % period); uint64_t read_len = min(period_off + period - off, left); @@ -2287,7 +2287,7 @@ int mirror_image_disable_internal(ImageCtx *ictx, bool force, off += ret; } - elapsed = ceph_clock_now(ictx->cct) - start_time; + elapsed = ceph_clock_now() - start_time; ictx->perfcounter->tinc(l_librbd_rd_latency, elapsed); ictx->perfcounter->inc(l_librbd_rd); ictx->perfcounter->inc(l_librbd_rd_bytes, mylen); diff --git a/src/log/Log.cc b/src/log/Log.cc index 9f174dfb735cc..2614a4dceba4d 100644 --- a/src/log/Log.cc +++ b/src/log/Log.cc @@ -225,13 +225,13 @@ void Log::submit_entry(Entry *e) Entry *Log::create_entry(int level, int subsys) { if (true) { - return new Entry(ceph_clock_now(NULL), - pthread_self(), - level, subsys); + return new Entry(ceph_clock_now(), + pthread_self(), + level, subsys); } else { // kludge for perf testing Entry *e = m_recent.dequeue(); - e->m_stamp = ceph_clock_now(NULL); + e->m_stamp = ceph_clock_now(); e->m_thread = pthread_self(); e->m_prio = level; e->m_subsys = subsys; @@ -246,13 +246,13 @@ Entry *Log::create_entry(int level, int subsys, size_t* expected_size) "Log hint"); size_t size = __atomic_load_n(expected_size, __ATOMIC_RELAXED); void *ptr = ::operator new(sizeof(Entry) + size); - return new(ptr) Entry(ceph_clock_now(NULL), + return new(ptr) Entry(ceph_clock_now(), pthread_self(), level, subsys, reinterpret_cast(ptr) + sizeof(Entry), size, expected_size); } else { // kludge for perf testing Entry *e = m_recent.dequeue(); - e->m_stamp = ceph_clock_now(NULL); + e->m_stamp = ceph_clock_now(); e->m_thread = pthread_self(); e->m_prio = level; e->m_subsys = subsys; diff --git a/src/log/test.cc b/src/log/test.cc 
b/src/log/test.cc
index 618788246a8bb..573cc35b5e491 100644 --- a/src/log/test.cc +++ b/src/log/test.cc @@ -28,7 +28,7 @@ TEST(Log, Simple) int sys = i % 4; int l = 5 + (i%4); if (subs.should_gather(sys, l)) { - Entry *e = new Entry(ceph_clock_now(NULL), + Entry *e = new Entry(ceph_clock_now(), pthread_self(), l, sys, @@ -57,7 +57,7 @@ TEST(Log, ManyNoGather) for (int i=0; iset_str(oss.str()); @@ -114,7 +114,7 @@ TEST(Log, ManyGatherLogStringAssignWithReserve) for (int i=0; im_static_buf, sizeof(e->m_static_buf)); ostream oss(&psb); oss << "this i a long stream asdf asdf asdf asdf asdf asdf asdf asdf asdf as fd"; @@ -160,7 +160,7 @@ TEST(Log, ManyGatherLogPrebufOverflow) for (int i=0; im_static_buf, sizeof(e->m_static_buf)); ostream oss(&psb); oss << "this i a long stream asdf asdf asdf asdf asdf asdf asdf asdf asdf as fd" @@ -184,7 +184,7 @@ TEST(Log, ManyGather) for (int i=0; iset_str(msg); diff --git a/src/mds/Beacon.cc b/src/mds/Beacon.cc index 285583b6780b0..36c7d882e099e 100644 --- a/src/mds/Beacon.cc +++ b/src/mds/Beacon.cc @@ -118,7 +118,7 @@ void Beacon::handle_mds_beacon(MMDSBeacon *m) // update lab if (seq_stamp.count(seq)) { - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); if (seq_stamp[seq] > last_acked_stamp) { last_acked_stamp = seq_stamp[seq]; utime_t rtt = now - last_acked_stamp; @@ -172,9 +172,9 @@ void Beacon::send_and_wait(const double duration) << " for up to " << duration << "s" << dendl; utime_t timeout; - timeout.set_from_double(ceph_clock_now(cct) + duration); + timeout.set_from_double(ceph_clock_now() + duration); while ((!seq_stamp.empty() && seq_stamp.begin()->first <= awaiting_seq) - && ceph_clock_now(cct) < timeout) { + && ceph_clock_now() < timeout) { waiting_cond.WaitUntil(lock, timeout); } @@ -205,7 +205,7 @@ void Beacon::_send() << " seq " << last_seq << dendl; - seq_stamp[last_seq] = ceph_clock_now(g_ceph_context); + seq_stamp[last_seq] = ceph_clock_now(); assert(want_state != MDSMap::STATE_NULL); @@ -264,7 +264,7 @@ bool Beacon::is_laggy() if (last_acked_stamp == utime_t()) return false; - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); utime_t since = now - last_acked_stamp; if (since > g_conf->mds_beacon_grace) { dout(5) << "is_laggy " << since << " > " << g_conf->mds_beacon_grace @@ -398,7 +398,7 @@ void Beacon::notify_health(MDSRank const *mds) set sessions; mds->sessionmap.get_client_session_set(sessions); - utime_t cutoff = ceph_clock_now(g_ceph_context); + utime_t cutoff = ceph_clock_now(); cutoff -= g_conf->mds_recall_state_timeout; utime_t last_recall = mds->mdcache->last_recall_state; diff --git a/src/mds/CDentry.cc b/src/mds/CDentry.cc index 112a5ac08a6ed..c528a028f708a 100644 --- a/src/mds/CDentry.cc +++ b/src/mds/CDentry.cc @@ -32,7 +32,7 @@ ostream& CDentry::print_db_line_prefix(ostream& out) { - return out << ceph_clock_now(g_ceph_context) << " mds." << dir->cache->mds->get_nodeid() << ".cache.den(" << dir->ino() << " " << name << ") "; + return out << ceph_clock_now() << " mds." << dir->cache->mds->get_nodeid() << ".cache.den(" << dir->ino() << " " << name << ") "; } boost::pool<> CDentry::pool(sizeof(CDentry)); diff --git a/src/mds/CDir.cc b/src/mds/CDir.cc index 950a31ee52988..f3a7ce8592398 100644 --- a/src/mds/CDir.cc +++ b/src/mds/CDir.cc @@ -171,7 +171,7 @@ void CDir::print(ostream& out) ostream& CDir::print_db_line_prefix(ostream& out) { - return out << ceph_clock_now(g_ceph_context) << " mds." 
<< cache->mds->get_nodeid() << ".cache.dir(" << this->dirfrag() << ") "; + return out << ceph_clock_now() << " mds." << cache->mds->get_nodeid() << ".cache.dir(" << this->dirfrag() << ") "; } @@ -189,10 +189,10 @@ CDir::CDir(CInode *in, frag_t fg, MDCache *mdcache, bool auth) : num_dirty(0), committing_version(0), committed_version(0), dir_auth_pins(0), request_pins(0), dir_rep(REP_NONE), - pop_me(ceph_clock_now(g_ceph_context)), - pop_nested(ceph_clock_now(g_ceph_context)), - pop_auth_subtree(ceph_clock_now(g_ceph_context)), - pop_auth_subtree_nested(ceph_clock_now(g_ceph_context)), + pop_me(ceph_clock_now()), + pop_nested(ceph_clock_now()), + pop_auth_subtree(ceph_clock_now()), + pop_auth_subtree_nested(ceph_clock_now()), num_dentries_nested(0), num_dentries_auth_subtree(0), num_dentries_auth_subtree_nested(0), dir_auth(CDIR_AUTH_DEFAULT) @@ -1686,7 +1686,7 @@ CDentry *CDir::_load_dentry( } //in->hack_accessed = false; - //in->hack_load_stamp = ceph_clock_now(g_ceph_context); + //in->hack_load_stamp = ceph_clock_now(); //num_new_inodes_loaded++; } else { dout(0) << "_fetched badness: got (but i already had) " << *in @@ -2073,7 +2073,7 @@ void CDir::_omap_commit(int op_prio) op.omap_rm_keys(to_remove); cache->mds->objecter->mutate(oid, oloc, op, snapc, - ceph::real_clock::now(g_ceph_context), + ceph::real_clock::now(), 0, NULL, gather.new_sub()); write_size = 0; @@ -2107,7 +2107,7 @@ void CDir::_omap_commit(int op_prio) op.omap_rm_keys(to_remove); cache->mds->objecter->mutate(oid, oloc, op, snapc, - ceph::real_clock::now(g_ceph_context), + ceph::real_clock::now(), 0, NULL, gather.new_sub()); gather.activate(); @@ -2940,7 +2940,7 @@ void CDir::scrub_initialize(const ScrubHeaderRefConst& header) assert(scrub_infop && !scrub_infop->directory_scrubbing); scrub_infop->recursive_start.version = get_projected_version(); - scrub_infop->recursive_start.time = ceph_clock_now(g_ceph_context); + scrub_infop->recursive_start.time = ceph_clock_now(); scrub_infop->directories_to_scrub.clear(); scrub_infop->directories_scrubbing.clear(); @@ -3118,7 +3118,7 @@ bool CDir::scrub_local() scrub_info(); if (rval) { - scrub_infop->last_local.time = ceph_clock_now(g_ceph_context); + scrub_infop->last_local.time = ceph_clock_now(); scrub_infop->last_local.version = get_projected_version(); scrub_infop->pending_scrub_error = false; scrub_infop->last_scrub_dirty = true; diff --git a/src/mds/CInode.cc b/src/mds/CInode.cc index ac31dc850bc24..d2239317cdb15 100644 --- a/src/mds/CInode.cc +++ b/src/mds/CInode.cc @@ -82,7 +82,7 @@ LockType CInode::policylock_type(CEPH_LOCK_IPOLICY); //int cinode_pins[CINODE_NUM_PINS]; // counts ostream& CInode::print_db_line_prefix(ostream& out) { - return out << ceph_clock_now(g_ceph_context) << " mds." << mdcache->mds->get_nodeid() << ".cache.ino(" << inode.ino << ") "; + return out << ceph_clock_now() << " mds." 
<< mdcache->mds->get_nodeid() << ".cache.ino(" << inode.ino << ") "; } /* @@ -980,7 +980,7 @@ void CInode::store(MDSInternalContextBase *fin) new C_OnFinisher(new C_IO_Inode_Stored(this, get_version(), fin), mdcache->mds->finisher); mdcache->mds->objecter->mutate(oid, oloc, m, snapc, - ceph::real_clock::now(g_ceph_context), 0, + ceph::real_clock::now(), 0, NULL, newfin); } @@ -1165,14 +1165,14 @@ void CInode::store_backtrace(MDSInternalContextBase *fin, int op_prio) if (!state_test(STATE_DIRTYPOOL) || inode.old_pools.empty()) { dout(20) << __func__ << ": no dirtypool or no old pools" << dendl; mdcache->mds->objecter->mutate(oid, oloc, op, snapc, - ceph::real_clock::now(g_ceph_context), + ceph::real_clock::now(), 0, NULL, fin2); return; } C_GatherBuilder gather(g_ceph_context, fin2); mdcache->mds->objecter->mutate(oid, oloc, op, snapc, - ceph::real_clock::now(g_ceph_context), + ceph::real_clock::now(), 0, NULL, gather.new_sub()); // In the case where DIRTYPOOL is set, we update all old pools backtraces @@ -1193,7 +1193,7 @@ void CInode::store_backtrace(MDSInternalContextBase *fin, int op_prio) object_locator_t oloc(*p); mdcache->mds->objecter->mutate(oid, oloc, op, snapc, - ceph::real_clock::now(g_ceph_context), + ceph::real_clock::now(), 0, NULL, gather.new_sub()); } gather.activate(); @@ -2842,7 +2842,7 @@ Capability *CInode::reconnect_cap(client_t client, const cap_reconnect_t& icr, S cap->issue_norevoke(icr.capinfo.issued); cap->reset_seq(); } - cap->set_last_issue_stamp(ceph_clock_now(g_ceph_context)); + cap->set_last_issue_stamp(ceph_clock_now()); return cap; } @@ -3241,7 +3241,7 @@ int CInode::encode_inodestat(bufferlist& bl, Session *session, cap->issue_norevoke(issue); issue = cap->pending(); cap->set_last_issue(); - cap->set_last_issue_stamp(ceph_clock_now(g_ceph_context)); + cap->set_last_issue_stamp(ceph_clock_now()); cap->clear_new(); ecap.caps = issue; ecap.wanted = cap->wanted(); @@ -3604,7 +3604,7 @@ void CInode::decode_import(bufferlist::iterator& p, _mark_dirty_parent(ls); } - ::decode(pop, ceph_clock_now(g_ceph_context), p); + ::decode(pop, ceph_clock_now(), p); ::decode(replica_map, p); if (!replica_map.empty()) @@ -3749,7 +3749,7 @@ void CInode::validate_disk_state(CInode::validated_data *results, scrub_tag.setxattr("scrub_tag", tag_bl); SnapContext snapc; in->mdcache->mds->objecter->mutate(oid, object_locator_t(pool), scrub_tag, snapc, - ceph::real_clock::now(g_ceph_context), + ceph::real_clock::now(), 0, NULL, NULL); } } @@ -4242,7 +4242,7 @@ void CInode::scrub_initialize(CDentry *scrub_parent, scrub_infop->header = header; scrub_infop->scrub_start_version = get_version(); - scrub_infop->scrub_start_stamp = ceph_clock_now(g_ceph_context); + scrub_infop->scrub_start_stamp = ceph_clock_now(); // right now we don't handle remote inodes } @@ -4261,7 +4261,7 @@ int CInode::scrub_dirfrag_next(frag_t* out_dirfrag) while (i != scrub_infop->dirfrag_stamps.end()) { if (i->second.scrub_start_version < scrub_infop->scrub_start_version) { i->second.scrub_start_version = get_projected_version(); - i->second.scrub_start_stamp = ceph_clock_now(g_ceph_context); + i->second.scrub_start_stamp = ceph_clock_now(); *out_dirfrag = i->first; dout(20) << " return frag " << *out_dirfrag << dendl; return 0; diff --git a/src/mds/CInode.h b/src/mds/CInode.h index 3980686235779..3a59eb40bedf2 100644 --- a/src/mds/CInode.h +++ b/src/mds/CInode.h @@ -666,7 +666,7 @@ public: item_dirty_dirfrag_nest(this), item_dirty_dirfrag_dirfragtree(this), auth_pin_freeze_allowance(0), - 
pop(ceph_clock_now(g_ceph_context)), + pop(ceph_clock_now()), versionlock(this, &versionlock_type), authlock(this, &authlock_type), linklock(this, &linklock_type), diff --git a/src/mds/DamageTable.h b/src/mds/DamageTable.h index 998e0f84ea6d9..5a988576dbd31 100644 --- a/src/mds/DamageTable.h +++ b/src/mds/DamageTable.h @@ -40,7 +40,7 @@ class DamageEntry DamageEntry() { id = get_random(0, 0xffffffff); - reported_at = ceph_clock_now(g_ceph_context); + reported_at = ceph_clock_now(); } virtual damage_entry_type_t get_type() const = 0; diff --git a/src/mds/JournalPointer.cc b/src/mds/JournalPointer.cc index bf6670fdf1dba..d8c43aa5a9d62 100644 --- a/src/mds/JournalPointer.cc +++ b/src/mds/JournalPointer.cc @@ -91,7 +91,7 @@ int JournalPointer::save(Objecter *objecter) const C_SaferCond waiter; objecter->write_full(object_t(object_id), object_locator_t(pool_id), SnapContext(), data, - ceph::real_clock::now(g_ceph_context), 0, NULL, + ceph::real_clock::now(), 0, NULL, &waiter); int write_result = waiter.wait(); if (write_result < 0) { @@ -114,7 +114,7 @@ void JournalPointer::save(Objecter *objecter, Context *completion) const objecter->write_full(object_t(get_object_id()), object_locator_t(pool_id), SnapContext(), data, - ceph::real_clock::now(g_ceph_context), 0, NULL, + ceph::real_clock::now(), 0, NULL, completion); } diff --git a/src/mds/Locker.cc b/src/mds/Locker.cc index 544975ef3a984..f576a1a539e08 100644 --- a/src/mds/Locker.cc +++ b/src/mds/Locker.cc @@ -564,7 +564,7 @@ bool Locker::acquire_locks(MDRequestRef& mdr, } mdr->done_locking = true; - mdr->set_mds_stamp(ceph_clock_now(NULL)); + mdr->set_mds_stamp(ceph_clock_now()); result = true; marker.message = "acquired locks"; @@ -1957,7 +1957,7 @@ bool Locker::issue_caps(CInode *in, Capability *only_cap) if (op == CEPH_CAP_OP_REVOKE) { revoking_caps.push_back(&cap->item_revoking_caps); revoking_caps_by_client[cap->get_client()].push_back(&cap->item_client_revoking_caps); - cap->set_last_revoke_stamp(ceph_clock_now(g_ceph_context)); + cap->set_last_revoke_stamp(ceph_clock_now()); cap->reset_num_revoke_warnings(); } @@ -3395,7 +3395,7 @@ bool Locker::any_late_revoking_caps(xlist const &revoking) const // No revoking caps at the moment return false; } else { - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); utime_t age = now - (*p)->get_last_revoke_stamp(); if (age <= g_conf->mds_revoke_cap_timeout) { return false; @@ -3432,7 +3432,7 @@ void Locker::get_late_revoking_clients(std::list *result) const void Locker::caps_tick() { - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); dout(20) << __func__ << " " << revoking_caps.size() << " revoking caps" << dendl; @@ -3526,7 +3526,7 @@ void Locker::handle_client_lease(MClientLease *m) m->h.seq = ++l->seq; m->clear_payload(); - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); now += mdcache->client_lease_durations[pool]; mdcache->touch_client_lease(l, pool, now); @@ -4347,7 +4347,7 @@ void Locker::mark_updated_scatterlock(ScatterLock *lock) << " - already on list since " << lock->get_update_stamp() << dendl; } else { updated_scatterlocks.push_back(lock->get_updated_item()); - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); lock->set_update_stamp(now); dout(10) << "mark_updated_scatterlock " << *lock << " - added at " << now << dendl; @@ -4478,7 +4478,7 @@ void Locker::scatter_tick() dout(10) << "scatter_tick" << dendl; // updated - utime_t now = ceph_clock_now(g_ceph_context); + 
utime_t now = ceph_clock_now(); int n = updated_scatterlocks.size(); while (!updated_scatterlocks.empty()) { ScatterLock *lock = updated_scatterlocks.front(); diff --git a/src/mds/MDBalancer.cc b/src/mds/MDBalancer.cc index 5f6d279fbe496..b8fd8fc48d033 100644 --- a/src/mds/MDBalancer.cc +++ b/src/mds/MDBalancer.cc @@ -73,8 +73,8 @@ int MDBalancer::proc_message(Message *m) void MDBalancer::tick() { static int num_bal_times = g_conf->mds_bal_max; - static utime_t first = ceph_clock_now(g_ceph_context); - utime_t now = ceph_clock_now(g_ceph_context); + static utime_t first = ceph_clock_now(); + utime_t now = ceph_clock_now(); utime_t elapsed = now; elapsed -= first; @@ -186,7 +186,7 @@ int MDBalancer::localize_balancer() << " oid=" << oid << " oloc=" << oloc << dendl; /* timeout: if we waste half our time waiting for RADOS, then abort! */ - double t = ceph_clock_now(g_ceph_context) + g_conf->mds_bal_interval/2; + double t = ceph_clock_now() + g_conf->mds_bal_interval/2; utime_t timeout; timeout.set_from_double(t); lock.Lock(); @@ -208,7 +208,7 @@ int MDBalancer::localize_balancer() void MDBalancer::send_heartbeat() { - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); if (mds->mdsmap->is_degraded()) { dout(10) << "send_heartbeat degraded" << dendl; @@ -506,7 +506,7 @@ void MDBalancer::prep_rebalance(int beat) } else { int cluster_size = mds->get_mds_map()->get_num_in_mds(); mds_rank_t whoami = mds->get_nodeid(); - rebalance_time = ceph_clock_now(g_ceph_context); + rebalance_time = ceph_clock_now(); // reset my_targets.clear(); @@ -533,7 +533,7 @@ void MDBalancer::prep_rebalance(int beat) double total_load = 0.0; multimap load_map; for (mds_rank_t i=mds_rank_t(0); i < mds_rank_t(cluster_size); i++) { - map::value_type val(i, mds_load_t(ceph_clock_now(g_ceph_context))); + map::value_type val(i, mds_load_t(ceph_clock_now())); std::pair < map::iterator, bool > r(mds_load.insert(val)); mds_load_t &load(r.first->second); @@ -679,7 +679,7 @@ int MDBalancer::mantle_prep_rebalance() /* prepare for balancing */ int cluster_size = mds->get_mds_map()->get_num_in_mds(); - rebalance_time = ceph_clock_now(g_ceph_context); + rebalance_time = ceph_clock_now(); my_targets.clear(); imported.clear(); exported.clear(); @@ -690,7 +690,7 @@ int MDBalancer::mantle_prep_rebalance() for (mds_rank_t i=mds_rank_t(0); i < mds_rank_t(cluster_size); i++) { - map::value_type val(i, mds_load_t(ceph_clock_now(g_ceph_context))); + map::value_type val(i, mds_load_t(ceph_clock_now())); std::pair < map::iterator, bool > r(mds_load.insert(val)); mds_load_t &load(r.first->second); diff --git a/src/mds/MDCache.cc b/src/mds/MDCache.cc index 9cff6e9c5444f..e33424afd56fe 100644 --- a/src/mds/MDCache.cc +++ b/src/mds/MDCache.cc @@ -364,7 +364,7 @@ void MDCache::create_unlinked_system_inode(CInode *in, inodeno_t ino, in->inode.size = 0; in->inode.ctime = in->inode.mtime = - in->inode.btime = ceph_clock_now(g_ceph_context); + in->inode.btime = ceph_clock_now(); in->inode.nlink = 1; in->inode.truncate_size = -1ull; in->inode.change_attr = 0; @@ -859,7 +859,7 @@ void MDCache::adjust_subtree_auth(CDir *dir, mds_authority_t auth, bool do_eval) // adjust recursive pop counters if (dir->is_auth()) { - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); CDir *p = dir->get_parent_dir(); while (p) { p->pop_auth_subtree.sub(now, decayrate, dir->pop_auth_subtree); @@ -936,7 +936,7 @@ void MDCache::try_subtree_merge_at(CDir *dir, bool do_eval) // adjust popularity? 
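// --- Illustrative sketch, not part of this patch ---
// The MDBalancer hunks above compute an absolute wall-clock deadline from
// "now + interval/2" (timeout.set_from_double(ceph_clock_now() + ...)) and
// then wait on a condition until it passes. With the CephContext* parameter
// gone, that clock read can no longer carry a per-context test offset, so
// the deadline is plain wall time. A self-contained std::chrono analogue of
// the same pattern; the function name and parameters are hypothetical.
#include <chrono>
#include <condition_variable>
#include <mutex>

bool wait_until_deadline(std::condition_variable& cv, std::mutex& m,
                         double interval_secs, bool& done) {
  using clock = std::chrono::system_clock;
  // now + interval/2 -- same arithmetic as timeout.set_from_double(...)
  auto deadline = clock::now() +
      std::chrono::duration_cast<clock::duration>(
          std::chrono::duration<double>(interval_secs / 2));
  std::unique_lock<std::mutex> l(m);
  // returns false if the deadline passed before `done` became true
  return cv.wait_until(l, deadline, [&] { return done; });
}
// --- end sketch ---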
if (dir->is_auth()) { - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); CDir *p = dir->get_parent_dir(); while (p) { p->pop_auth_subtree.add(now, decayrate, dir->pop_auth_subtree); @@ -2107,7 +2107,7 @@ void MDCache::predirty_journal_parents(MutationRef mut, EMetaBlob *blob, // make sure stamp is set if (mut->get_mds_stamp() == utime_t()) - mut->set_mds_stamp(ceph_clock_now(g_ceph_context)); + mut->set_mds_stamp(ceph_clock_now()); if (in->is_base()) return; @@ -5677,7 +5677,7 @@ void MDCache::do_cap_import(Session *session, CInode *in, Capability *cap, if (cap->get_last_seq() == 0) // reconnected cap cap->inc_last_seq(); cap->set_last_issue(); - cap->set_last_issue_stamp(ceph_clock_now(g_ceph_context)); + cap->set_last_issue_stamp(ceph_clock_now()); cap->clear_new(); MClientCaps *reap = new MClientCaps(CEPH_CAP_OP_IMPORT, in->ino(), @@ -6745,7 +6745,7 @@ bool MDCache::trim_inode(CDentry *dn, CInode *in, CDir *con, maplogger->inc("outt"); else { mds->logger->inc("outut"); - mds->logger->fset("oututl", ceph_clock_now(g_ceph_context) - in->hack_load_stamp); + mds->logger->fset("oututl", ceph_clock_now() - in->hack_load_stamp); } } */ @@ -7326,7 +7326,7 @@ void MDCache::dentry_remove_replica(CDentry *dn, mds_rank_t from, set g_conf->mds_cache_size) { float ratio = (float)g_conf->mds_cache_size * .9 / (float)num_inodes_with_caps; if (ratio < 1.0) { - last_recall_state = ceph_clock_now(g_ceph_context); + last_recall_state = ceph_clock_now(); mds->server->recall_client_state(ratio); } } @@ -7417,7 +7417,7 @@ public: void MDCache::shutdown_check() { - dout(0) << "shutdown_check at " << ceph_clock_now(g_ceph_context) << dendl; + dout(0) << "shutdown_check at " << ceph_clock_now() << dendl; // cache char old_val[32] = { 0 }; @@ -8967,7 +8967,7 @@ MDRequestRef MDCache::request_start_internal(int op) MDRequestImpl::Params params; params.reqid.name = entity_name_t::MDS(mds->get_nodeid()); params.reqid.tid = mds->issue_tid(); - params.initiated = ceph_clock_now(g_ceph_context); + params.initiated = ceph_clock_now(); params.internal_op = op; MDRequestRef mdr = mds->op_tracker.create_request(params); @@ -10778,7 +10778,7 @@ void MDCache::split_dir(CDir *dir, int bits) info.mdr = mdr; info.dirs.push_back(dir); info.bits = bits; - info.last_cum_auth_pins_change = ceph_clock_now(g_ceph_context); + info.last_cum_auth_pins_change = ceph_clock_now(); fragment_freeze_dirs(dirs); // initial mark+complete pass @@ -10816,7 +10816,7 @@ void MDCache::merge_dir(CInode *diri, frag_t frag) info.mdr = mdr; info.dirs = dirs; info.bits = -bits; - info.last_cum_auth_pins_change = ceph_clock_now(g_ceph_context); + info.last_cum_auth_pins_change = ceph_clock_now(); fragment_freeze_dirs(dirs); // initial mark+complete pass @@ -10990,7 +10990,7 @@ void MDCache::find_stale_fragment_freeze() { dout(10) << "find_stale_fragment_freeze" << dendl; // see comment in Migrator::find_stale_export_freeze() - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); utime_t cutoff = now; cutoff -= g_conf->mds_freeze_tree_timeout; @@ -11329,7 +11329,7 @@ void MDCache::_fragment_committed(dirfrag_t basedirfrag, list& resultfrag op.remove(); } mds->objecter->mutate(oid, oloc, op, nullsnapc, - ceph::real_clock::now(g_ceph_context), + ceph::real_clock::now(), 0, NULL, gather.new_sub()); } diff --git a/src/mds/MDLog.cc b/src/mds/MDLog.cc index b0b0f794afb45..267da464d22c5 100644 --- a/src/mds/MDLog.cc +++ b/src/mds/MDLog.cc @@ -295,7 +295,7 @@ void MDLog::_submit_entry(LogEvent *le, 
MDSLogContextBase *c) le->_segment = ls; le->update_segment(); - le->set_stamp(ceph_clock_now(g_ceph_context)); + le->set_stamp(ceph_clock_now()); mdsmap_up_features = mds->mdsmap->get_up_features(); pending_events[ls->seq].push_back(PendingEvent(le, c)); @@ -615,7 +615,7 @@ void MDLog::trim(int m) } // hack: only trim for a few seconds at a time - utime_t stop = ceph_clock_now(g_ceph_context); + utime_t stop = ceph_clock_now(); stop += 2.0; map::iterator p = segments.begin(); @@ -624,7 +624,7 @@ void MDLog::trim(int m) num_events - expiring_events - expired_events > max_events) || (segments.size() - expiring_segments.size() - expired_segments.size() > max_segments))) { - if (stop < ceph_clock_now(g_ceph_context)) + if (stop < ceph_clock_now()) break; int num_expiring_segments = (int)expiring_segments.size(); diff --git a/src/mds/MDSRank.cc b/src/mds/MDSRank.cc index 7a51ed02ad0a4..79bc25ba22653 100644 --- a/src/mds/MDSRank.cc +++ b/src/mds/MDSRank.cc @@ -187,7 +187,7 @@ void MDSRankDispatcher::tick() } // log - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); mds_load_t load = balancer->get_load(now); if (logger) { @@ -465,7 +465,7 @@ bool MDSRank::_dispatch(Message *m, bool new_msg) // hack: thrash exports static utime_t start; - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); if (start == utime_t()) start = now; /*double el = now - start; diff --git a/src/mds/MDSTable.cc b/src/mds/MDSTable.cc index c8adddc16ed5b..9f9515adf13bc 100644 --- a/src/mds/MDSTable.cc +++ b/src/mds/MDSTable.cc @@ -81,7 +81,7 @@ void MDSTable::save(MDSInternalContextBase *onfinish, version_t v) object_locator_t oloc(mds->mdsmap->get_metadata_pool()); mds->objecter->write_full(oid, oloc, snapc, - bl, ceph::real_clock::now(g_ceph_context), 0, + bl, ceph::real_clock::now(), 0, NULL, new C_OnFinisher(new C_IO_MT_Save(this, version), mds->finisher)); diff --git a/src/mds/Migrator.cc b/src/mds/Migrator.cc index 1173d3550080e..0008ef06c858f 100644 --- a/src/mds/Migrator.cc +++ b/src/mds/Migrator.cc @@ -211,7 +211,7 @@ void Migrator::export_empty_import(CDir *dir) void Migrator::find_stale_export_freeze() { - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); utime_t cutoff = now; cutoff -= g_conf->mds_freeze_tree_timeout; @@ -845,7 +845,7 @@ void Migrator::dispatch_export_dir(MDRequestRef& mdr) mds->send_message_mds(discover, it->second.peer); assert(g_conf->mds_kill_export_at != 2); - it->second.last_cum_auth_pins_change = ceph_clock_now(g_ceph_context); + it->second.last_cum_auth_pins_change = ceph_clock_now(); // start the freeze, but hold it up with an auth_pin. dir->freeze_tree(); @@ -1236,7 +1236,7 @@ void Migrator::export_go_synced(CDir *dir, uint64_t tid) cache->adjust_subtree_auth(dir, mds->get_nodeid(), it->second.peer); // take away the popularity we're sending. 
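// --- Illustrative sketch, not part of this patch ---
// find_stale_export_freeze() above (and find_idle_sessions() further down)
// all use the same staleness idiom: read the unskewed clock once, subtract a
// grace period to get a cutoff, and treat anything stamped before the cutoff
// as stale. A minimal self-contained std::chrono version; the container,
// grace value and helper name are hypothetical.
#include <chrono>
#include <cstddef>
#include <map>

using time_point = std::chrono::system_clock::time_point;

template <typename Key>
std::size_t count_stale(const std::map<Key, time_point>& last_seen,
                        std::chrono::seconds grace) {
  const time_point cutoff = std::chrono::system_clock::now() - grace;
  std::size_t n = 0;
  for (const auto& [key, stamp] : last_seen)
    if (stamp < cutoff)   // older than (now - grace) => stale
      ++n;
  return n;
}
// --- end sketch ---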
- utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); mds->balancer->subtract_export(dir, now); // fill export message with cache data @@ -1807,7 +1807,7 @@ void Migrator::export_finish(CDir *dir) // finish export (adjust local cache state) int num_dentries = 0; C_ContextsBase *fin = new C_ContextsBase(g_ceph_context); - finish_export_dir(dir, ceph_clock_now(g_ceph_context), it->second.peer, + finish_export_dir(dir, ceph_clock_now(), it->second.peer, it->second.peer_imported, fin->contexts, &num_dentries); dir->add_waiter(CDir::WAIT_UNFREEZE, fin); @@ -2225,7 +2225,7 @@ void Migrator::handle_export_dir(MExportDir *m) assert(it->second.state == IMPORT_PREPPED); assert(it->second.tid == m->get_tid()); - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); mds_rank_t oldauth = mds_rank_t(m->get_source().num()); dout(7) << "handle_export_dir importing " << *dir << " from " << oldauth << dendl; assert(dir->is_auth() == false); diff --git a/src/mds/Server.cc b/src/mds/Server.cc index 2f29757b46776..8610f939a5862 100644 --- a/src/mds/Server.cc +++ b/src/mds/Server.cc @@ -618,7 +618,7 @@ void Server::find_idle_sessions() // timeout/stale // (caps go stale, lease die) - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); utime_t cutoff = now; cutoff -= g_conf->mds_session_timeout; while (1) { @@ -746,7 +746,7 @@ void Server::reconnect_clients(MDSInternalContext *reconnect_done_) // clients will get the mdsmap and discover we're reconnecting via the monitor. - reconnect_start = ceph_clock_now(g_ceph_context); + reconnect_start = ceph_clock_now(); dout(1) << "reconnect_clients -- " << client_reconnect_gather.size() << " sessions" << dendl; mds->sessionmap.dump(); } @@ -765,7 +765,7 @@ void Server::handle_client_reconnect(MClientReconnect *m) return; } - utime_t delay = ceph_clock_now(g_ceph_context); + utime_t delay = ceph_clock_now(); delay -= reconnect_start; dout(10) << " reconnect_start " << reconnect_start << " delay " << delay << dendl; @@ -881,7 +881,7 @@ void Server::reconnect_tick() { utime_t reconnect_end = reconnect_start; reconnect_end += g_conf->mds_reconnect_timeout; - if (ceph_clock_now(g_ceph_context) >= reconnect_end && + if (ceph_clock_now() >= reconnect_end && !client_reconnect_gather.empty()) { dout(10) << "reconnect timed out" << dendl; for (set::iterator p = client_reconnect_gather.begin(); @@ -1096,7 +1096,7 @@ void Server::early_reply(MDRequestRef& mdr, CInode *tracei, CDentry *tracedn) mdr->did_early_reply = true; mds->logger->inc(l_mds_reply); - utime_t lat = ceph_clock_now(g_ceph_context) - req->get_recv_stamp(); + utime_t lat = ceph_clock_now() - req->get_recv_stamp(); mds->logger->tinc(l_mds_reply_latency, lat); dout(20) << "lat " << lat << dendl; @@ -1151,7 +1151,7 @@ void Server::reply_client_request(MDRequestRef& mdr, MClientReply *reply) if (!did_early_reply && !is_replay) { mds->logger->inc(l_mds_reply); - utime_t lat = ceph_clock_now(g_ceph_context) - mdr->client_request->get_recv_stamp(); + utime_t lat = ceph_clock_now() - mdr->client_request->get_recv_stamp(); mds->logger->tinc(l_mds_reply_latency, lat); dout(20) << "lat " << lat << dendl; @@ -1263,7 +1263,7 @@ void Server::set_trace_dist(Session *session, MClientReply *reply, bufferlist bl; mds_rank_t whoami = mds->get_nodeid(); client_t client = session->get_client(); - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); dout(20) << "set_trace_dist snapid " << snapid << dendl; @@ -2754,7 +2754,7 @@ void 
Server::handle_client_getattr(MDRequestRef& mdr, bool is_lookup) // value for them. (currently this matters for xattrs and inline data) mdr->getattr_caps = mask; - mds->balancer->hit_inode(ceph_clock_now(g_ceph_context), ref, META_POP_IRD, + mds->balancer->hit_inode(ceph_clock_now(), ref, META_POP_IRD, req->get_source().num()); // reply @@ -3373,7 +3373,7 @@ void Server::handle_client_readdir(MDRequestRef& mdr) dir->verify_fragstat(); #endif - utime_t now = ceph_clock_now(NULL); + utime_t now = ceph_clock_now(); mdr->set_mds_stamp(now); snapid_t snapid = mdr->snapid; @@ -5073,7 +5073,7 @@ void Server::_link_remote(MDRequestRef& mdr, bool inc, CDentry *dn, CInode *targ assert(g_conf->mds_kill_link_at != 2); - mdr->set_mds_stamp(ceph_clock_now(NULL)); + mdr->set_mds_stamp(ceph_clock_now()); // add to event mdr->ls = mdlog->get_current_segment(); @@ -6495,7 +6495,7 @@ void Server::handle_client_rename(MDRequestRef& mdr) assert(g_conf->mds_kill_rename_at != 4); // -- declare now -- - mdr->set_mds_stamp(ceph_clock_now(g_ceph_context)); + mdr->set_mds_stamp(ceph_clock_now()); // -- prepare journal entry -- mdr->ls = mdlog->get_current_segment(); diff --git a/src/mds/SessionMap.cc b/src/mds/SessionMap.cc index 8e732a1d679d4..e7dd17534ee68 100644 --- a/src/mds/SessionMap.cc +++ b/src/mds/SessionMap.cc @@ -424,9 +424,10 @@ void SessionMap::save(MDSInternalContextBase *onsave, version_t needv) null_sessions.clear(); mds->objecter->mutate(oid, oloc, op, snapc, - ceph::real_clock::now(g_ceph_context), - 0, NULL, new C_OnFinisher(new C_IO_SM_Save(this, version), - mds->finisher)); + ceph::real_clock::now(), + 0, NULL, + new C_OnFinisher(new C_IO_SM_Save(this, version), + mds->finisher)); } void SessionMap::_save_finish(version_t v) @@ -472,7 +473,7 @@ uint64_t SessionMap::set_state(Session *session, int s) { void SessionMapStore::decode_legacy(bufferlist::iterator& p) { - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); uint64_t pre; ::decode(pre, p); if (pre == (uint64_t)-1) { @@ -615,7 +616,7 @@ void SessionMap::touch_session(Session *session) new xlist).first; by_state_entry->second->push_back(&session->item_session_list); - session->last_cap_renew = ceph_clock_now(g_ceph_context); + session->last_cap_renew = ceph_clock_now(); } void SessionMap::_mark_dirty(Session *s) @@ -758,7 +759,7 @@ void SessionMap::save_if_dirty(const std::set &tgt_sessions, object_locator_t oloc(mds->mdsmap->get_metadata_pool()); MDSInternalContextBase *on_safe = gather_bld->new_sub(); mds->objecter->mutate(oid, oloc, op, snapc, - ceph::real_clock::now(g_ceph_context), + ceph::real_clock::now(), 0, NULL, new C_OnFinisher( new C_IO_SM_Save_One(this, on_safe), mds->finisher)); @@ -819,12 +820,12 @@ void Session::notify_recall_sent(int const new_limit) if (recalled_at.is_zero()) { // Entering recall phase, set up counters so we can later // judge whether the client has respected the recall request - recalled_at = last_recall_sent = ceph_clock_now(g_ceph_context); + recalled_at = last_recall_sent = ceph_clock_now(); assert (new_limit < caps.size()); // Behaviour of Server::recall_client_state recall_count = caps.size() - new_limit; recall_release_count = 0; } else { - last_recall_sent = ceph_clock_now(g_ceph_context); + last_recall_sent = ceph_clock_now(); } } diff --git a/src/mds/SessionMap.h b/src/mds/SessionMap.h index fbd488ea83cc3..d395571a21c78 100644 --- a/src/mds/SessionMap.h +++ b/src/mds/SessionMap.h @@ -413,7 +413,7 @@ public: } else { s = session_map[i.name] = new Session; s->info.inst 
= i; - s->last_cap_renew = ceph_clock_now(g_ceph_context); + s->last_cap_renew = ceph_clock_now(); if (logger) { logger->set(l_mdssm_session_count, session_map.size()); logger->inc(l_mdssm_session_add); diff --git a/src/mds/StrayManager.cc b/src/mds/StrayManager.cc index f5bba7b526fd1..d7f449b66bd12 100644 --- a/src/mds/StrayManager.cc +++ b/src/mds/StrayManager.cc @@ -118,7 +118,7 @@ void StrayManager::purge(CDentry *dn, uint32_t op_allowance) object_t oid = CInode::get_object_name(in->inode.ino, *p, ""); dout(10) << __func__ << " remove dirfrag " << oid << dendl; mds->objecter->remove(oid, oloc, nullsnapc, - ceph::real_clock::now(g_ceph_context), + ceph::real_clock::now(), 0, NULL, gather.new_sub()); } assert(gather.has_subs()); @@ -149,7 +149,7 @@ void StrayManager::purge(CDentry *dn, uint32_t op_allowance) dout(10) << __func__ << " 0~" << to << " objects 0~" << num << " snapc " << snapc << " on " << *in << dendl; filer.purge_range(in->inode.ino, &in->inode.layout, *snapc, - 0, num, ceph::real_clock::now(g_ceph_context), 0, + 0, num, ceph::real_clock::now(), 0, gather.new_sub()); } } @@ -162,7 +162,7 @@ void StrayManager::purge(CDentry *dn, uint32_t op_allowance) dout(10) << __func__ << " remove backtrace object " << oid << " pool " << oloc.pool << " snapc " << snapc << dendl; mds->objecter->remove(oid, oloc, *snapc, - ceph::real_clock::now(g_ceph_context), 0, + ceph::real_clock::now(), 0, NULL, gather.new_sub()); } // remove old backtrace objects @@ -173,7 +173,7 @@ void StrayManager::purge(CDentry *dn, uint32_t op_allowance) dout(10) << __func__ << " remove backtrace object " << oid << " old pool " << *p << " snapc " << snapc << dendl; mds->objecter->remove(oid, oloc, *snapc, - ceph::real_clock::now(g_ceph_context), 0, + ceph::real_clock::now(), 0, NULL, gather.new_sub()); } assert(gather.has_subs()); @@ -909,12 +909,12 @@ void StrayManager::truncate(CDentry *dn, uint32_t op_allowance) // keep backtrace object if (num > 1) { filer.purge_range(in->ino(), &in->inode.layout, *snapc, - 1, num - 1, ceph::real_clock::now(g_ceph_context), + 1, num - 1, ceph::real_clock::now(), 0, gather.new_sub()); } filer.zero(in->ino(), &in->inode.layout, *snapc, 0, in->inode.layout.object_size, - ceph::real_clock::now(g_ceph_context), + ceph::real_clock::now(), 0, true, NULL, gather.new_sub()); } diff --git a/src/mds/mdstypes.h b/src/mds/mdstypes.h index b3f4f14bf8abb..961fbf8d37809 100644 --- a/src/mds/mdstypes.h +++ b/src/mds/mdstypes.h @@ -1158,7 +1158,7 @@ inline void decode(dirfrag_load_vec_t& c, bufferlist::iterator &p) { inline std::ostream& operator<<(std::ostream& out, dirfrag_load_vec_t& dl) { // ugliness! 
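// --- Illustrative sketch, not part of this patch ---
// Roughly what the zero-argument ceph_clock_now() reduces to once the
// configurable per-CephContext offset is gone: a bare CLOCK_REALTIME read.
// The committed body lives in src/common/Clock.cc, which is not shown in
// this excerpt, so treat this as an approximation of the idea rather than
// the actual code; the struct is a stand-in for utime_t.
#include <ctime>   // clock_gettime, CLOCK_REALTIME (POSIX)

struct timespec_stamp { long sec; long nsec; };  // stand-in for utime_t

inline timespec_stamp clock_now_sketch() {
  struct timespec tp;
  clock_gettime(CLOCK_REALTIME, &tp);   // no context, no configurable skew
  return {static_cast<long>(tp.tv_sec), tp.tv_nsec};
}
// --- end sketch ---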
- utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); DecayRate rate(g_conf->mds_decay_halflife); return out << "[" << dl.vec[0].get(now, rate) << "," << dl.vec[1].get(now, rate) << " " << dl.meta_load(now, rate) @@ -1230,7 +1230,7 @@ public: DecayCounter count; public: - load_spread_t() : p(0), n(0), count(ceph_clock_now(g_ceph_context)) + load_spread_t() : p(0), n(0), count(ceph_clock_now()) { for (int i=0; ipacked.begin(); diff --git a/src/mon/DataHealthService.cc b/src/mon/DataHealthService.cc index 521adfdb0263b..67ce4d4f91414 100644 --- a/src/mon/DataHealthService.cc +++ b/src/mon/DataHealthService.cc @@ -139,7 +139,7 @@ int DataHealthService::update_store_stats(DataStats &ours) ours.store_stats.bytes_sst = extra["sst"]; ours.store_stats.bytes_log = extra["log"]; ours.store_stats.bytes_misc = extra["misc"]; - ours.last_update = ceph_clock_now(g_ceph_context); + ours.last_update = ceph_clock_now(); return 0; } @@ -159,7 +159,7 @@ int DataHealthService::update_stats() << " total " << prettybyte_t(ours.fs_stats.byte_total) << ", used " << prettybyte_t(ours.fs_stats.byte_used) << ", avail " << prettybyte_t(ours.fs_stats.byte_avail) << dendl; - ours.last_update = ceph_clock_now(g_ceph_context); + ours.last_update = ceph_clock_now(); return update_store_stats(ours); } diff --git a/src/mon/Elector.cc b/src/mon/Elector.cc index fbcfae683ec5f..d8e66e24d453c 100644 --- a/src/mon/Elector.cc +++ b/src/mon/Elector.cc @@ -86,7 +86,7 @@ void Elector::start() int r = mon->store->apply_transaction(t); assert(r >= 0); } - start_stamp = ceph_clock_now(g_ceph_context); + start_stamp = ceph_clock_now(); electing_me = true; acked_me[mon->rank].cluster_features = CEPH_FEATURES_ALL; acked_me[mon->rank].mon_features = ceph::features::mon::get_supported(); @@ -117,7 +117,7 @@ void Elector::defer(int who) // ack them leader_acked = who; - ack_stamp = ceph_clock_now(g_ceph_context); + ack_stamp = ceph_clock_now(); MMonElection *m = new MMonElection(MMonElection::OP_ACK, epoch, mon->monmap); m->mon_features = ceph::features::mon::get_supported(); m->sharing_bl = mon->get_supported_commands_bl(); diff --git a/src/mon/LogMonitor.cc b/src/mon/LogMonitor.cc index c195c4f71ac5d..025e6ec4abadc 100644 --- a/src/mon/LogMonitor.cc +++ b/src/mon/LogMonitor.cc @@ -63,7 +63,7 @@ void LogMonitor::create_initial() dout(10) << "create_initial -- creating initial map" << dendl; LogEntry e; memset(&e.who, 0, sizeof(e.who)); - e.stamp = ceph_clock_now(g_ceph_context); + e.stamp = ceph_clock_now(); e.prio = CLOG_INFO; std::stringstream ss; ss << "mkfs " << mon->monmap->get_fsid(); @@ -553,7 +553,7 @@ void LogMonitor::_create_sub_incremental(MLog *mlog, int level, version_t sv) dout(10) << __func__ << " skipped from " << sv << " to first_committed " << get_first_committed() << dendl; LogEntry le; - le.stamp = ceph_clock_now(NULL); + le.stamp = ceph_clock_now(); le.prio = CLOG_WARN; ostringstream ss; ss << "skipped log messages from " << sv << " to " << get_first_committed(); diff --git a/src/mon/MDSMonitor.cc b/src/mon/MDSMonitor.cc index ff8ce954ce81f..9015405ee2a9e 100644 --- a/src/mon/MDSMonitor.cc +++ b/src/mon/MDSMonitor.cc @@ -96,8 +96,8 @@ void MDSMonitor::create_new_fs(FSMap &fsm, const std::string &name, fs->mds_map.cas_pool = -1; fs->mds_map.max_file_size = g_conf->mds_max_file_size; fs->mds_map.compat = fsm.compat; - fs->mds_map.created = ceph_clock_now(g_ceph_context); - fs->mds_map.modified = ceph_clock_now(g_ceph_context); + fs->mds_map.created = ceph_clock_now(); + fs->mds_map.modified = 
ceph_clock_now(); fs->mds_map.session_timeout = g_conf->mds_session_timeout; fs->mds_map.session_autoclose = g_conf->mds_session_autoclose; fs->mds_map.enabled = true; @@ -189,7 +189,7 @@ void MDSMonitor::encode_pending(MonitorDBStore::TransactionRef t) // Set 'modified' on maps modified this epoch for (auto &i : fsmap.filesystems) { if (i.second->mds_map.epoch == fsmap.epoch) { - i.second->mds_map.modified = ceph_clock_now(g_ceph_context); + i.second->mds_map.modified = ceph_clock_now(); } } @@ -285,7 +285,7 @@ void MDSMonitor::_note_beacon(MMDSBeacon *m) version_t seq = m->get_seq(); dout(15) << "_note_beacon " << *m << " noting time" << dendl; - last_beacon[gid].stamp = ceph_clock_now(g_ceph_context); + last_beacon[gid].stamp = ceph_clock_now(); last_beacon[gid].seq = seq; } @@ -549,7 +549,7 @@ bool MDSMonitor::prepare_beacon(MonOpRequestRef op) } // initialize the beacon timer - last_beacon[gid].stamp = ceph_clock_now(g_ceph_context); + last_beacon[gid].stamp = ceph_clock_now(); last_beacon[gid].seq = seq; // new incompat? @@ -621,7 +621,7 @@ bool MDSMonitor::prepare_beacon(MonOpRequestRef op) dout(4) << __func__ << ": marking rank " << info.rank << " damaged" << dendl; - utime_t until = ceph_clock_now(g_ceph_context); + utime_t until = ceph_clock_now(); until += g_conf->mds_blacklist_interval; const auto blacklist_epoch = mon->osdmon()->blacklist(info.addr, until); request_proposal(mon->osdmon()); @@ -1262,7 +1262,7 @@ bool MDSMonitor::fail_mds_gid(mds_gid_t gid) epoch_t blacklist_epoch = 0; if (info.rank >= 0 && info.state != MDSMap::STATE_STANDBY_REPLAY) { - utime_t until = ceph_clock_now(g_ceph_context); + utime_t until = ceph_clock_now(); until += g_conf->mds_blacklist_interval; blacklist_epoch = mon->osdmon()->blacklist(info.addr, until); } @@ -1749,8 +1749,8 @@ int MDSMonitor::management_command( new_fs->mds_map.fs_name = fs->mds_map.fs_name; new_fs->mds_map.max_file_size = g_conf->mds_max_file_size; new_fs->mds_map.compat = fsmap.compat; - new_fs->mds_map.created = ceph_clock_now(g_ceph_context); - new_fs->mds_map.modified = ceph_clock_now(g_ceph_context); + new_fs->mds_map.created = ceph_clock_now(); + new_fs->mds_map.modified = ceph_clock_now(); new_fs->mds_map.session_timeout = g_conf->mds_session_timeout; new_fs->mds_map.session_autoclose = g_conf->mds_session_autoclose; new_fs->mds_map.enabled = true; @@ -2843,7 +2843,7 @@ void MDSMonitor::maybe_replace_gid(mds_gid_t gid, << " " << ceph_mds_state_name(info.state) << " laggy" << dendl; pending_fsmap.modify_daemon(info.global_id, [](MDSMap::mds_info_t *info) { - info->laggy_since = ceph_clock_now(g_ceph_context); + info->laggy_since = ceph_clock_now(); }); *mds_propose = true; } @@ -2973,7 +2973,7 @@ void MDSMonitor::tick() do_propose |= maybe_expand_cluster(i.second); } - const auto now = ceph_clock_now(g_ceph_context); + const auto now = ceph_clock_now(); if (last_tick.is_zero()) { last_tick = now; } diff --git a/src/mon/MgrMonitor.cc b/src/mon/MgrMonitor.cc index ec3aaec11b21c..5b681e861a80b 100644 --- a/src/mon/MgrMonitor.cc +++ b/src/mon/MgrMonitor.cc @@ -122,7 +122,7 @@ bool MgrMonitor::preprocess_beacon(MonOpRequestRef op) MMgrBeacon *m = static_cast(op->get_req()); dout(4) << "beacon from " << m->get_gid() << dendl; - last_beacon[m->get_gid()] = ceph_clock_now(g_ceph_context); + last_beacon[m->get_gid()] = ceph_clock_now(); if (pending_map.active_gid == m->get_gid() && pending_map.active_addr == m->get_server_addr() @@ -163,7 +163,7 @@ bool MgrMonitor::prepare_beacon(MonOpRequestRef op) } } - 
last_beacon[m->get_gid()] = ceph_clock_now(g_ceph_context); + last_beacon[m->get_gid()] = ceph_clock_now(); // Track whether we modified pending_map bool updated = false; @@ -276,7 +276,7 @@ void MgrMonitor::send_digests() void MgrMonitor::tick() { - const utime_t now = ceph_clock_now(g_ceph_context); + const utime_t now = ceph_clock_now(); utime_t cutoff = now; cutoff -= g_conf->mon_mgr_beacon_grace; diff --git a/src/mon/MonClient.cc b/src/mon/MonClient.cc index 1e6dcc122f3b2..3db182f6da283 100644 --- a/src/mon/MonClient.cc +++ b/src/mon/MonClient.cc @@ -135,7 +135,7 @@ int MonClient::get_monmap_privately() utime_t interval; interval.set_from_double(cct->_conf->mon_client_hunt_interval); - map_cond.WaitInterval(cct, monc_lock, interval); + map_cond.WaitInterval(monc_lock, interval); if (monmap.fsid.is_zero() && cur_con) { cur_con->mark_down(); // nope, clean that connection up @@ -448,7 +448,7 @@ int MonClient::authenticate(double timeout) if (cur_mon.empty()) _reopen_session(); - utime_t until = ceph_clock_now(cct); + utime_t until = ceph_clock_now(); until += timeout; if (timeout > 0.0) ldout(cct, 10) << "authenticate will time out at " << until << dendl; @@ -723,7 +723,7 @@ void MonClient::tick() _reopen_session(); } else if (!cur_mon.empty()) { // just renew as needed - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); if (!cur_con->has_feature(CEPH_FEATURE_MON_STATEFUL_SUB)) { ldout(cct, 10) << "renew subs? (now: " << now << "; renew after: " << sub_renew_after << ") -- " @@ -786,7 +786,7 @@ void MonClient::_renew_subs() _reopen_session(); else { if (sub_renew_sent == utime_t()) - sub_renew_sent = ceph_clock_now(cct); + sub_renew_sent = ceph_clock_now(); MMonSubscribe *m = new MMonSubscribe; m->what = sub_new; @@ -847,7 +847,7 @@ int MonClient::_check_auth_rotating() return 0; } - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); utime_t cutoff = now; cutoff -= MIN(30.0, cct->_conf->auth_service_ticket_ttl / 4.0); utime_t issued_at_lower_bound = now; @@ -885,7 +885,7 @@ int MonClient::_check_auth_rotating() int MonClient::wait_auth_rotating(double timeout) { Mutex::Locker l(monc_lock); - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); utime_t until = now; until += timeout; @@ -903,7 +903,7 @@ int MonClient::wait_auth_rotating(double timeout) } ldout(cct, 10) << "wait_auth_rotating waiting (until " << until << ")" << dendl; auth_cond.WaitUntil(monc_lock, until); - now = ceph_clock_now(cct); + now = ceph_clock_now(); } ldout(cct, 10) << "wait_auth_rotating done" << dendl; return 0; diff --git a/src/mon/MonClient.h b/src/mon/MonClient.h index b67217870ec6b..d2a29953ff4c5 100644 --- a/src/mon/MonClient.h +++ b/src/mon/MonClient.h @@ -62,7 +62,7 @@ struct MonClientPinger : public Dispatcher { { } int wait_for_reply(double timeout = 0.0) { - utime_t until = ceph_clock_now(cct); + utime_t until = ceph_clock_now(); until += (timeout > 0 ? timeout : cct->_conf->client_mount_timeout); done = false; diff --git a/src/mon/MonMap.cc b/src/mon/MonMap.cc index d388424c9ab78..836c6901c9cb6 100644 --- a/src/mon/MonMap.cc +++ b/src/mon/MonMap.cc @@ -462,7 +462,7 @@ int MonMap::build_initial(CephContext *cct, ostream& errout) << std::endl; return r; } - created = ceph_clock_now(cct); + created = ceph_clock_now(); last_changed = created; return 0; } @@ -548,7 +548,7 @@ int MonMap::build_initial(CephContext *cct, ostream& errout) errout << "no monitors specified to connect to." 
<< std::endl; return -ENOENT; } - created = ceph_clock_now(cct); + created = ceph_clock_now(); last_changed = created; return 0; } diff --git a/src/mon/Monitor.cc b/src/mon/Monitor.cc index 4b76b9efc0bf9..1dcdd622710b3 100644 --- a/src/mon/Monitor.cc +++ b/src/mon/Monitor.cc @@ -217,7 +217,7 @@ Monitor::Monitor(CephContext* cct_, string nm, MonitorDBStore *s, bool r = mon_caps->parse("allow *", NULL); assert(r); - exited_quorum = ceph_clock_now(g_ceph_context); + exited_quorum = ceph_clock_now(); // assume our commands until we have an election. this only means // we won't reply with EINVAL before the election; any command that @@ -1218,7 +1218,7 @@ void Monitor::_reset() leader_since = utime_t(); if (!quorum.empty()) { - exited_quorum = ceph_clock_now(g_ceph_context); + exited_quorum = ceph_clock_now(); } quorum.clear(); outside_quorum.clear(); @@ -1702,7 +1702,7 @@ void Monitor::sync_trim_providers() { dout(20) << __func__ << dendl; - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); map::iterator p = sync_providers.begin(); while (p != sync_providers.end()) { if (now > p->second.timeout) { @@ -2053,7 +2053,7 @@ void Monitor::win_election(epoch_t epoch, set& active, uint64_t features, << dendl; assert(is_electing()); state = STATE_LEADER; - leader_since = ceph_clock_now(g_ceph_context); + leader_since = ceph_clock_now(); leader = rank; quorum = active; quorum_con_features = features; @@ -2398,7 +2398,7 @@ void Monitor::health_tick_stop() utime_t Monitor::health_interval_calc_next_update() { - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); time_t secs = now.sec(); int remainder = secs % cct->_conf->mon_health_to_clog_interval; @@ -3068,9 +3068,9 @@ void Monitor::handle_command(MonOpRequestRef op) if (prefix == "compact" || prefix == "mon compact") { dout(1) << "triggering manual compaction" << dendl; - utime_t start = ceph_clock_now(g_ceph_context); + utime_t start = ceph_clock_now(); store->compact(); - utime_t end = ceph_clock_now(g_ceph_context); + utime_t end = ceph_clock_now(); end -= start; dout(1) << "finished manual compaction in " << end << " seconds" << dendl; ostringstream oss; @@ -3159,7 +3159,7 @@ void Monitor::handle_command(MonOpRequestRef op) f->dump_stream("cluster_fingerprint") << fingerprint; f->dump_string("version", ceph_version_to_str()); f->dump_string("commit", git_version_to_str()); - f->dump_stream("timestamp") << ceph_clock_now(NULL); + f->dump_stream("timestamp") << ceph_clock_now(); vector tagsvec; cmd_getval(g_ceph_context, cmdmap, "tags", tagsvec); @@ -3706,7 +3706,7 @@ void Monitor::waitlist_or_zap_client(MonOpRequestRef op) Message *m = op->get_req(); MonSession *s = op->get_session(); ConnectionRef con = op->get_connection(); - utime_t too_old = ceph_clock_now(g_ceph_context); + utime_t too_old = ceph_clock_now(); too_old -= g_ceph_context->_conf->mon_lease; if (m->get_recv_stamp() > too_old && con->is_connected()) { @@ -3776,7 +3776,7 @@ void Monitor::_ms_dispatch(Message *m) assert(s); - s->session_timeout = ceph_clock_now(NULL); + s->session_timeout = ceph_clock_now(); s->session_timeout += g_conf->mon_session_timeout; if (s->auth_handler) { @@ -4091,7 +4091,7 @@ void Monitor::timecheck_start_round() if (timecheck_round % 2) { dout(10) << __func__ << " there's a timecheck going on" << dendl; - utime_t curr_time = ceph_clock_now(g_ceph_context); + utime_t curr_time = ceph_clock_now(); double max = g_conf->mon_timecheck_interval*3; if (curr_time - timecheck_round_start < max) { dout(10) << __func__ << " 
keep current round going" << dendl; @@ -4106,7 +4106,7 @@ void Monitor::timecheck_start_round() assert(timecheck_round % 2 == 0); timecheck_acks = 0; timecheck_round ++; - timecheck_round_start = ceph_clock_now(g_ceph_context); + timecheck_round_start = ceph_clock_now(); dout(10) << __func__ << " new " << timecheck_round << dendl; timecheck(); @@ -4290,7 +4290,7 @@ void Monitor::timecheck() continue; entity_inst_t inst = monmap->get_inst(*it); - utime_t curr_time = ceph_clock_now(g_ceph_context); + utime_t curr_time = ceph_clock_now(); timecheck_waiting[inst] = curr_time; MTimeCheck *m = new MTimeCheck(MTimeCheck::OP_PING); m->epoch = get_epoch(); @@ -4341,7 +4341,7 @@ void Monitor::handle_timecheck_leader(MonOpRequestRef op) return; } - utime_t curr_time = ceph_clock_now(g_ceph_context); + utime_t curr_time = ceph_clock_now(); assert(timecheck_waiting.count(other) > 0); utime_t timecheck_sent = timecheck_waiting[other]; @@ -4469,7 +4469,7 @@ void Monitor::handle_timecheck_peon(MonOpRequestRef op) assert((timecheck_round % 2) != 0); MTimeCheck *reply = new MTimeCheck(MTimeCheck::OP_PONG); - utime_t curr_time = ceph_clock_now(g_ceph_context); + utime_t curr_time = ceph_clock_now(); reply->timestamp = curr_time; reply->epoch = m->epoch; reply->round = m->round; @@ -5085,7 +5085,7 @@ void Monitor::tick() } // trim sessions - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); xlist::iterator p = session_map.sessions.begin(); bool out_for_too_long = (!exited_quorum.is_zero() diff --git a/src/mon/Monitor.h b/src/mon/Monitor.h index 021ecc3ce39c5..81701dd1cde36 100644 --- a/src/mon/Monitor.h +++ b/src/mon/Monitor.h @@ -287,7 +287,7 @@ private: SyncProvider() : cookie(0), last_committed(0), full(false) {} void reset_timeout(CephContext *cct, int grace) { - timeout = ceph_clock_now(cct); + timeout = ceph_clock_now(); timeout += grace; } }; diff --git a/src/mon/MonmapMonitor.cc b/src/mon/MonmapMonitor.cc index c4512dc7dc234..4f5bdd463b11d 100644 --- a/src/mon/MonmapMonitor.cc +++ b/src/mon/MonmapMonitor.cc @@ -78,7 +78,7 @@ void MonmapMonitor::create_pending() { pending_map = *mon->monmap; pending_map.epoch++; - pending_map.last_changed = ceph_clock_now(g_ceph_context); + pending_map.last_changed = ceph_clock_now(); dout(10) << "create_pending monmap epoch " << pending_map.epoch << dendl; } @@ -455,7 +455,7 @@ bool MonmapMonitor::prepare_command(MonOpRequestRef op) */ pending_map.add(name, addr); - pending_map.last_changed = ceph_clock_now(g_ceph_context); + pending_map.last_changed = ceph_clock_now(); ss << "adding mon." << name << " at " << addr; propose = true; dout(0) << __func__ << " proposing new mon." << name << dendl; @@ -507,7 +507,7 @@ bool MonmapMonitor::prepare_command(MonOpRequestRef op) entity_addr_t addr = pending_map.get_addr(name); pending_map.remove(name); - pending_map.last_changed = ceph_clock_now(g_ceph_context); + pending_map.last_changed = ceph_clock_now(); ss << "removing mon." 
<< name << " at " << addr << ", there will be " << pending_map.size() << " monitors" ; propose = true; @@ -556,7 +556,7 @@ bool MonmapMonitor::prepare_join(MonOpRequestRef op) if (pending_map.contains(join->addr)) pending_map.remove(pending_map.get_name(join->addr)); pending_map.add(join->name, join->addr); - pending_map.last_changed = ceph_clock_now(g_ceph_context); + pending_map.last_changed = ceph_clock_now(); return true; } diff --git a/src/mon/OSDMonitor.cc b/src/mon/OSDMonitor.cc index 22732e2ef265e..e64554caeefa5 100644 --- a/src/mon/OSDMonitor.cc +++ b/src/mon/OSDMonitor.cc @@ -123,7 +123,7 @@ void OSDMonitor::create_initial() g_conf->osd_pg_bits, g_conf->osd_pgp_bits); } newmap.set_epoch(1); - newmap.created = newmap.modified = ceph_clock_now(g_ceph_context); + newmap.created = newmap.modified = ceph_clock_now(); // new clusters should sort bitwise by default. newmap.set_flag(CEPH_OSDMAP_SORTBITWISE); @@ -296,7 +296,7 @@ void OSDMonitor::update_from_paxos(bool *need_bootstrap) // populate down -> out map if (found == down_pending_out.end()) { dout(10) << " adding osd." << o << " to down_pending_out map" << dendl; - down_pending_out[o] = ceph_clock_now(g_ceph_context); + down_pending_out[o] = ceph_clock_now(); } } else { if (found != down_pending_out.end()) { @@ -1092,7 +1092,7 @@ void OSDMonitor::maybe_prime_pg_temp() PGMap *pg_map = &mon->pgmon()->pg_map; - utime_t stop = ceph_clock_now(NULL); + utime_t stop = ceph_clock_now(); stop += g_conf->mon_osd_prime_pg_temp_max_time; int chunk = 1000; int n = chunk; @@ -1105,7 +1105,7 @@ void OSDMonitor::maybe_prime_pg_temp() prime_pg_temp(next, pp); if (--n <= 0) { n = chunk; - if (ceph_clock_now(NULL) > stop) { + if (ceph_clock_now() > stop) { dout(10) << __func__ << " consumed more than " << g_conf->mon_osd_prime_pg_temp_max_time << " seconds, stopping" @@ -1120,7 +1120,7 @@ void OSDMonitor::maybe_prime_pg_temp() n -= prime_pg_temp(next, pg_map, *p); if (n <= 0) { n = chunk; - if (ceph_clock_now(NULL) > stop) { + if (ceph_clock_now() > stop) { dout(10) << __func__ << " consumed more than " << g_conf->mon_osd_prime_pg_temp_max_time << " seconds, stopping" @@ -1193,7 +1193,7 @@ void OSDMonitor::encode_pending(MonitorDBStore::TransactionRef t) << dendl; // finalize up pending_inc - pending_inc.modified = ceph_clock_now(g_ceph_context); + pending_inc.modified = ceph_clock_now(); int r = pending_inc.propagate_snaps_to_tiers(g_ceph_context, osdmap); assert(r == 0); @@ -1877,7 +1877,7 @@ bool OSDMonitor::prepare_failure(MonOpRequestRef op) assert(osdmap.get_addr(target_osd) == m->get_target().addr); // calculate failure time - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); utime_t failed_since = m->get_recv_stamp() - utime_t(m->failed_for ? m->failed_for : g_conf->osd_heartbeat_grace, 0); @@ -2245,7 +2245,7 @@ bool OSDMonitor::prepare_boot(MonOpRequestRef op) dout(10) << " not laggy, new xi " << xi << dendl; } else { if (xi.down_stamp.sec()) { - int interval = ceph_clock_now(g_ceph_context).sec() - + int interval = ceph_clock_now().sec() - xi.down_stamp.sec(); if (g_conf->mon_osd_laggy_max_interval && (interval > g_conf->mon_osd_laggy_max_interval)) { @@ -2810,7 +2810,7 @@ void OSDMonitor::tick() if (!mon->is_leader()) return; bool do_propose = false; - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); // mark osds down? 
if (check_failures(now)) @@ -7047,7 +7047,7 @@ done: string blacklistop; cmd_getval(g_ceph_context, cmdmap, "blacklistop", blacklistop); if (blacklistop == "add") { - utime_t expires = ceph_clock_now(g_ceph_context); + utime_t expires = ceph_clock_now(); double d; // default one hour cmd_getval(g_ceph_context, cmdmap, "expire", d, double(60*60)); @@ -7108,7 +7108,7 @@ done: if (pp->snap_exists(snapname.c_str())) { ss << "pool " << poolstr << " snap " << snapname << " already exists"; } else { - pp->add_snap(snapname.c_str(), ceph_clock_now(g_ceph_context)); + pp->add_snap(snapname.c_str(), ceph_clock_now()); pp->set_snap_epoch(pending_inc.epoch); ss << "created pool " << poolstr << " snap " << snapname; } @@ -8161,7 +8161,7 @@ bool OSDMonitor::prepare_pool_op(MonOpRequestRef op) switch (m->op) { case POOL_OP_CREATE_SNAP: if (!pp.snap_exists(m->name.c_str())) { - pp.add_snap(m->name.c_str(), ceph_clock_now(g_ceph_context)); + pp.add_snap(m->name.c_str(), ceph_clock_now()); dout(10) << "create snap in pool " << m->pool << " " << m->name << " seq " << pp.get_snap_epoch() << dendl; changed = true; } diff --git a/src/mon/PGMap.cc b/src/mon/PGMap.cc index 9e1c95420024a..984776960f7bd 100644 --- a/src/mon/PGMap.cc +++ b/src/mon/PGMap.cc @@ -2171,7 +2171,7 @@ void PGMapUpdater::register_pg( stats.last_deep_scrub_stamp = ps.last_deep_scrub_stamp; stats.last_clean_scrub_stamp = ps.last_clean_scrub_stamp; } else { - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); stats.last_fresh = now; stats.last_active = now; stats.last_change = now; diff --git a/src/mon/PGMonitor.cc b/src/mon/PGMonitor.cc index 6b9529d6cd8df..b38a28e7fb9ef 100644 --- a/src/mon/PGMonitor.cc +++ b/src/mon/PGMonitor.cc @@ -120,7 +120,7 @@ void PGMonitor::tick() handle_osd_timeouts(); if (!pg_map.pg_sum_deltas.empty()) { - utime_t age = ceph_clock_now(g_ceph_context) - pg_map.stamp; + utime_t age = ceph_clock_now() - pg_map.stamp; if (age > 2 * g_conf->mon_delta_reset_interval) { dout(10) << " clearing pg_map delta (" << age << " > " << g_conf->mon_delta_reset_interval << " seconds old)" << dendl; pg_map.clear_delta(); @@ -138,7 +138,7 @@ void PGMonitor::tick() ceph::unordered_map >::iterator it; for (it = pg_map.per_pool_sum_delta.begin(); it != pg_map.per_pool_sum_delta.end(); ) { - utime_t age = ceph_clock_now(g_ceph_context) - it->second.second; + utime_t age = ceph_clock_now() - it->second.second; if (age > 2*g_conf->mon_delta_reset_interval) { dout(10) << " clearing pg_map delta for pool " << it->first << " (" << age << " > " << g_conf->mon_delta_reset_interval @@ -300,7 +300,7 @@ void PGMonitor::handle_osd_timeouts() if (!mon->is_leader()) return; - utime_t now(ceph_clock_now(g_ceph_context)); + utime_t now(ceph_clock_now()); utime_t timeo(g_conf->mon_osd_report_timeout, 0); if (now - mon->get_leader_since() < timeo) { // We haven't been the leader for long enough to consider OSD timeouts @@ -481,7 +481,7 @@ void PGMonitor::encode_pending(MonitorDBStore::TransactionRef t) version_t version = pending_inc.version; dout(10) << __func__ << " v " << version << dendl; assert(get_last_committed() + 1 == version); - pending_inc.stamp = ceph_clock_now(g_ceph_context); + pending_inc.stamp = ceph_clock_now(); uint64_t features = mon->get_quorum_con_features(); @@ -776,7 +776,7 @@ bool PGMonitor::prepare_pg_stats(MonOpRequestRef op) return false; } - last_osd_report[from] = ceph_clock_now(g_ceph_context); + last_osd_report[from] = ceph_clock_now(); if (!stats->get_orig_source().is_osd() || 
!mon->osdmon()->osdmap.is_up(from) || @@ -996,7 +996,7 @@ void PGMonitor::_try_mark_pg_stale( dout(10) << " marking pg " << pgid << " stale (acting_primary " << stat->acting_primary << ")" << dendl; stat->state |= PG_STATE_STALE; - stat->last_unstale = ceph_clock_now(g_ceph_context); + stat->last_unstale = ceph_clock_now(); } } @@ -1513,7 +1513,7 @@ static void note_stuck_detail(int what, if (since == utime_t()) { ss << " since forever"; } else { - utime_t dur = ceph_clock_now(g_ceph_context) - since; + utime_t dur = ceph_clock_now() - since; ss << " for " << dur; } ss << ", current state " << pg_state_string(p->second.state) @@ -1579,7 +1579,7 @@ namespace { return; int pgs_count = 0; - const utime_t now = ceph_clock_now(nullptr); + const utime_t now = ceph_clock_now(); for (const auto& pg_entry : pg_stats) { const auto& pg_stat(pg_entry.second); const utime_t time_since_ls = now - pg_stat.last_scrub_stamp; @@ -1660,7 +1660,7 @@ void PGMonitor::get_health(list >& summary, } ceph::unordered_map stuck_pgs; - utime_t now(ceph_clock_now(g_ceph_context)); + utime_t now(ceph_clock_now()); utime_t cutoff = now - utime_t(g_conf->mon_pg_stuck_threshold, 0); uint64_t num_inactive_pgs = 0; @@ -1973,7 +1973,7 @@ int PGMonitor::dump_stuck_pg_stats(stringstream &ds, } } - utime_t now(ceph_clock_now(g_ceph_context)); + utime_t now(ceph_clock_now()); utime_t cutoff = now - utime_t(threshold, 0); if (!f) { diff --git a/src/mon/Paxos.cc b/src/mon/Paxos.cc index 48e68cbaafcae..978f726795889 100644 --- a/src/mon/Paxos.cc +++ b/src/mon/Paxos.cc @@ -241,7 +241,7 @@ void Paxos::collect(version_t oldpn) if (*p == mon->rank) continue; MMonPaxos *collect = new MMonPaxos(mon->get_epoch(), MMonPaxos::OP_COLLECT, - ceph_clock_now(g_ceph_context)); + ceph_clock_now()); collect->last_committed = last_committed; collect->first_committed = first_committed; collect->pn = accepted_pn; @@ -285,7 +285,7 @@ void Paxos::handle_collect(MonOpRequestRef op) // reply MMonPaxos *last = new MMonPaxos(mon->get_epoch(), MMonPaxos::OP_LAST, - ceph_clock_now(g_ceph_context)); + ceph_clock_now()); last->last_committed = last_committed; last->first_committed = first_committed; @@ -311,11 +311,11 @@ void Paxos::handle_collect(MonOpRequestRef op) logger->inc(l_paxos_collect); logger->inc(l_paxos_collect_keys, t->get_keys()); logger->inc(l_paxos_collect_bytes, t->get_bytes()); - utime_t start = ceph_clock_now(NULL); + utime_t start = ceph_clock_now(); get_store()->apply_transaction(t); - utime_t end = ceph_clock_now(NULL); + utime_t end = ceph_clock_now(); logger->tinc(l_paxos_collect_latency, end - start); } else { // don't accept! @@ -478,11 +478,11 @@ bool Paxos::store_state(MMonPaxos *m) logger->inc(l_paxos_store_state); logger->inc(l_paxos_store_state_bytes, t->get_bytes()); logger->inc(l_paxos_store_state_keys, t->get_keys()); - utime_t start = ceph_clock_now(NULL); + utime_t start = ceph_clock_now(); get_store()->apply_transaction(t); - utime_t end = ceph_clock_now(NULL); + utime_t end = ceph_clock_now(); logger->tinc(l_paxos_store_state_latency, end - start); // refresh first_committed; this txn may have trimmed. @@ -559,7 +559,7 @@ void Paxos::handle_last(MonOpRequestRef op) dout(10) << " sending commit to mon." 
<< p->first << dendl; MMonPaxos *commit = new MMonPaxos(mon->get_epoch(), MMonPaxos::OP_COMMIT, - ceph_clock_now(g_ceph_context)); + ceph_clock_now()); share_state(commit, peer_first_committed[p->first], p->second); mon->messenger->send_message(commit, mon->monmap->get_inst(p->first)); } @@ -705,11 +705,11 @@ void Paxos::begin(bufferlist& v) logger->inc(l_paxos_begin); logger->inc(l_paxos_begin_keys, t->get_keys()); logger->inc(l_paxos_begin_bytes, t->get_bytes()); - utime_t start = ceph_clock_now(NULL); + utime_t start = ceph_clock_now(); get_store()->apply_transaction(t); - utime_t end = ceph_clock_now(NULL); + utime_t end = ceph_clock_now(); logger->tinc(l_paxos_begin_latency, end - start); assert(g_conf->paxos_kill_at != 3); @@ -728,7 +728,7 @@ void Paxos::begin(bufferlist& v) dout(10) << " sending begin to mon." << *p << dendl; MMonPaxos *begin = new MMonPaxos(mon->get_epoch(), MMonPaxos::OP_BEGIN, - ceph_clock_now(g_ceph_context)); + ceph_clock_now()); begin->values[last_committed+1] = new_value; begin->last_committed = last_committed; begin->pn = accepted_pn; @@ -786,18 +786,18 @@ void Paxos::handle_begin(MonOpRequestRef op) *_dout << dendl; logger->inc(l_paxos_begin_bytes, t->get_bytes()); - utime_t start = ceph_clock_now(NULL); + utime_t start = ceph_clock_now(); get_store()->apply_transaction(t); - utime_t end = ceph_clock_now(NULL); + utime_t end = ceph_clock_now(); logger->tinc(l_paxos_begin_latency, end - start); assert(g_conf->paxos_kill_at != 5); // reply MMonPaxos *accept = new MMonPaxos(mon->get_epoch(), MMonPaxos::OP_ACCEPT, - ceph_clock_now(g_ceph_context)); + ceph_clock_now()); accept->pn = accepted_pn; accept->last_committed = last_committed; begin->get_connection()->send_message(accept); @@ -891,7 +891,7 @@ void Paxos::commit_start() logger->inc(l_paxos_commit); logger->inc(l_paxos_commit_keys, t->get_keys()); logger->inc(l_paxos_commit_bytes, t->get_bytes()); - commit_start_stamp = ceph_clock_now(NULL); + commit_start_stamp = ceph_clock_now(); get_store()->queue_transaction(t, new C_Committed(this)); @@ -912,7 +912,7 @@ void Paxos::commit_start() void Paxos::commit_finish() { dout(20) << __func__ << " " << (last_committed+1) << dendl; - utime_t end = ceph_clock_now(NULL); + utime_t end = ceph_clock_now(); logger->tinc(l_paxos_commit_latency, end - commit_start_stamp); assert(g_conf->paxos_kill_at != 8); @@ -923,7 +923,7 @@ void Paxos::commit_finish() lease_expire = utime_t(); // cancel lease last_committed++; - last_commit_time = ceph_clock_now(NULL); + last_commit_time = ceph_clock_now(); // refresh first_committed; this txn may have trimmed. first_committed = get_store()->get(get_name(), "first_committed"); @@ -938,7 +938,7 @@ void Paxos::commit_finish() dout(10) << " sending commit to mon." 
<< *p << dendl; MMonPaxos *commit = new MMonPaxos(mon->get_epoch(), MMonPaxos::OP_COMMIT, - ceph_clock_now(g_ceph_context)); + ceph_clock_now()); commit->values[last_committed] = new_value; commit->pn = accepted_pn; commit->last_committed = last_committed; @@ -999,7 +999,7 @@ void Paxos::extend_lease() assert(mon->is_leader()); //assert(is_active()); - lease_expire = ceph_clock_now(g_ceph_context); + lease_expire = ceph_clock_now(); lease_expire += g_conf->mon_lease; acked_lease.clear(); acked_lease.insert(mon->rank); @@ -1013,7 +1013,7 @@ void Paxos::extend_lease() if (*p == mon->rank) continue; MMonPaxos *lease = new MMonPaxos(mon->get_epoch(), MMonPaxos::OP_LEASE, - ceph_clock_now(g_ceph_context)); + ceph_clock_now()); lease->last_committed = last_committed; lease->lease_timestamp = lease_expire; lease->first_committed = first_committed; @@ -1039,7 +1039,7 @@ void Paxos::extend_lease() void Paxos::warn_on_future_time(utime_t t, entity_name_t from) { - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); if (t > now) { utime_t diff = t - now; if (diff > g_conf->mon_clock_drift_allowed) { @@ -1048,7 +1048,7 @@ void Paxos::warn_on_future_time(utime_t t, entity_name_t from) pow(g_conf->mon_clock_drift_warn_backoff, clock_drift_warned)) { mon->clog->warn() << "message from " << from << " was stamped " << diff << "s in the future, clocks not synchronized"; - last_clock_drift_warn = ceph_clock_now(g_ceph_context); + last_clock_drift_warn = ceph_clock_now(); ++clock_drift_warned; } } @@ -1060,12 +1060,12 @@ bool Paxos::do_refresh() { bool need_bootstrap = false; - utime_t start = ceph_clock_now(NULL); + utime_t start = ceph_clock_now(); // make sure we have the latest state loaded up mon->refresh_from_paxos(&need_bootstrap); - utime_t end = ceph_clock_now(NULL); + utime_t end = ceph_clock_now(); logger->inc(l_paxos_refresh); logger->tinc(l_paxos_refresh_latency, end - start); @@ -1134,7 +1134,7 @@ void Paxos::handle_lease(MonOpRequestRef op) if (lease_expire < lease->lease_timestamp) { lease_expire = lease->lease_timestamp; - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); if (lease_expire < now) { utime_t diff = now - lease_expire; derr << "lease_expire from " << lease->get_source_inst() << " is " << diff << " seconds in the past; mons are probably laggy (or possibly clocks are too skewed)" << dendl; @@ -1148,10 +1148,10 @@ void Paxos::handle_lease(MonOpRequestRef op) // ack MMonPaxos *ack = new MMonPaxos(mon->get_epoch(), MMonPaxos::OP_LEASE_ACK, - ceph_clock_now(g_ceph_context)); + ceph_clock_now()); ack->last_committed = last_committed; ack->first_committed = first_committed; - ack->lease_timestamp = ceph_clock_now(g_ceph_context); + ack->lease_timestamp = ceph_clock_now(); lease->get_connection()->send_message(ack); // (re)set timeout event. 
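
A note on the Paxos latency hunks: these call sites already passed NULL rather than g_ceph_context, so under the offset assumption sketched earlier the perf bookkeeping never saw a configured skew, and only the spelling changes. The idiom as it reads after the patch, taken from the get_new_proposal_number hunk that follows:

utime_t start = ceph_clock_now();
get_store()->apply_transaction(t);
utime_t end = ceph_clock_now();
logger->tinc(l_paxos_new_pn_latency, end - start);

The same four-line pattern recurs for the l_paxos_collect, l_paxos_store_state, l_paxos_begin, and l_paxos_refresh latency counters in the surrounding hunks.
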
@@ -1290,11 +1290,11 @@ version_t Paxos::get_new_proposal_number(version_t gt) *_dout << dendl; logger->inc(l_paxos_new_pn); - utime_t start = ceph_clock_now(NULL); + utime_t start = ceph_clock_now(); get_store()->apply_transaction(t); - utime_t end = ceph_clock_now(NULL); + utime_t end = ceph_clock_now(); logger->tinc(l_paxos_new_pn_latency, end - start); dout(10) << "get_new_proposal_number = " << last_pn << dendl; @@ -1492,7 +1492,7 @@ bool Paxos::is_readable(version_t v) (is_active() || is_updating() || is_writing()) && last_committed > 0 && is_lease_valid(); // must have a value alone, or have lease dout(5) << __func__ << " = " << (int)ret - << " - now=" << ceph_clock_now(g_ceph_context) + << " - now=" << ceph_clock_now() << " lease_expire=" << lease_expire << " has v" << v << " lc " << last_committed << dendl; @@ -1517,7 +1517,7 @@ version_t Paxos::read_current(bufferlist &bl) bool Paxos::is_lease_valid() { return ((mon->get_quorum().size() == 1) - || (ceph_clock_now(g_ceph_context) < lease_expire)); + || (ceph_clock_now() < lease_expire)); } // -- WRITE -- diff --git a/src/mon/Paxos.h b/src/mon/Paxos.h index 08c584c8e0dbf..432ce3d9c5683 100644 --- a/src/mon/Paxos.h +++ b/src/mon/Paxos.h @@ -657,8 +657,8 @@ public: C_Proposal(Context *c, bufferlist& proposal_bl) : proposer_context(c), bl(proposal_bl), - proposed(false), - proposal_time(ceph_clock_now(NULL)) + proposed(false), + proposal_time(ceph_clock_now()) { } void finish(int r) { @@ -1357,7 +1357,7 @@ inline ostream& operator<<(ostream& out, Paxos::C_Proposal& p) { string proposed = (p.proposed ? "proposed" : "unproposed"); out << " " << proposed - << " queued " << (ceph_clock_now(NULL) - p.proposal_time) + << " queued " << (ceph_clock_now() - p.proposal_time) << " tx dump:\n"; MonitorDBStore::TransactionRef t(new MonitorDBStore::Transaction); bufferlist::iterator p_it = p.bl.begin(); diff --git a/src/mon/PaxosService.cc b/src/mon/PaxosService.cc index 9b7fc58d05e85..09143e7c1250e 100644 --- a/src/mon/PaxosService.cc +++ b/src/mon/PaxosService.cc @@ -164,7 +164,7 @@ bool PaxosService::should_propose(double& delay) if (get_last_committed() <= 1) delay = 0.0; else { - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); if ((now - paxos->last_commit_time) > g_conf->paxos_propose_interval) delay = (double)g_conf->paxos_min_wait; else diff --git a/src/mon/Session.h b/src/mon/Session.h index d91de2666dbad..841df8cafeae7 100644 --- a/src/mon/Session.h +++ b/src/mon/Session.h @@ -66,7 +66,7 @@ struct MonSession : public RefCountedObject { osd_epoch(0), auth_handler(NULL), proxy_con(NULL), proxy_tid(0) { - time_established = ceph_clock_now(g_ceph_context); + time_established = ceph_clock_now(); } ~MonSession() { //generic_dout(0) << "~MonSession " << this << dendl; diff --git a/src/msg/DispatchQueue.cc b/src/msg/DispatchQueue.cc index 0f27873692161..919d355d50f78 100644 --- a/src/msg/DispatchQueue.cc +++ b/src/msg/DispatchQueue.cc @@ -95,7 +95,7 @@ void DispatchQueue::enqueue(Message *m, int priority, uint64_t id) void DispatchQueue::local_delivery(Message *m, int priority) { - m->set_recv_stamp(ceph_clock_now(msgr->cct)); + m->set_recv_stamp(ceph_clock_now()); Mutex::Locker l(local_delivery_lock); if (local_messages.empty()) local_delivery_cond.Signal(); diff --git a/src/msg/Messenger.h b/src/msg/Messenger.h index a356dd8163b48..08b53d9b7428e 100644 --- a/src/msg/Messenger.h +++ b/src/msg/Messenger.h @@ -556,7 +556,7 @@ public: * of one reference to it. 
*/ void ms_fast_dispatch(Message *m) { - m->set_dispatch_stamp(ceph_clock_now(cct)); + m->set_dispatch_stamp(ceph_clock_now()); for (list::iterator p = fast_dispatchers.begin(); p != fast_dispatchers.end(); ++p) { @@ -586,7 +586,7 @@ public: * one reference to it. */ void ms_deliver_dispatch(Message *m) { - m->set_dispatch_stamp(ceph_clock_now(cct)); + m->set_dispatch_stamp(ceph_clock_now()); for (list::iterator p = dispatchers.begin(); p != dispatchers.end(); ++p) { diff --git a/src/msg/async/AsyncConnection.cc b/src/msg/async/AsyncConnection.cc index c10e9a1cf5277..fc4b75197cf2c 100644 --- a/src/msg/async/AsyncConnection.cc +++ b/src/msg/async/AsyncConnection.cc @@ -344,7 +344,7 @@ void AsyncConnection::process() if (tag == CEPH_MSGR_TAG_KEEPALIVE) { ldout(async_msgr->cct, 20) << __func__ << " got KEEPALIVE" << dendl; - set_last_keepalive(ceph_clock_now(NULL)); + set_last_keepalive(ceph_clock_now()); } else if (tag == CEPH_MSGR_TAG_KEEPALIVE2) { state = STATE_OPEN_KEEPALIVE2; } else if (tag == CEPH_MSGR_TAG_KEEPALIVE2_ACK) { @@ -381,7 +381,7 @@ void AsyncConnection::process() _append_keepalive_or_ack(true, &kp_t); write_lock.unlock(); ldout(async_msgr->cct, 20) << __func__ << " got KEEPALIVE2 " << kp_t << dendl; - set_last_keepalive(ceph_clock_now(NULL)); + set_last_keepalive(ceph_clock_now()); need_dispatch_writer = true; state = STATE_OPEN; break; @@ -480,7 +480,7 @@ void AsyncConnection::process() front.clear(); middle.clear(); data.clear(); - recv_stamp = ceph_clock_now(async_msgr->cct); + recv_stamp = ceph_clock_now(); current_header = header; state = STATE_OPEN_MESSAGE_THROTTLE_MESSAGE; break; @@ -548,7 +548,7 @@ void AsyncConnection::process() } } - throttle_stamp = ceph_clock_now(msgr->cct); + throttle_stamp = ceph_clock_now(); state = STATE_OPEN_MESSAGE_READ_FRONT; break; } @@ -714,7 +714,7 @@ void AsyncConnection::process() message->set_recv_stamp(recv_stamp); message->set_throttle_stamp(throttle_stamp); - message->set_recv_complete_stamp(ceph_clock_now(async_msgr->cct)); + message->set_recv_complete_stamp(ceph_clock_now()); // check received seq#. if it is old, drop the message. // note that incoming messages may skip ahead. 
this is convenient for the client @@ -2314,7 +2314,7 @@ void AsyncConnection::DelayedDelivery::do_request(int id) utime_t release = delay_queue.front().first; m = delay_queue.front().second; string delay_msg_type = msgr->cct->_conf->ms_inject_delay_msg_type; - utime_t now = ceph_clock_now(msgr->cct); + utime_t now = ceph_clock_now(); if ((release > now && (delay_msg_type.empty() || m->get_type_name() == delay_msg_type))) { utime_t t = release - now; @@ -2378,7 +2378,7 @@ void AsyncConnection::_append_keepalive_or_ack(bool ack, utime_t *tp) outcoming_bl.append((char*)&ts, sizeof(ts)); } else if (has_feature(CEPH_FEATURE_MSGR_KEEPALIVE2)) { struct ceph_timespec ts; - utime_t t = ceph_clock_now(async_msgr->cct); + utime_t t = ceph_clock_now(); t.encode_timeval(&ts); outcoming_bl.append(CEPH_MSGR_TAG_KEEPALIVE2); outcoming_bl.append((char*)&ts, sizeof(ts)); diff --git a/src/msg/async/dpdk/IP.cc b/src/msg/async/dpdk/IP.cc index c8b1c3c5aae52..3d3e8fb50bca3 100644 --- a/src/msg/async/dpdk/IP.cc +++ b/src/msg/async/dpdk/IP.cc @@ -191,7 +191,7 @@ int ipv4::handle_received_packet(Packet p, ethernet_address from) // This is a newly created frag_id if (frag.mem_size == 0) { _frags_age.push_back(frag_id); - frag.rx_time = ceph_clock_now(cct); + frag.rx_time = ceph_clock_now(); } auto added_size = frag.merge(h, offset, std::move(p)); _frag_mem += added_size; @@ -374,7 +374,7 @@ void ipv4::frag_timeout() { if (_frags.empty()) { return; } - auto now = ceph_clock_now(cct); + auto now = ceph_clock_now(); for (auto it = _frags_age.begin(); it != _frags_age.end();) { auto frag_id = *it; auto& frag = _frags[frag_id]; diff --git a/src/msg/async/dpdk/IP.h b/src/msg/async/dpdk/IP.h index 23f8d012e2abe..d5a4d305481f1 100644 --- a/src/msg/async/dpdk/IP.h +++ b/src/msg/async/dpdk/IP.h @@ -289,7 +289,7 @@ class ipv4 { frag_timefd.construct(center->create_time_event(tp.to_nsec() / 1000, frag_handler)); } void frag_arm() { - auto now = ceph_clock_now(cct); + auto now = ceph_clock_now(); frag_timefd.construct(center->create_time_event(now.to_nsec() / 1000, frag_handler)); } diff --git a/src/msg/simple/Pipe.cc b/src/msg/simple/Pipe.cc index 9233b5f6b660f..ef0d0b6f47cdb 100644 --- a/src/msg/simple/Pipe.cc +++ b/src/msg/simple/Pipe.cc @@ -271,7 +271,7 @@ void *Pipe::DelayedDelivery::entry() Message *m = delay_queue.front().second; string delay_msg_type = pipe->msgr->cct->_conf->ms_inject_delay_msg_type; if (!flush_count && - (release > ceph_clock_now(pipe->msgr->cct) && + (release > ceph_clock_now() && (delay_msg_type.empty() || m->get_type_name() == delay_msg_type))) { lgeneric_subdout(pipe->msgr->cct, ms, 10) << *pipe << "DelayedDelivery::entry sleeping on delay_cond until " << release << dendl; delay_cond.WaitUntil(delay_lock, release); @@ -1510,7 +1510,7 @@ void Pipe::fault(bool onread) backoff.set_from_double(conf->ms_initial_backoff); } else { ldout(msgr->cct,10) << "fault waiting " << backoff << dendl; - cond.WaitInterval(msgr->cct, pipe_lock, backoff); + cond.WaitInterval(pipe_lock, backoff); backoff += backoff; if (backoff > conf->ms_max_backoff) backoff.set_from_double(conf->ms_max_backoff); @@ -1630,7 +1630,7 @@ void Pipe::reader() if (tag == CEPH_MSGR_TAG_KEEPALIVE) { ldout(msgr->cct,2) << "reader got KEEPALIVE" << dendl; pipe_lock.Lock(); - connection_state->set_last_keepalive(ceph_clock_now(NULL)); + connection_state->set_last_keepalive(ceph_clock_now()); continue; } if (tag == CEPH_MSGR_TAG_KEEPALIVE2) { @@ -1647,7 +1647,7 @@ void Pipe::reader() keepalive_ack_stamp = utime_t(t); ldout(msgr->cct,2) << 
"reader got KEEPALIVE2 " << keepalive_ack_stamp << dendl; - connection_state->set_last_keepalive(ceph_clock_now(NULL)); + connection_state->set_last_keepalive(ceph_clock_now()); cond.Signal(); } continue; @@ -1835,7 +1835,7 @@ void Pipe::writer() if (connection_state->has_feature(CEPH_FEATURE_MSGR_KEEPALIVE2)) { pipe_lock.Unlock(); rc = write_keepalive2(CEPH_MSGR_TAG_KEEPALIVE2, - ceph_clock_now(msgr->cct)); + ceph_clock_now()); } else { pipe_lock.Unlock(); rc = write_keepalive(); @@ -2038,7 +2038,7 @@ int Pipe::read_message(Message **pm, AuthSessionHandler* auth_handler) unsigned data_len, data_off; int aborted; Message *message; - utime_t recv_stamp = ceph_clock_now(msgr->cct); + utime_t recv_stamp = ceph_clock_now(); if (policy.throttler_messages) { ldout(msgr->cct,10) << "reader wants " << 1 << " message from policy throttler " @@ -2066,7 +2066,7 @@ int Pipe::read_message(Message **pm, AuthSessionHandler* auth_handler) in_q->dispatch_throttler.get(message_size); } - utime_t throttle_stamp = ceph_clock_now(msgr->cct); + utime_t throttle_stamp = ceph_clock_now(); // read front front_len = header.front_len; @@ -2202,7 +2202,7 @@ int Pipe::read_message(Message **pm, AuthSessionHandler* auth_handler) message->set_recv_stamp(recv_stamp); message->set_throttle_stamp(throttle_stamp); - message->set_recv_complete_stamp(ceph_clock_now(msgr->cct)); + message->set_recv_complete_stamp(ceph_clock_now()); *pm = message; return 0; diff --git a/src/msg/xio/XioConnection.cc b/src/msg/xio/XioConnection.cc index 8ad5853a99855..98eebcc822481 100644 --- a/src/msg/xio/XioConnection.cc +++ b/src/msg/xio/XioConnection.cc @@ -193,7 +193,7 @@ void XioConnection::send_keepalive_or_ack_internal(bool ack, const utime_t *tp) xcmd->get_bl_ref().append(CEPH_MSGR_TAG_KEEPALIVE2_ACK); xcmd->get_bl_ref().append((char*)&ts, sizeof(ts)); } else if (has_feature(CEPH_FEATURE_MSGR_KEEPALIVE2)) { - utime_t t = ceph_clock_now(msgr->cct); + utime_t t = ceph_clock_now(); t.encode_timeval(&ts); xcmd->get_bl_ref().append(CEPH_MSGR_TAG_KEEPALIVE2); xcmd->get_bl_ref().append((char*)&ts, sizeof(ts)); @@ -311,7 +311,7 @@ int XioConnection::handle_data_msg(struct xio_session *session, ceph_msg_footer footer; buffer::list payload, middle, data; - const utime_t recv_stamp = ceph_clock_now(msgr->cct); + const utime_t recv_stamp = ceph_clock_now(); ldout(msgr->cct,4) << __func__ << " " << "msg_seq.size()=" << msg_seq.size() << dendl; @@ -452,7 +452,7 @@ int XioConnection::handle_data_msg(struct xio_session *session, /* update timestamps */ m->set_recv_stamp(recv_stamp); - m->set_recv_complete_stamp(ceph_clock_now(msgr->cct)); + m->set_recv_complete_stamp(ceph_clock_now()); m->set_seq(header.seq); /* MP-SAFE */ @@ -511,7 +511,7 @@ int XioConnection::on_msg(struct xio_session *session, case CEPH_MSGR_TAG_KEEPALIVE: ldout(msgr->cct, 20) << __func__ << " got KEEPALIVE" << dendl; - set_last_keepalive(ceph_clock_now(nullptr)); + set_last_keepalive(ceph_clock_now()); break; case CEPH_MSGR_TAG_KEEPALIVE2: @@ -524,7 +524,7 @@ int XioConnection::on_msg(struct xio_session *session, utime_t kp_t = utime_t(*t); ldout(msgr->cct, 20) << __func__ << " got KEEPALIVE2 with timestamp" << kp_t << dendl; send_keepalive_or_ack(true, &kp_t); - set_last_keepalive(ceph_clock_now(nullptr)); + set_last_keepalive(ceph_clock_now()); } break; @@ -855,7 +855,7 @@ int XioLoopbackConnection::send_message(Message *m) void XioLoopbackConnection::send_keepalive() { - utime_t t = ceph_clock_now(nullptr); + utime_t t = ceph_clock_now(); set_last_keepalive(t); 
set_last_keepalive_ack(t); } diff --git a/src/os/bluestore/BlueFS.cc b/src/os/bluestore/BlueFS.cc index 8ddb5fb0a2d0a..a49c9a7e8e22f 100644 --- a/src/os/bluestore/BlueFS.cc +++ b/src/os/bluestore/BlueFS.cc @@ -1476,7 +1476,7 @@ int BlueFS::_flush_range(FileWriter *h, uint64_t offset, uint64_t length) } } if (must_dirty) { - h->file->fnode.mtime = ceph_clock_now(NULL); + h->file->fnode.mtime = ceph_clock_now(); assert(h->file->fnode.ino >= 1); if (h->file->dirty_seq == 0) { h->file->dirty_seq = log_seq + 1; @@ -1619,13 +1619,13 @@ void BlueFS::wait_for_aio(FileWriter *h) // NOTE: this is safe to call without a lock, as long as our reference is // stable. dout(10) << __func__ << " " << h << dendl; - utime_t start = ceph_clock_now(NULL); + utime_t start = ceph_clock_now(); for (auto p : h->iocv) { if (p) { p->aio_wait(); } } - utime_t end = ceph_clock_now(NULL); + utime_t end = ceph_clock_now(); utime_t dur = end - start; dout(10) << __func__ << " " << h << " done in " << dur << dendl; } @@ -1822,7 +1822,7 @@ void BlueFS::sync_metadata() return; } dout(10) << __func__ << dendl; - utime_t start = ceph_clock_now(NULL); + utime_t start = ceph_clock_now(); vector> to_release(pending_release.size()); to_release.swap(pending_release); _flush_and_sync_log(l); @@ -1840,7 +1840,7 @@ void BlueFS::sync_metadata() } } - utime_t end = ceph_clock_now(NULL); + utime_t end = ceph_clock_now(); utime_t dur = end - start; dout(10) << __func__ << " done in " << dur << dendl; } @@ -1900,7 +1900,7 @@ int BlueFS::open_for_write( } assert(file->fnode.ino > 1); - file->fnode.mtime = ceph_clock_now(NULL); + file->fnode.mtime = ceph_clock_now(); file->fnode.prefer_bdev = BlueFS::BDEV_DB; if (dirname.length() > 5) { // the "db.slow" and "db.wal" directory names are hard-coded at @@ -2133,7 +2133,7 @@ int BlueFS::lock_file(const string& dirname, const string& filename, << " not found, creating" << dendl; file = new File; file->fnode.ino = ++ino_last; - file->fnode.mtime = ceph_clock_now(NULL); + file->fnode.mtime = ceph_clock_now(); file_map[ino_last] = file; dir->file_map[filename] = file; ++file->refs; diff --git a/src/os/bluestore/BlueStore.cc b/src/os/bluestore/BlueStore.cc index f876aa581e324..43c95b81c835e 100644 --- a/src/os/bluestore/BlueStore.cc +++ b/src/os/bluestore/BlueStore.cc @@ -2504,7 +2504,7 @@ void *BlueStore::MempoolThread::entry() ++store->mempool_seq; utime_t wait; wait += g_conf->bluestore_cache_trim_interval; - cond.WaitInterval(g_ceph_context, lock, wait); + cond.WaitInterval(lock, wait); } stop = false; return NULL; @@ -2905,7 +2905,7 @@ int BlueStore::_check_or_set_bdev_label( if (create) { label.osd_uuid = fsid; label.size = size; - label.btime = ceph_clock_now(NULL); + label.btime = ceph_clock_now(); label.description = desc; int r = _write_bdev_label(path, label); if (r < 0) @@ -4228,7 +4228,7 @@ int BlueStore::fsck(bool deep) uint64_t num_sharded_objects = 0; uint64_t num_object_shards = 0; - utime_t start = ceph_clock_now(NULL); + utime_t start = ceph_clock_now(); int r = _open_path(); if (r < 0) @@ -4682,7 +4682,7 @@ int BlueStore::fsck(bool deep) << num_shared_blobs << " shared." 
<< dendl; - utime_t duration = ceph_clock_now(NULL) - start; + utime_t duration = ceph_clock_now() - start; dout(1) << __func__ << " finish with " << errors << " errors in " << duration << " seconds" << dendl; return errors; @@ -5226,7 +5226,7 @@ int BlueStore::_verify_csum(OnodeRef& o, { int bad; uint64_t bad_csum; - utime_t start = ceph_clock_now(g_ceph_context); + utime_t start = ceph_clock_now(); int r = blob->verify_csum(blob_xoffset, bl, &bad, &bad_csum); if (r < 0) { if (r == -1) { @@ -5250,14 +5250,14 @@ int BlueStore::_verify_csum(OnodeRef& o, derr << __func__ << " failed with exit code: " << cpp_strerror(r) << dendl; } } - logger->tinc(l_bluestore_csum_lat, ceph_clock_now(g_ceph_context) - start); + logger->tinc(l_bluestore_csum_lat, ceph_clock_now() - start); return r; } int BlueStore::_decompress(bufferlist& source, bufferlist* result) { int r = 0; - utime_t start = ceph_clock_now(g_ceph_context); + utime_t start = ceph_clock_now(); bufferlist::iterator i = source.begin(); bluestore_compression_header_t chdr; ::decode(chdr, i); @@ -5279,7 +5279,7 @@ int BlueStore::_decompress(bufferlist& source, bufferlist* result) r = -EIO; } } - logger->tinc(l_bluestore_decompress_lat, ceph_clock_now(g_ceph_context) - start); + logger->tinc(l_bluestore_decompress_lat, ceph_clock_now() - start); return r; } @@ -6481,7 +6481,7 @@ void BlueStore::_txc_finish_kv(TransContext *txc) } unsigned n = txc->osr->parent->shard_hint.hash_to_shard(m_finisher_num); if (txc->oncommit) { - logger->tinc(l_bluestore_commit_lat, ceph_clock_now(g_ceph_context) - txc->start); + logger->tinc(l_bluestore_commit_lat, ceph_clock_now() - txc->start); finishers[n]->queue(txc->oncommit); txc->oncommit = NULL; } @@ -6664,7 +6664,7 @@ void BlueStore::_kv_sync_thread() kv_committing.swap(kv_queue); kv_submitting.swap(kv_queue_unsubmitted); wal_cleaning.swap(wal_cleanup_queue); - utime_t start = ceph_clock_now(NULL); + utime_t start = ceph_clock_now(); l.unlock(); dout(30) << __func__ << " committing txc " << kv_committing << dendl; @@ -6757,7 +6757,7 @@ void BlueStore::_kv_sync_thread() dout(10) << __func__ << " blobid_max now " << blobid_max << dendl; } - utime_t finish = ceph_clock_now(NULL); + utime_t finish = ceph_clock_now(); utime_t dur = finish - start; dout(20) << __func__ << " committed " << kv_committing.size() << " cleaned " << wal_cleaning.size() @@ -7810,7 +7810,7 @@ int BlueStore::_do_alloc_write( bool compressed = false; if(c && wi.blob_length > min_alloc_size) { - utime_t start = ceph_clock_now(g_ceph_context); + utime_t start = ceph_clock_now(); // compress assert(b_off == 0); @@ -7860,7 +7860,7 @@ int BlueStore::_do_alloc_write( logger->inc(l_bluestore_compress_rejected_count); } logger->tinc(l_bluestore_compress_lat, - ceph_clock_now(g_ceph_context) - start); + ceph_clock_now() - start); } if (!compressed) { dblob.set_flag(bluestore_blob_t::FLAG_MUTABLE); diff --git a/src/os/bluestore/BlueStore.h b/src/os/bluestore/BlueStore.h index 3ec48500cb739..c33aa4a3e30a1 100644 --- a/src/os/bluestore/BlueStore.h +++ b/src/os/bluestore/BlueStore.h @@ -1207,7 +1207,7 @@ public: } void log_state_latency(PerfCounters *logger, int state) { - utime_t lat, now = ceph_clock_now(g_ceph_context); + utime_t lat, now = ceph_clock_now(); lat = now - last_stamp; logger->tinc(state, lat); last_stamp = now; @@ -1308,7 +1308,7 @@ public: onreadable_sync(NULL), wal_txn(NULL), ioc(this), - start(ceph_clock_now(g_ceph_context)) { + start(ceph_clock_now()) { last_stamp = start; } ~TransContext() { diff --git 
a/src/os/bluestore/KernelDevice.cc b/src/os/bluestore/KernelDevice.cc index 22b54b43d42a4..4ed431fe0a4a4 100644 --- a/src/os/bluestore/KernelDevice.cc +++ b/src/os/bluestore/KernelDevice.cc @@ -201,9 +201,9 @@ int KernelDevice::flush() g_ceph_context->_log->flush(); _exit(1); } - utime_t start = ceph_clock_now(NULL); + utime_t start = ceph_clock_now(); int r = ::fdatasync(fd_direct); - utime_t end = ceph_clock_now(NULL); + utime_t end = ceph_clock_now(); utime_t dur = end - start; if (r < 0) { r = -errno; @@ -280,7 +280,7 @@ void KernelDevice::_aio_thread() } } if (g_conf->bdev_debug_aio) { - utime_t now = ceph_clock_now(NULL); + utime_t now = ceph_clock_now(); std::lock_guard l(debug_queue_lock); if (debug_oldest) { if (debug_stall_since == utime_t()) { diff --git a/src/os/filestore/FileJournal.cc b/src/os/filestore/FileJournal.cc index d41136268da59..f11e419a944b9 100644 --- a/src/os/filestore/FileJournal.cc +++ b/src/os/filestore/FileJournal.cc @@ -908,7 +908,7 @@ void FileJournal::queue_write_fin(uint64_t seq, Context *fin) void FileJournal::queue_completions_thru(uint64_t seq) { assert(finisher_lock.is_locked()); - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); list items; batch_pop_completions(items); list::iterator it = items.begin(); @@ -1039,7 +1039,7 @@ void FileJournal::do_write(bufferlist& bl) << (hbp.length() ? " + header":"") << dendl; - utime_t from = ceph_clock_now(g_ceph_context); + utime_t from = ceph_clock_now(); // entry off64_t pos = write_pos; @@ -1142,7 +1142,7 @@ void FileJournal::do_write(bufferlist& bl) #endif } - utime_t lat = ceph_clock_now(g_ceph_context) - from; + utime_t lat = ceph_clock_now() - from; dout(20) << "do_write latency " << lat << dendl; write_lock.Lock(); @@ -1621,7 +1621,7 @@ void FileJournal::submit_entry(uint64_t seq, bufferlist& e, uint32_t orig_len, completions.push_back( completion_item( - seq, oncommit, ceph_clock_now(g_ceph_context), osd_op)); + seq, oncommit, ceph_clock_now(), osd_op)); if (writeq.empty()) writeq_cond.Signal(); writeq.push_back(write_item(seq, e, orig_len, osd_op)); diff --git a/src/os/filestore/FileStore.cc b/src/os/filestore/FileStore.cc index 69d8a6bf33104..aa3c14110f345 100644 --- a/src/os/filestore/FileStore.cc +++ b/src/os/filestore/FileStore.cc @@ -1927,7 +1927,7 @@ FileStore::Op *FileStore::build_op(vector& tls, } Op *o = new Op; - o->start = ceph_clock_now(g_ceph_context); + o->start = ceph_clock_now(); o->tls = std::move(tls); o->onreadable = onreadable; o->onreadable_sync = onreadable_sync; @@ -2008,7 +2008,7 @@ void FileStore::_finish_op(OpSequencer *osr) list to_queue; Op *o = osr->dequeue(&to_queue); - utime_t lat = ceph_clock_now(g_ceph_context); + utime_t lat = ceph_clock_now(); lat -= o->start; dout(10) << "_finish_op " << o << " seq " << o->op << " " << *osr << "/" << osr->parent << " lat " << lat << dendl; @@ -2064,7 +2064,7 @@ int FileStore::queue_transactions(Sequencer *posr, vector& tls, return 0; } - utime_t start = ceph_clock_now(g_ceph_context); + utime_t start = ceph_clock_now(); // set up the sequencer OpSequencer *osr; assert(posr); @@ -2125,7 +2125,7 @@ int FileStore::queue_transactions(Sequencer *posr, vector& tls, ceph_abort(); } submit_manager.op_submit_finish(op_num); - utime_t end = ceph_clock_now(g_ceph_context); + utime_t end = ceph_clock_now(); logger->tinc(l_filestore_queue_transaction_latency_avg, end - start); return 0; } @@ -2153,7 +2153,7 @@ int FileStore::queue_transactions(Sequencer *posr, vector& tls, if (ondisk) 
apply_manager.add_waiter(op_num, ondisk); submit_manager.op_submit_finish(op_num); - utime_t end = ceph_clock_now(g_ceph_context); + utime_t end = ceph_clock_now(); logger->tinc(l_filestore_queue_transaction_latency_avg, end - start); return 0; } @@ -2190,7 +2190,7 @@ int FileStore::queue_transactions(Sequencer *posr, vector& tls, submit_manager.op_submit_finish(op); apply_manager.op_apply_finish(op); - utime_t end = ceph_clock_now(g_ceph_context); + utime_t end = ceph_clock_now(); logger->tinc(l_filestore_queue_transaction_latency_avg, end - start); return r; } @@ -3816,10 +3816,10 @@ void FileStore::sync_entry() utime_t min_interval; min_interval.set_from_double(m_filestore_min_sync_interval); - utime_t startwait = ceph_clock_now(g_ceph_context); + utime_t startwait = ceph_clock_now(); if (!force_sync) { dout(20) << "sync_entry waiting for max_interval " << max_interval << dendl; - sync_cond.WaitInterval(g_ceph_context, lock, max_interval); + sync_cond.WaitInterval(lock, max_interval); } else { dout(20) << "sync_entry not waiting, force_sync set" << dendl; } @@ -3832,7 +3832,7 @@ void FileStore::sync_entry() break; } else { // wait for at least the min interval - utime_t woke = ceph_clock_now(g_ceph_context); + utime_t woke = ceph_clock_now(); woke -= startwait; dout(20) << "sync_entry woke after " << woke << dendl; if (woke < min_interval) { @@ -3840,7 +3840,7 @@ void FileStore::sync_entry() t -= woke; dout(20) << "sync_entry waiting for another " << t << " to reach min interval " << min_interval << dendl; - sync_cond.WaitInterval(g_ceph_context, lock, t); + sync_cond.WaitInterval(lock, t); } } @@ -3851,7 +3851,7 @@ void FileStore::sync_entry() op_tp.pause(); if (apply_manager.commit_start()) { - utime_t start = ceph_clock_now(g_ceph_context); + utime_t start = ceph_clock_now(); uint64_t cp = apply_manager.get_committing_seq(); sync_entry_timeo_lock.Lock(); @@ -3928,7 +3928,7 @@ void FileStore::sync_entry() } } - utime_t done = ceph_clock_now(g_ceph_context); + utime_t done = ceph_clock_now(); utime_t lat = done - start; utime_t dur = done - startwait; dout(10) << "sync_entry commit took " << lat << ", interval was " << dur << dendl; diff --git a/src/os/kstore/KStore.cc b/src/os/kstore/KStore.cc index 625e4c0cd36d4..d4756b12ee58c 100755 --- a/src/os/kstore/KStore.cc +++ b/src/os/kstore/KStore.cc @@ -2075,7 +2075,7 @@ void KStore::_kv_sync_thread() } else { dout(20) << __func__ << " committing " << kv_queue.size() << dendl; kv_committing.swap(kv_queue); - utime_t start = ceph_clock_now(NULL); + utime_t start = ceph_clock_now(); l.unlock(); dout(30) << __func__ << " committing txc " << kv_committing << dendl; @@ -2092,7 +2092,7 @@ void KStore::_kv_sync_thread() } int r = db->submit_transaction_sync(t); assert(r == 0); - utime_t finish = ceph_clock_now(NULL); + utime_t finish = ceph_clock_now(); utime_t dur = finish - start; dout(20) << __func__ << " committed " << kv_committing.size() << " in " << dur << dendl; diff --git a/src/os/kstore/KStore.h b/src/os/kstore/KStore.h index dac579d302ff2..b0533b0736451 100644 --- a/src/os/kstore/KStore.h +++ b/src/os/kstore/KStore.h @@ -207,8 +207,8 @@ public: return "???"; } - void log_state_latency(PerfCounters *logger, int state) { - utime_t lat, now = ceph_clock_now(g_ceph_context); + void log_state_latency(PerfCounters *logger, int state) { + utime_t lat, now = ceph_clock_now(); lat = now - start; logger->tinc(state, lat); start = now; @@ -237,7 +237,7 @@ public: oncommit(NULL), onreadable(NULL), onreadable_sync(NULL), - 
start(ceph_clock_now(g_ceph_context)){ + start(ceph_clock_now()){ //cout << "txc new " << this << std::endl; } ~TransContext() { diff --git a/src/osd/OSD.cc b/src/osd/OSD.cc index 3b62ef3755b80..35c70be4f8888 100644 --- a/src/osd/OSD.cc +++ b/src/osd/OSD.cc @@ -241,7 +241,7 @@ OSDService::OSDService(OSD *osd) : agent_stop_flag(false), agent_timer_lock("OSDService::agent_timer_lock"), agent_timer(osd->client_messenger->cct, agent_timer_lock), - last_recalibrate(ceph_clock_now(NULL)), + last_recalibrate(ceph_clock_now()), promote_max_objects(0), promote_max_bytes(0), objecter(new Objecter(osd->client_messenger->cct, osd->objecter_messenger, osd->monc, NULL, 0, 0)), @@ -612,7 +612,7 @@ void OSDService::agent_stop() void OSDService::promote_throttle_recalibrate() { - utime_t now = ceph_clock_now(NULL); + utime_t now = ceph_clock_now(); double dur = now - last_recalibrate; last_recalibrate = now; unsigned prob = promote_probability_millis; @@ -702,7 +702,7 @@ void OSDService::check_nearfull_warning(const osd_stat_t &osd_stat) Mutex::Locker l(full_status_lock); enum s_names new_state; - time_t now = ceph_clock_gettime(NULL); + time_t now = ceph_clock_gettime(); // We base ratio on kb_avail rather than kb_used because they can // differ significantly e.g. on btrfs volumes with a large number of @@ -1164,10 +1164,10 @@ bool OSDService::prepare_to_stop() osdmap->get_epoch(), true // request ack )); - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); utime_t timeout; timeout.set_from_double(now + cct->_conf->osd_mon_shutdown_timeout); - while ((ceph_clock_now(cct) < timeout) && + while ((ceph_clock_now() < timeout) && (get_state() != STOPPING)) { is_stopping_cond.WaitUntil(is_stopping_lock, timeout); } @@ -1462,7 +1462,7 @@ void OSDService::queue_for_snap_trim(PG *pg) { PGSnapTrim(pg->get_osdmap()->get_epoch()), cct->_conf->osd_snap_trim_cost, cct->_conf->osd_snap_trim_priority, - ceph_clock_now(cct), + ceph_clock_now(), entity_inst_t()))); } @@ -2207,7 +2207,7 @@ int OSD::init() mgrc.set_pgstats_cb([this](){ RWLock::RLocker l(map_lock); - utime_t had_for = ceph_clock_now(cct) - had_map_since; + utime_t had_for = ceph_clock_now() - had_map_since; osd_stat_t cur_stat = service.get_osd_stat(); cur_stat.os_perf_stat = store->get_cur_stats(); @@ -3926,7 +3926,7 @@ void OSD::maybe_update_heartbeat_peers() assert(osd_lock.is_locked()); if (is_waiting_for_healthy()) { - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); if (last_heartbeat_resample == utime_t()) { last_heartbeat_resample = now; heartbeat_set_peers_need_update(); @@ -4151,7 +4151,7 @@ void OSD::handle_osd_ping(MOSDPing *m) i->second.last_rx_front = m->stamp; } - utime_t cutoff = ceph_clock_now(cct); + utime_t cutoff = ceph_clock_now(); cutoff -= cct->_conf->osd_heartbeat_grace; if (i->second.is_healthy(cutoff)) { // Cancel false reports @@ -4209,7 +4209,7 @@ void OSD::heartbeat_entry() utime_t w; w.set_from_double(wait); dout(30) << "heartbeat_entry sleeping for " << wait << dendl; - heartbeat_cond.WaitInterval(cct, heartbeat_lock, w); + heartbeat_cond.WaitInterval(heartbeat_lock, w); if (is_stopping()) return; dout(30) << "heartbeat_entry woke up" << dendl; @@ -4219,7 +4219,7 @@ void OSD::heartbeat_entry() void OSD::heartbeat_check() { assert(heartbeat_lock.is_locked()); - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); // check for heartbeat replies (move me elsewhere?) 
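
Several hunks in this region carry the companion change to Cond::WaitInterval, which likewise drops its leading CephContext* argument; callers keep only the mutex and the interval. A caller-side sketch using the names already present in OSD::heartbeat_entry above, where w holds the sleep interval built from wait seconds:

utime_t w;
w.set_from_double(wait);
// was: heartbeat_cond.WaitInterval(cct, heartbeat_lock, w);
heartbeat_cond.WaitInterval(heartbeat_lock, w);

Pipe::fault, FileStore::sync_entry, and BlueStore::MempoolThread::entry earlier, and OSD::ShardedOpWQ::_process below, make the identical adjustment.
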
utime_t cutoff = now; @@ -4285,7 +4285,7 @@ void OSD::heartbeat() dout(5) << "heartbeat: " << service.get_osd_stat() << dendl; - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); // send heartbeats for (map::iterator i = heartbeat_peers.begin(); @@ -4419,7 +4419,7 @@ void OSD::tick_without_osd_lock() // mon report? bool reset = false; bool report = false; - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); pg_stat_queue_lock.Lock(); double backoff = stats_ack_timeout / g_conf->osd_mon_ack_timeout; double adjusted_min = cct->_conf->osd_mon_report_interval_min * backoff; @@ -4676,7 +4676,7 @@ void TestOpsSocketHook::test_ops(OSDService *service, ObjectStore *store, double scrub_max_interval = pool_scrub_max_interval > 0 ? pool_scrub_max_interval : g_conf->osd_scrub_max_interval; // Instead of marking must_scrub force a schedule scrub - utime_t stamp = ceph_clock_now(service->cct); + utime_t stamp = ceph_clock_now(); stamp -= scrub_max_interval; stamp -= 100.0; // push back last scrub more for good measure pg->info.history.last_scrub_stamp = stamp; @@ -4825,7 +4825,7 @@ void OSD::ms_handle_connect(Connection *con) map_lock.get_read(); Mutex::Locker l2(mon_report_lock); - utime_t now = ceph_clock_now(NULL); + utime_t now = ceph_clock_now(); last_mon_report = now; // resend everything, it's a new session @@ -5013,7 +5013,7 @@ bool OSD::_is_healthy() if (is_waiting_for_healthy()) { Mutex::Locker l(heartbeat_lock); - utime_t cutoff = ceph_clock_now(cct); + utime_t cutoff = ceph_clock_now(); cutoff -= cct->_conf->osd_heartbeat_grace; int num = 0, up = 0; for (map::iterator p = heartbeat_peers.begin(); @@ -5222,7 +5222,7 @@ void OSD::send_failures() assert(map_lock.is_locked()); assert(mon_report_lock.is_locked()); Mutex::Locker l(heartbeat_lock); - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); while (!failure_queue.empty()) { int osd = failure_queue.begin()->first; entity_inst_t i = osdmap->get_inst(osd); @@ -5289,7 +5289,7 @@ void OSD::send_pg_stats(const utime_t &now) } if (last_pg_stats_ack == utime_t() || !outstanding_pg_stats.empty()) { - last_pg_stats_ack = ceph_clock_now(cct); + last_pg_stats_ack = ceph_clock_now(); } outstanding_pg_stats.insert(tid); dout(20) << __func__ << " updates pending: " << outstanding_pg_stats << dendl; @@ -5314,7 +5314,7 @@ void OSD::handle_pg_stats_ack(MPGStatsAck *ack) pg_stat_queue_lock.Lock(); - last_pg_stats_ack = ceph_clock_now(cct); + last_pg_stats_ack = ceph_clock_now(); // decay timeout slowly (analogous to TCP) stats_ack_timeout = @@ -5366,7 +5366,7 @@ void OSD::flush_pg_stats() { dout(10) << "flush_pg_stats" << dendl; osd_lock.Unlock(); - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); map_lock.get_read(); mon_report_lock.Lock(); send_pg_stats(now); @@ -5763,7 +5763,7 @@ void OSD::do_command(Connection *con, ceph_tid_t tid, vector& cmd, buffe } } - utime_t start = ceph_clock_now(cct); + utime_t start = ceph_clock_now(); for (int64_t pos = 0; pos < count; pos += bsize) { char nm[30]; unsigned offset = 0; @@ -5788,7 +5788,7 @@ void OSD::do_command(Connection *con, ceph_tid_t tid, vector& cmd, buffe waiter.wait(); } } - utime_t end = ceph_clock_now(cct); + utime_t end = ceph_clock_now(); // clean up store->queue_transaction(osr.get(), std::move(cleanupt), NULL); @@ -6620,7 +6620,7 @@ void OSD::sched_scrub() return; } - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); bool time_permit = scrub_time_permit(now); bool load_is_low = 
scrub_load_below_threshold(); dout(20) << "sched_scrub load_is_low=" << (int)load_is_low << dendl; @@ -7076,7 +7076,7 @@ void OSD::_committed_osd_maps(epoch_t first, epoch_t last, MOSDMap *m) } } - had_map_since = ceph_clock_now(cct); + had_map_since = ceph_clock_now(); epoch_t _bind_epoch = service.get_bind_epoch(); if (osdmap->is_up(whoami) && @@ -7151,7 +7151,7 @@ void OSD::_committed_osd_maps(epoch_t first, epoch_t last, MOSDMap *m) do_restart = true; //add markdown log - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); utime_t grace = utime_t(g_conf->osd_max_markdown_period, 0); osd_markdown_log.push_back(now); //clear all out-of-date log @@ -8454,7 +8454,7 @@ void OSD::check_replay_queue() { assert(osd_lock.is_locked()); - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); list< pair > pgids; replay_queue_lock.Lock(); while (!replay_queue.empty() && @@ -8514,7 +8514,7 @@ bool OSDService::_recover_now(uint64_t *available_pushes) if (available_pushes) *available_pushes = max - recovery_ops_active - recovery_ops_reserved; - if (ceph_clock_now(cct) < defer_recovery_until) { + if (ceph_clock_now() < defer_recovery_until) { dout(15) << "_recover_now defer until " << defer_recovery_until << dendl; return false; } @@ -8870,7 +8870,7 @@ bool OSD::op_is_discardable(MOSDOp *op) void OSD::enqueue_op(PGRef pg, OpRequestRef& op) { - utime_t latency = ceph_clock_now(cct) - op->get_req()->get_recv_stamp(); + utime_t latency = ceph_clock_now() - op->get_req()->get_recv_stamp(); dout(15) << "enqueue_op " << op << " prio " << op->get_req()->get_priority() << " cost " << op->get_req()->get_cost() << " latency " << latency @@ -8890,7 +8890,7 @@ void OSD::ShardedOpWQ::_process(uint32_t thread_index, heartbeat_handle_d *hb ) osd->cct->get_heartbeat_map()->reset_timeout(hb, osd->cct->_conf->threadpool_default_timeout, 0); sdata->sdata_lock.Lock(); - sdata->sdata_cond.WaitInterval(osd->cct, sdata->sdata_lock, + sdata->sdata_cond.WaitInterval(sdata->sdata_lock, utime_t(osd->cct->_conf->threadpool_empty_queue_max_wait, 0)); sdata->sdata_lock.Unlock(); sdata->sdata_op_ordering_lock.Lock(); @@ -9022,7 +9022,7 @@ void OSD::dequeue_op( PGRef pg, OpRequestRef op, ThreadPool::TPHandle &handle) { - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); op->set_dequeued_time(now); utime_t latency = now - op->get_req()->get_recv_stamp(); dout(10) << "dequeue_op " << op << " prio " << op->get_req()->get_priority() diff --git a/src/osd/OSD.h b/src/osd/OSD.h index c1850ee813f1a..d5c19ff3b4574 100644 --- a/src/osd/OSD.h +++ b/src/osd/OSD.h @@ -898,7 +898,7 @@ public: PGScrub(pg->get_osdmap()->get_epoch()), cct->_conf->osd_scrub_cost, pg->get_scrub_priority(), - ceph_clock_now(cct), + ceph_clock_now(), entity_inst_t()))); } @@ -925,7 +925,7 @@ private: PGRecovery(p.first, reserved_pushes), cct->_conf->osd_recovery_cost, cct->_conf->osd_recovery_priority, - ceph_clock_now(cct), + ceph_clock_now(), entity_inst_t())); op_wq.queue(to_queue); } @@ -940,7 +940,7 @@ public: _maybe_queue_recovery(); } void defer_recovery(float defer_for) { - defer_recovery_until = ceph_clock_now(cct); + defer_recovery_until = ceph_clock_now(); defer_recovery_until += defer_for; } void pause_recovery() { diff --git a/src/osd/OSDMap.cc b/src/osd/OSDMap.cc index e2f0f5c1237dd..3bce6697b42cf 100644 --- a/src/osd/OSDMap.cc +++ b/src/osd/OSDMap.cc @@ -2649,7 +2649,7 @@ int OSDMap::build_simple(CephContext *cct, epoch_t e, uuid_d &fsid, << dendl; epoch = e; set_fsid(fsid); - created = modified 
= ceph_clock_now(cct); + created = modified = ceph_clock_now(); if (nosd >= 0) { set_max_osd(nosd); diff --git a/src/osd/PG.cc b/src/osd/PG.cc index 2e03e10be0e72..6d81e83c68b0d 100644 --- a/src/osd/PG.cc +++ b/src/osd/PG.cc @@ -1558,7 +1558,7 @@ void PG::activate(ObjectStore::Transaction& t, is_primary() && pool.info.crash_replay_interval > 0 && may_need_replay(get_osdmap())) { - replay_until = ceph_clock_now(cct); + replay_until = ceph_clock_now(); replay_until += pool.info.crash_replay_interval; dout(10) << "activate starting replay interval for " << pool.info.crash_replay_interval << " until " << replay_until << dendl; @@ -2654,7 +2654,7 @@ void PG::publish_stats_to_osd() else state_clear(PG_STATE_INCONSISTENT); - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); if (info.stats.state != state) { info.stats.last_change = now; if ((state & PG_STATE_ACTIVE) && @@ -3424,7 +3424,7 @@ bool PG::sched_scrub() if (deep_scrub_interval <= 0) { deep_scrub_interval = cct->_conf->osd_deep_scrub_interval; } - bool time_for_deep = ceph_clock_now(cct) >= + bool time_for_deep = ceph_clock_now() >= info.history.last_deep_scrub_stamp + deep_scrub_interval; bool deep_coin_flip = false; @@ -3535,7 +3535,7 @@ void PG::reg_next_scrub() utime_t reg_stamp; if (scrubber.must_scrub || (info.stats.stats_invalid && g_conf->osd_scrub_invalid_stats)) { - reg_stamp = ceph_clock_now(cct); + reg_stamp = ceph_clock_now(); } else { reg_stamp = info.history.last_scrub_stamp; } @@ -4635,7 +4635,7 @@ void PG::scrub_finish() // finish up unreg_next_scrub(); - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); info.history.last_scrub = info.last_update; info.history.last_scrub_stamp = now; if (scrubber.deep) { @@ -5850,7 +5850,7 @@ void PG::RecoveryState::Initial::exit() { context< RecoveryMachine >().log_exit(state_name, enter_time); PG *pg = context< RecoveryMachine >().pg; - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_initial_latency, dur); } @@ -5913,7 +5913,7 @@ void PG::RecoveryState::Started::exit() { context< RecoveryMachine >().log_exit(state_name, enter_time); PG *pg = context< RecoveryMachine >().pg; - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_started_latency, dur); } @@ -6008,7 +6008,7 @@ void PG::RecoveryState::Reset::exit() { context< RecoveryMachine >().log_exit(state_name, enter_time); PG *pg = context< RecoveryMachine >().pg; - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_reset_latency, dur); } @@ -6033,7 +6033,7 @@ void PG::RecoveryState::Start::exit() { context< RecoveryMachine >().log_exit(state_name, enter_time); PG *pg = context< RecoveryMachine >().pg; - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_start_latency, dur); } @@ -6095,7 +6095,7 @@ void PG::RecoveryState::Primary::exit() context< RecoveryMachine >().log_exit(state_name, enter_time); PG *pg = context< RecoveryMachine >().pg; pg->want_acting.clear(); - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_primary_latency, dur); pg->clear_primary_state(); pg->state_clear(PG_STATE_CREATING); @@ -6198,7 +6198,7 @@ void PG::RecoveryState::Peering::exit() 
pg->state_clear(PG_STATE_PEERING); pg->clear_probe_targets(); - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_peering_latency, dur); } @@ -6254,7 +6254,7 @@ void PG::RecoveryState::Backfilling::exit() pg->backfill_reserved = false; pg->backfill_reserving = false; pg->state_clear(PG_STATE_BACKFILL); - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_backfilling_latency, dur); } @@ -6301,7 +6301,7 @@ void PG::RecoveryState::WaitRemoteBackfillReserved::exit() { context< RecoveryMachine >().log_exit(state_name, enter_time); PG *pg = context< RecoveryMachine >().pg; - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_waitremotebackfillreserved_latency, dur); } @@ -6359,7 +6359,7 @@ void PG::RecoveryState::WaitLocalBackfillReserved::exit() { context< RecoveryMachine >().log_exit(state_name, enter_time); PG *pg = context< RecoveryMachine >().pg; - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_waitlocalbackfillreserved_latency, dur); } @@ -6387,7 +6387,7 @@ void PG::RecoveryState::NotBackfilling::exit() { context< RecoveryMachine >().log_exit(state_name, enter_time); PG *pg = context< RecoveryMachine >().pg; - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_notbackfilling_latency, dur); } @@ -6403,7 +6403,7 @@ void PG::RecoveryState::RepNotRecovering::exit() { context< RecoveryMachine >().log_exit(state_name, enter_time); PG *pg = context< RecoveryMachine >().pg; - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_repnotrecovering_latency, dur); } @@ -6441,7 +6441,7 @@ void PG::RecoveryState::RepWaitRecoveryReserved::exit() { context< RecoveryMachine >().log_exit(state_name, enter_time); PG *pg = context< RecoveryMachine >().pg; - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_repwaitrecoveryreserved_latency, dur); } @@ -6483,7 +6483,7 @@ void PG::RecoveryState::RepWaitBackfillReserved::exit() { context< RecoveryMachine >().log_exit(state_name, enter_time); PG *pg = context< RecoveryMachine >().pg; - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_repwaitbackfillreserved_latency, dur); } @@ -6549,7 +6549,7 @@ void PG::RecoveryState::RepRecovering::exit() context< RecoveryMachine >().log_exit(state_name, enter_time); PG *pg = context< RecoveryMachine >().pg; pg->osd->remote_reserver.cancel_reservation(pg->info.pgid); - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_reprecovering_latency, dur); } @@ -6565,7 +6565,7 @@ void PG::RecoveryState::Activating::exit() { context< RecoveryMachine >().log_exit(state_name, enter_time); PG *pg = context< RecoveryMachine >().pg; - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_activating_latency, dur); } @@ -6588,7 +6588,7 @@ void PG::RecoveryState::WaitLocalRecoveryReserved::exit() { context< 
RecoveryMachine >().log_exit(state_name, enter_time); PG *pg = context< RecoveryMachine >().pg; - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_waitlocalrecoveryreserved_latency, dur); } @@ -6628,7 +6628,7 @@ void PG::RecoveryState::WaitRemoteRecoveryReserved::exit() { context< RecoveryMachine >().log_exit(state_name, enter_time); PG *pg = context< RecoveryMachine >().pg; - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_waitremoterecoveryreserved_latency, dur); } @@ -6691,7 +6691,7 @@ void PG::RecoveryState::Recovering::exit() { context< RecoveryMachine >().log_exit(state_name, enter_time); PG *pg = context< RecoveryMachine >().pg; - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_recovering_latency, dur); } @@ -6731,7 +6731,7 @@ void PG::RecoveryState::Recovered::exit() { context< RecoveryMachine >().log_exit(state_name, enter_time); PG *pg = context< RecoveryMachine >().pg; - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_recovered_latency, dur); } @@ -6759,7 +6759,7 @@ void PG::RecoveryState::Clean::exit() context< RecoveryMachine >().log_exit(state_name, enter_time); PG *pg = context< RecoveryMachine >().pg; pg->state_clear(PG_STATE_CLEAN); - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_clean_latency, dur); } @@ -7089,7 +7089,7 @@ void PG::RecoveryState::Active::exit() pg->state_clear(PG_STATE_BACKFILL_WAIT); pg->state_clear(PG_STATE_RECOVERY_WAIT); pg->state_clear(PG_STATE_REPLAY); - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_active_latency, dur); pg->agent_stop(); } @@ -7182,7 +7182,7 @@ void PG::RecoveryState::ReplicaActive::exit() context< RecoveryMachine >().log_exit(state_name, enter_time); PG *pg = context< RecoveryMachine >().pg; pg->osd->remote_reserver.cancel_reservation(pg->info.pgid); - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_replicaactive_latency, dur); } @@ -7294,7 +7294,7 @@ void PG::RecoveryState::Stray::exit() { context< RecoveryMachine >().log_exit(state_name, enter_time); PG *pg = context< RecoveryMachine >().pg; - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_stray_latency, dur); } @@ -7493,7 +7493,7 @@ void PG::RecoveryState::GetInfo::exit() { context< RecoveryMachine >().log_exit(state_name, enter_time); PG *pg = context< RecoveryMachine >().pg; - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_getinfo_latency, dur); pg->blocked_by.clear(); pg->publish_stats_to_osd(); @@ -7625,7 +7625,7 @@ void PG::RecoveryState::GetLog::exit() { context< RecoveryMachine >().log_exit(state_name, enter_time); PG *pg = context< RecoveryMachine >().pg; - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_getlog_latency, dur); pg->blocked_by.clear(); pg->publish_stats_to_osd(); @@ -7687,7 +7687,7 @@ 
void PG::RecoveryState::WaitActingChange::exit() { context< RecoveryMachine >().log_exit(state_name, enter_time); PG *pg = context< RecoveryMachine >().pg; - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_waitactingchange_latency, dur); } @@ -7714,7 +7714,7 @@ void PG::RecoveryState::Down::exit() PG *pg = context< RecoveryMachine >().pg; pg->state_clear(PG_STATE_DOWN); - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_down_latency, dur); pg->blocked_by.clear(); @@ -7796,7 +7796,7 @@ void PG::RecoveryState::Incomplete::exit() PG *pg = context< RecoveryMachine >().pg; pg->state_clear(PG_STATE_INCOMPLETE); - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_incomplete_latency, dur); pg->blocked_by.clear(); @@ -7935,7 +7935,7 @@ void PG::RecoveryState::GetMissing::exit() { context< RecoveryMachine >().log_exit(state_name, enter_time); PG *pg = context< RecoveryMachine >().pg; - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_getmissing_latency, dur); pg->blocked_by.clear(); pg->publish_stats_to_osd(); @@ -7981,7 +7981,7 @@ void PG::RecoveryState::WaitUpThru::exit() { context< RecoveryMachine >().log_exit(state_name, enter_time); PG *pg = context< RecoveryMachine >().pg; - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_waitupthru_latency, dur); } @@ -7997,9 +7997,9 @@ void PG::RecoveryState::RecoveryMachine::log_enter(const char *state_name) void PG::RecoveryState::RecoveryMachine::log_exit(const char *state_name, utime_t enter_time) { - utime_t dur = ceph_clock_now(pg->cct) - enter_time; + utime_t dur = ceph_clock_now() - enter_time; dout(5) << "exit " << state_name << " " << dur << " " << event_count << " " << event_time << dendl; - pg->osd->pg_recovery_stats.log_exit(state_name, ceph_clock_now(pg->cct) - enter_time, + pg->osd->pg_recovery_stats.log_exit(state_name, ceph_clock_now() - enter_time, event_count, event_time); event_count = 0; event_time = utime_t(); @@ -8224,7 +8224,7 @@ void PG::RecoveryState::start_handle(RecoveryCtx *new_ctx) { } else { rctx = *new_ctx; } - rctx->start_time = ceph_clock_now(pg->cct); + rctx->start_time = ceph_clock_now(); } } @@ -8254,7 +8254,7 @@ void PG::RecoveryState::end_block_outgoing() { void PG::RecoveryState::end_handle() { if (rctx) { - utime_t dur = ceph_clock_now(pg->cct) - rctx->start_time; + utime_t dur = ceph_clock_now() - rctx->start_time; machine.event_time += dur; } diff --git a/src/osd/PG.h b/src/osd/PG.h index 5bb439f01f44b..06e2603067f38 100644 --- a/src/osd/PG.h +++ b/src/osd/PG.h @@ -670,7 +670,7 @@ public: const char *get_state_name() { return state_name; } NamedState(CephContext *cct_, const char *state_name_) : state_name(state_name_), - enter_time(ceph_clock_now(cct_)) {} + enter_time(ceph_clock_now()) {} virtual ~NamedState() {} }; diff --git a/src/osd/PGBackend.cc b/src/osd/PGBackend.cc index f574d21f4f8da..199d4ba3a8ea4 100644 --- a/src/osd/PGBackend.cc +++ b/src/osd/PGBackend.cc @@ -729,7 +729,7 @@ void PGBackend::be_compare_scrubmaps( map::const_iterator i; map::const_iterator j; set master_set; - utime_t now = ceph_clock_now(NULL); + utime_t now = ceph_clock_now(); // Construct master set for (j = 
maps.begin(); j != maps.end(); ++j) { diff --git a/src/osd/PrimaryLogPG.cc b/src/osd/PrimaryLogPG.cc index 05b450ec9d6c3..a1b4cc3e5f922 100644 --- a/src/osd/PrimaryLogPG.cc +++ b/src/osd/PrimaryLogPG.cc @@ -2307,7 +2307,7 @@ void PrimaryLogPG::do_op(OpRequestRef& op) ctx->src_obc.swap(src_obc); execute_ctx(ctx); - utime_t prepare_latency = ceph_clock_now(cct); + utime_t prepare_latency = ceph_clock_now(); prepare_latency -= op->get_dequeued_time(); osd->logger->tinc(l_osd_op_prepare_lat, prepare_latency); if (op->may_read() && op->may_write()) { @@ -2661,7 +2661,7 @@ struct C_ProxyRead : public Context { C_ProxyRead(PrimaryLogPG *p, hobject_t o, epoch_t lpr, const PrimaryLogPG::ProxyReadOpRef& prd) : pg(p), oid(o), last_peering_reset(lpr), - tid(0), prdop(prd), start(ceph_clock_now(NULL)) + tid(0), prdop(prd), start(ceph_clock_now()) {} void finish(int r) { if (prdop->canceled) @@ -2673,7 +2673,7 @@ struct C_ProxyRead : public Context { } if (last_peering_reset == pg->get_last_peering_reset()) { pg->finish_proxy_read(oid, tid, r); - pg->osd->logger->tinc(l_osd_tier_r_lat, ceph_clock_now(NULL) - start); + pg->osd->logger->tinc(l_osd_tier_r_lat, ceph_clock_now() - start); } pg->unlock(); } @@ -3004,13 +3004,13 @@ public: PromoteCallback(ObjectContextRef obc_, PrimaryLogPG *pg_) : obc(obc_), pg(pg_), - start(ceph_clock_now(NULL)) {} + start(ceph_clock_now()) {} virtual void finish(PrimaryLogPG::CopyCallbackResults results) { PrimaryLogPG::CopyResults *results_data = results.get<1>(); int r = results.get<0>(); pg->finish_promote(r, results_data, obc); - pg->osd->logger->tinc(l_osd_tier_promote_lat, ceph_clock_now(NULL) - start); + pg->osd->logger->tinc(l_osd_tier_promote_lat, ceph_clock_now() - start); } }; @@ -3282,7 +3282,7 @@ void PrimaryLogPG::execute_ctx(OpContext *ctx) // _prior_ to being committed; it will not get set with // writeahead journaling, for instance. 
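The hunks through PG.cc and PrimaryLogPG.cc repeat one mechanical change at every call site: ceph_clock_now(cct) or ceph_clock_now(NULL) becomes the zero-argument ceph_clock_now(). A minimal sketch of the resulting call-site shape; elapsed_since() is a hypothetical helper for illustration, not code from this change:

    #include "common/Clock.h"   // declares utime_t ceph_clock_now()

    // Old form at these call sites: ceph_clock_now(cct) / ceph_clock_now(NULL)
    // New form: no argument at all.
    utime_t elapsed_since(const utime_t &start)
    {
      utime_t now = ceph_clock_now();
      return now - start;  // utime_t subtraction, as in the tinc() hunks above
    }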
if (ctx->readable_stamp == utime_t()) - ctx->readable_stamp = ceph_clock_now(cct); + ctx->readable_stamp = ceph_clock_now(); }); ctx->register_on_commit( [m, ctx, this](){ @@ -3348,7 +3348,7 @@ void PrimaryLogPG::log_op_stats(OpContext *ctx) OpRequestRef op = ctx->op; MOSDOp *m = static_cast<MOSDOp*>(op->get_req()); - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); utime_t latency = now; latency -= ctx->op->get_req()->get_recv_stamp(); utime_t process_latency = now; @@ -5540,7 +5540,7 @@ int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops) break; } dout(10) << " found existing watch " << w << " by " << entity << dendl; - p->second->got_ping(ceph_clock_now(NULL)); + p->second->got_ping(ceph_clock_now()); result = 0; } else if (op.watch.op == CEPH_OSD_WATCH_OP_UNWATCH) { map<pair<uint64_t, entity_name_t>, watch_info_t>::iterator oi_iter = @@ -6833,7 +6833,7 @@ void PrimaryLogPG::finish_ctx(OpContext *ctx, int log_op_type, bool maintain_ssc dout(20) << __func__ << " " << soid << " " << ctx << " op " << pg_log_entry_t::get_op_name(log_op_type) << dendl; - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); // snapset bufferlist bss; @@ -7991,7 +7991,7 @@ struct C_Flush : public Context { utime_t start; C_Flush(PrimaryLogPG *p, hobject_t o, epoch_t lpr) : pg(p), oid(o), last_peering_reset(lpr), - tid(0), start(ceph_clock_now(NULL)) + tid(0), start(ceph_clock_now()) {} void finish(int r) { if (r == -ECANCELED) @@ -7999,7 +7999,7 @@ struct C_Flush : public Context { pg->lock(); if (last_peering_reset == pg->get_last_peering_reset()) { pg->finish_flush(oid, tid, r); - pg->osd->logger->tinc(l_osd_tier_flush_lat, ceph_clock_now(NULL) - start); + pg->osd->logger->tinc(l_osd_tier_flush_lat, ceph_clock_now() - start); } pg->unlock(); } @@ -8674,7 +8674,7 @@ PrimaryLogPG::RepGather *PrimaryLogPG::new_repop( RepGather *repop = new RepGather( ctx, rep_tid, info.last_complete, false); - repop->start = ceph_clock_now(cct); + repop->start = ceph_clock_now(); repop_queue.push_back(&repop->queue_item); repop->get(); @@ -8702,7 +8702,7 @@ boost::intrusive_ptr<PrimaryLogPG::RepGather> PrimaryLogPG::new_repop( r); repop->v = version; - repop->start = ceph_clock_now(cct); + repop->start = ceph_clock_now(); repop_queue.push_back(&repop->queue_item); @@ -8737,7 +8737,7 @@ PrimaryLogPG::OpContextUPtr PrimaryLogPG::simple_opc_create(ObjectContextRef obc osd_reqid_t reqid(osd->get_cluster_msgr_name(), 0, rep_tid); OpContextUPtr ctx(new OpContext(OpRequestRef(), reqid, ops, obc, this)); ctx->op_t.reset(new PGTransaction()); - ctx->mtime = ceph_clock_now(g_ceph_context); + ctx->mtime = ceph_clock_now(); return ctx; } @@ -9923,7 +9923,7 @@ void PrimaryLogPG::mark_all_unfound_lost( mempool::osd::list<pg_log_entry_t> log_entries; - utime_t mtime = ceph_clock_now(cct); + utime_t mtime = ceph_clock_now(); map::const_iterator m = missing_loc.get_needs_recovery().begin(); map::const_iterator mend = @@ -11681,7 +11681,7 @@ void PrimaryLogPG::hit_set_remove_all() OpContextUPtr ctx = simple_opc_create(obc); ctx->at_version = get_next_version(); ctx->updated_hset_history = info.hit_set; - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); ctx->mtime = now; hit_set_trim(ctx, 0); simple_opc_submit(std::move(ctx)); @@ -11695,7 +11695,7 @@ void PrimaryLogPG::hit_set_remove_all() void PrimaryLogPG::hit_set_create() { - utime_t now = ceph_clock_now(NULL); + utime_t now = ceph_clock_now(); // make a copy of the params to modify HitSet::Params params(pool.info.hit_set_params); @@ -11770,7 +11770,7 @@ void PrimaryLogPG::hit_set_persist() bufferlist bl;
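log_op_stats() above samples the clock once and derives several intervals from that single reading, so the latencies stay mutually consistent. A sketch of that shape; recv_stamp and dequeued_stamp are hypothetical stand-ins for the timestamps the real code reads off the op:

    #include "common/Clock.h"

    void sample_latencies(const utime_t &recv_stamp,
                          const utime_t &dequeued_stamp)
    {
      utime_t now = ceph_clock_now();    // one reading for both intervals
      utime_t latency = now;
      latency -= recv_stamp;             // time since the op was received
      utime_t process_latency = now;
      process_latency -= dequeued_stamp; // time since it was dequeued
      // the real code feeds these into perf counters via logger->tinc(...)
    }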
unsigned max = pool.info.hit_set_count; - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); hobject_t oid; time_t flush_time = 0; @@ -12226,7 +12226,7 @@ bool PrimaryLogPG::agent_maybe_flush(ObjectContextRef& obc) return false; } - utime_t now = ceph_clock_now(NULL); + utime_t now = ceph_clock_now(); utime_t ob_local_mtime; if (obc->obs.oi.local_mtime != utime_t()) { ob_local_mtime = obc->obs.oi.local_mtime; @@ -12306,7 +12306,7 @@ bool PrimaryLogPG::agent_maybe_evict(ObjectContextRef& obc, bool after_flush) if (agent_state->evict_mode != TierAgentState::EVICT_MODE_FULL) { // is this object old than cache_min_evict_age? - utime_t now = ceph_clock_now(NULL); + utime_t now = ceph_clock_now(); utime_t ob_local_mtime; if (obc->obs.oi.local_mtime != utime_t()) { ob_local_mtime = obc->obs.oi.local_mtime; diff --git a/src/osd/ReplicatedBackend.cc b/src/osd/ReplicatedBackend.cc index c051db8828efa..81c0c99365bcb 100644 --- a/src/osd/ReplicatedBackend.cc +++ b/src/osd/ReplicatedBackend.cc @@ -82,7 +82,7 @@ static void log_subop_stats( PerfCounters *logger, OpRequestRef op, int subop) { - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); utime_t latency = now; latency -= op->get_req()->get_recv_stamp(); diff --git a/src/osd/Watch.cc b/src/osd/Watch.cc index ad9877d99de30..4090d1c56492e 100644 --- a/src/osd/Watch.cc +++ b/src/osd/Watch.cc @@ -377,7 +377,7 @@ void Watch::connect(ConnectionRef con, bool _will_ping) } } if (will_ping) { - last_ping = ceph_clock_now(NULL); + last_ping = ceph_clock_now(); register_cb(); } else { unregister_cb(); @@ -449,7 +449,7 @@ void Watch::start_notify(NotifyRef notif) assert(in_progress_notifies.find(notif->notify_id) == in_progress_notifies.end()); if (will_ping) { - utime_t cutoff = ceph_clock_now(NULL); + utime_t cutoff = ceph_clock_now(); cutoff.sec_ref() -= timeout; if (last_ping < cutoff) { dout(10) << __func__ << " " << notif->notify_id diff --git a/src/osdc/Journaler.cc b/src/osdc/Journaler.cc index 781ca1c7aa947..4b517cc34ce6e 100644 --- a/src/osdc/Journaler.cc +++ b/src/osdc/Journaler.cc @@ -439,7 +439,7 @@ void Journaler::_write_head(Context *oncommit) assert(last_written.write_pos >= last_written.expire_pos); assert(last_written.expire_pos >= last_written.trimmed_pos); - last_wrote_head = ceph::real_clock::now(cct); + last_wrote_head = ceph::real_clock::now(); bufferlist bl; ::encode(last_written, bl); @@ -447,7 +447,7 @@ void Journaler::_write_head(Context *oncommit) object_t oid = file_object_t(ino, 0); object_locator_t oloc(pg_pool); - objecter->write_full(oid, oloc, snapc, bl, ceph::real_clock::now(cct), 0, + objecter->write_full(oid, oloc, snapc, bl, ceph::real_clock::now(), 0, NULL, wrap_finisher(new C_WriteHead( this, last_written, wrap_finisher(oncommit))), @@ -505,7 +505,7 @@ void Journaler::_finish_flush(int r, uint64_t start, ceph::real_time stamp) // calc latency? 
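The Journaler hunks that follow use the std::chrono-style clock rather than utime_t: ceph::real_clock::now() loses its argument the same way, and an interval is then ordinary time_point subtraction. A minimal sketch, assuming only the ceph::real_clock and ceph::timespan types from common/ceph_time.h:

    #include "common/ceph_time.h"  // ceph::real_clock, ceph::real_time, ceph::timespan

    // Latency measurement in the shape of Journaler::_finish_flush() below.
    ceph::timespan latency_since(ceph::real_time stamp)
    {
      return ceph::real_clock::now() - stamp;
    }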
if (logger) { - ceph::timespan lat = ceph::real_clock::now(cct) - stamp; + ceph::timespan lat = ceph::real_clock::now() - stamp; logger->tinc(logger_key_lat, lat); } @@ -632,7 +632,7 @@ void Journaler::_do_flush(unsigned amount) // submit write for anything pending // flush _start_ pos to _finish_flush - ceph::real_time now = ceph::real_clock::now(cct); + ceph::real_time now = ceph::real_clock::now(); SnapContext snapc; Context *onsafe = new C_Flush(this, flush_pos, now); // on COMMIT @@ -648,7 +648,7 @@ void Journaler::_do_flush(unsigned amount) } filer.write(ino, &layout, snapc, - flush_pos, len, write_bl, ceph::real_clock::now(cct), + flush_pos, len, write_bl, ceph::real_clock::now(), 0, NULL, wrap_finisher(onsafe), write_iohint); @@ -736,7 +736,7 @@ void Journaler::_flush(C_OnFinisher *onsafe) // write head? if (last_wrote_head + seconds(cct->_conf->journaler_write_head_interval) - < ceph::real_clock::now(cct)) { + < ceph::real_clock::now()) { _write_head(); } } @@ -791,7 +791,7 @@ void Journaler::_issue_prezero() Context *c = wrap_finisher(new C_Journaler_Prezero(this, prezeroing_pos, len)); filer.zero(ino, &layout, snapc, prezeroing_pos, len, - ceph::real_clock::now(cct), 0, NULL, c); + ceph::real_clock::now(), 0, NULL, c); prezeroing_pos += len; } } @@ -1126,7 +1126,7 @@ void Journaler::erase(Context *completion) uint64_t first = trimmed_pos / get_layout_period(); uint64_t num = (write_pos - trimmed_pos) / get_layout_period() + 2; filer.purge_range(ino, &layout, SnapContext(), first, num, - ceph::real_clock::now(cct), 0, + ceph::real_clock::now(), 0, wrap_finisher(new C_EraseFinish( this, wrap_finisher(completion)))); @@ -1142,7 +1142,8 @@ void Journaler::_finish_erase(int data_result, C_OnFinisher *completion) if (data_result == 0) { // Async delete the journal header - filer.purge_range(ino, &layout, SnapContext(), 0, 1, ceph::real_clock::now(cct), + filer.purge_range(ino, &layout, SnapContext(), 0, 1, + ceph::real_clock::now(), 0, wrap_finisher(completion)); } else { lderr(cct) << "Failed to delete journal " << ino << " data: " @@ -1273,7 +1274,7 @@ void Journaler::_trim() uint64_t num = (trim_to - trimming_pos) / period; SnapContext snapc; filer.purge_range(ino, &layout, snapc, first, num, - ceph::real_clock::now(cct), 0, + ceph::real_clock::now(), 0, wrap_finisher(new C_Trim(this, trim_to))); trimming_pos = trim_to; } diff --git a/src/osdc/ObjectCacher.cc b/src/osdc/ObjectCacher.cc index e544a0ec5bbf1..849bb635f25f4 100644 --- a/src/osdc/ObjectCacher.cc +++ b/src/osdc/ObjectCacher.cc @@ -1850,7 +1850,7 @@ void ObjectCacher::flusher_entry() if (flusher_stop) break; - flusher_cond.WaitInterval(cct, lock, seconds(1)); + flusher_cond.WaitInterval(lock, seconds(1)); } /* Wait for reads to finish. 
This is only possible if handling diff --git a/src/rgw/librgw.cc b/src/rgw/librgw.cc index 9ddf99955f82d..89f71ee2565f0 100644 --- a/src/rgw/librgw.cc +++ b/src/rgw/librgw.cc @@ -120,7 +120,7 @@ namespace rgw { // XXX move RGWLibIO and timing setup into process_request #if 0 /* XXX */ - utime_t tm = ceph_clock_now(NULL); + utime_t tm = ceph_clock_now(); #endif RGWLibIO io_ctx; @@ -139,7 +139,7 @@ namespace rgw { // XXX move RGWLibIO and timing setup into process_request #if 0 /* XXX */ - utime_t tm = ceph_clock_now(NULL); + utime_t tm = ceph_clock_now(); #endif RGWLibIO io_ctx; diff --git a/src/rgw/rgw_admin.cc b/src/rgw/rgw_admin.cc index ede5781dd9095..36c8033a8d3d6 100644 --- a/src/rgw/rgw_admin.cc +++ b/src/rgw/rgw_admin.cc @@ -6487,7 +6487,7 @@ next: cerr << "ERROR: marker was not specified" <_conf->rgw_data_log_window * 3 / 4; lock.Lock(); - cond.WaitInterval(cct, lock, utime_t(interval, 0)); + cond.WaitInterval(lock, utime_t(interval, 0)); lock.Unlock(); } while (!log->going_down()); diff --git a/src/rgw/rgw_common.cc b/src/rgw/rgw_common.cc index 21ee4a06bbfb3..c4e34a344d767 100644 --- a/src/rgw/rgw_common.cc +++ b/src/rgw/rgw_common.cc @@ -193,7 +193,7 @@ req_state::req_state(CephContext* _cct, RGWEnv* e, RGWUserInfo* u) system_request = false; - time = ceph_clock_now(cct); + time = ceph_clock_now(); perm_mask = 0; bucket_instance_shard_id = -1; content_length = 0; diff --git a/src/rgw/rgw_coroutine.cc b/src/rgw/rgw_coroutine.cc index 090aed2337757..eccae8b1a8cba 100644 --- a/src/rgw/rgw_coroutine.cc +++ b/src/rgw/rgw_coroutine.cc @@ -157,7 +157,7 @@ stringstream& RGWCoroutine::Status::set_status() if (history.size() > (size_t)max_history) { history.pop_front(); } - timestamp = ceph_clock_now(cct); + timestamp = ceph_clock_now(); return status; } diff --git a/src/rgw/rgw_cr_rados.h b/src/rgw/rgw_cr_rados.h index 02f79290b163b..fa8ac58241d5d 100644 --- a/src/rgw/rgw_cr_rados.h +++ b/src/rgw/rgw_cr_rados.h @@ -574,7 +574,7 @@ class RGWAsyncWait : public RGWAsyncRadosRequest { protected: int _send_request() { Mutex::Locker l(*lock); - return cond->WaitInterval(cct, *lock, interval); + return cond->WaitInterval(*lock, interval); } public: RGWAsyncWait(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, CephContext *_cct, diff --git a/src/rgw/rgw_file.cc b/src/rgw/rgw_file.cc index b04c716285189..e2a39dff59cbf 100644 --- a/src/rgw/rgw_file.cc +++ b/src/rgw/rgw_file.cc @@ -1080,7 +1080,7 @@ namespace rgw { done: dispose_processor(processor); perfcounter->tinc(l_rgw_put_lat, - (ceph_clock_now(s->cct) - s->time)); + (ceph_clock_now() - s->time)); return op_ret; } /* exec_finish */ diff --git a/src/rgw/rgw_gc.cc b/src/rgw/rgw_gc.cc index 05fcbe3e2ffc2..a5f5d395055b3 100644 --- a/src/rgw/rgw_gc.cc +++ b/src/rgw/rgw_gc.cc @@ -131,7 +131,7 @@ int RGWGC::list(int *index, string& marker, uint32_t max, bool expired_only, std int RGWGC::process(int index, int max_secs) { rados::cls::lock::Lock l(gc_index_lock_name); - utime_t end = ceph_clock_now(g_ceph_context); + utime_t end = ceph_clock_now(); std::list remove_tags; /* max_secs should be greater than zero. 
We don't want a zero max_secs @@ -175,7 +175,7 @@ int RGWGC::process(int index, int max_secs) std::list::iterator liter; cls_rgw_obj_chain& chain = info.chain; - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); if (now >= end) goto done; @@ -275,7 +275,7 @@ void RGWGC::stop_processor() void *RGWGC::GCWorker::entry() { do { - utime_t start = ceph_clock_now(cct); + utime_t start = ceph_clock_now(); dout(2) << "garbage collection: start" << dendl; int r = gc->process(); if (r < 0) { @@ -286,7 +286,7 @@ void *RGWGC::GCWorker::entry() { if (gc->going_down()) break; - utime_t end = ceph_clock_now(cct); + utime_t end = ceph_clock_now(); end -= start; int secs = cct->_conf->rgw_gc_processor_period; @@ -296,7 +296,7 @@ void *RGWGC::GCWorker::entry() { secs -= end.sec(); lock.Lock(); - cond.WaitInterval(cct, lock, utime_t(secs, 0)); + cond.WaitInterval(lock, utime_t(secs, 0)); lock.Unlock(); } while (!gc->going_down()); diff --git a/src/rgw/rgw_keystone.cc b/src/rgw/rgw_keystone.cc index c2c8fe39da9ce..4d2fc81113409 100644 --- a/src/rgw/rgw_keystone.cc +++ b/src/rgw/rgw_keystone.cc @@ -508,8 +508,8 @@ void * RGWKeystoneTokenCache::RevokeThread::entry() } lock.Lock(); - cond.WaitInterval(cct, lock, - utime_t(cct->_conf->rgw_keystone_revocation_interval, 0)); + cond.WaitInterval(lock, + utime_t(cct->_conf->rgw_keystone_revocation_interval, 0)); lock.Unlock(); } while (!cache->going_down()); diff --git a/src/rgw/rgw_keystone.h b/src/rgw/rgw_keystone.h index 042a84ffb6465..f81d32777ce85 100644 --- a/src/rgw/rgw_keystone.h +++ b/src/rgw/rgw_keystone.h @@ -123,7 +123,7 @@ public: const std::string& get_user_name() const {return user.name;}; bool has_role(const string& r) const; bool expired() { - uint64_t now = ceph_clock_now(NULL).sec(); + uint64_t now = ceph_clock_now().sec(); return (now >= (uint64_t)get_expires()); } int parse(CephContext *cct, diff --git a/src/rgw/rgw_lc.cc b/src/rgw/rgw_lc.cc index 5b07c5990af88..4821b1888ee39 100644 --- a/src/rgw/rgw_lc.cc +++ b/src/rgw/rgw_lc.cc @@ -45,7 +45,7 @@ void RGWLifecycleConfiguration::_add_rule(LCRule *rule) void *RGWLC::LCWorker::entry() { do { - utime_t start = ceph_clock_now(cct); + utime_t start = ceph_clock_now(); if (should_work(start)) { dout(5) << "life cycle: start" << dendl; int r = lc->process(); @@ -57,7 +57,7 @@ void *RGWLC::LCWorker::entry() { if (lc->going_down()) break; - utime_t end = ceph_clock_now(cct); + utime_t end = ceph_clock_now(); int secs = schedule_next_start_time(start, end); time_t next_time = end + secs; char buf[30]; @@ -65,7 +65,7 @@ void *RGWLC::LCWorker::entry() { dout(5) << "schedule life cycle next start time: " << nt <going_down()); @@ -103,7 +103,7 @@ bool RGWLC::if_already_run_today(time_t& start_date) { struct tm bdt; time_t begin_of_day; - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); localtime_r(&start_date, &bdt); if (cct->_conf->rgw_lc_debug_interval > 0) { @@ -244,7 +244,7 @@ int RGWLC::bucket_lc_process(string& shard_id) vector::iterator obj_iter; int pos = 0; - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); for (obj_iter = objs.begin(); obj_iter != objs.end(); obj_iter++) { bool prefix_match = false; int match_days = 0; @@ -311,7 +311,7 @@ int RGWLC::bucket_lc_process(string& shard_id) vector::iterator obj_iter; int days = prefix_iter->second; - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); for (obj_iter = objs.begin(); obj_iter != objs.end(); obj_iter++) { if (obj_has_expired(now - 
ceph::real_clock::to_time_t((*obj_iter).mtime), days)) { @@ -421,7 +421,7 @@ int RGWLC::process(int index, int max_lock_secs) { rados::cls::lock::Lock l(lc_index_lock_name); do { - utime_t now = ceph_clock_now(cct); + utime_t now = ceph_clock_now(); pair entry;//string = bucket_name:bucket_id ,int = LC_BUCKET_STATUS if (max_lock_secs <= 0) return -EAGAIN; diff --git a/src/rgw/rgw_loadgen_process.cc b/src/rgw/rgw_loadgen_process.cc index 62a4ebd52bcb4..d74688309b6f4 100644 --- a/src/rgw/rgw_loadgen_process.cc +++ b/src/rgw/rgw_loadgen_process.cc @@ -118,7 +118,7 @@ void RGWLoadGenProcess::handle_request(RGWRequest* r) RGWLoadGenRequestEnv env; - utime_t tm = ceph_clock_now(NULL); + utime_t tm = ceph_clock_now(); env.port = 80; env.content_length = req->content_length; diff --git a/src/rgw/rgw_log.cc b/src/rgw/rgw_log.cc index 3b91b0685bf85..b8cbcb5ae1606 100644 --- a/src/rgw/rgw_log.cc +++ b/src/rgw/rgw_log.cc @@ -113,7 +113,7 @@ public: timer.init(); Mutex::Locker l(timer_lock); set_timer(); - utime_t ts = ceph_clock_now(cct); + utime_t ts = ceph_clock_now(); recalc_round_timestamp(ts); } @@ -223,7 +223,7 @@ static void log_usage(struct req_state *s, const string& op_name) entry.add(op_name, data); - utime_t ts = ceph_clock_now(s->cct); + utime_t ts = ceph_clock_now(); usage_logger->insert(ts, entry); } @@ -371,7 +371,7 @@ int rgw_log_op(RGWRados *store, RGWREST* const rest, struct req_state *s, uint64_t bytes_received = ACCOUNTING_IO(s)->get_bytes_received(); entry.time = s->time; - entry.total_time = ceph_clock_now(s->cct) - s->time; + entry.total_time = ceph_clock_now() - s->time; entry.bytes_sent = bytes_sent; entry.bytes_received = bytes_received; if (s->err.http_ret) { diff --git a/src/rgw/rgw_object_expirer_core.cc b/src/rgw/rgw_object_expirer_core.cc index 1480a29fd65b2..9bbd0095112ad 100644 --- a/src/rgw/rgw_object_expirer_core.cc +++ b/src/rgw/rgw_object_expirer_core.cc @@ -157,7 +157,7 @@ bool RGWObjectExpirer::process_single_shard(const string& shard, int num_entries = cct->_conf->rgw_objexp_chunk_size; int max_secs = cct->_conf->rgw_objexp_gc_interval; - utime_t end = ceph_clock_now(cct); + utime_t end = ceph_clock_now(); end += max_secs; rados::cls::lock::Lock l(objexp_lock_name); @@ -192,7 +192,7 @@ bool RGWObjectExpirer::process_single_shard(const string& shard, trim_chunk(shard, last_run, round_start, marker, out_marker); } - utime_t now = ceph_clock_now(g_ceph_context); + utime_t now = ceph_clock_now(); if (now >= end) { done = false; break; @@ -252,7 +252,7 @@ void RGWObjectExpirer::stop_processor() void *RGWObjectExpirer::OEWorker::entry() { utime_t last_run; do { - utime_t start = ceph_clock_now(cct); + utime_t start = ceph_clock_now(); ldout(cct, 2) << "object expiration: start" << dendl; if (oe->inspect_all_shards(last_run, start)) { /* All shards have been processed properly. 
Next time we can start @@ -265,7 +265,7 @@ void *RGWObjectExpirer::OEWorker::entry() { if (oe->going_down()) break; - utime_t end = ceph_clock_now(cct); + utime_t end = ceph_clock_now(); end -= start; int secs = cct->_conf->rgw_objexp_gc_interval; @@ -275,7 +275,7 @@ void *RGWObjectExpirer::OEWorker::entry() { secs -= end.sec(); lock.Lock(); - cond.WaitInterval(cct, lock, utime_t(secs, 0)); + cond.WaitInterval(lock, utime_t(secs, 0)); lock.Unlock(); } while (!oe->going_down()); diff --git a/src/rgw/rgw_op.cc b/src/rgw/rgw_op.cc index 5eaf0e9981d0e..fd10cfcb73979 100644 --- a/src/rgw/rgw_op.cc +++ b/src/rgw/rgw_op.cc @@ -890,7 +890,7 @@ static int iterate_user_manifest_parts(CephContext * const cct, bool is_truncated; vector objs; - utime_t start_time = ceph_clock_now(cct); + utime_t start_time = ceph_clock_now(); RGWRados::Bucket target(store, *pbucket_info); RGWRados::Bucket::List list_op(&target); @@ -927,7 +927,7 @@ static int iterate_user_manifest_parts(CephContext * const cct, } perfcounter->tinc(l_rgw_get_lat, - (ceph_clock_now(cct) - start_time)); + (ceph_clock_now() - start_time)); if (found_start && !handled_end) { len_count += end_ofs - start_ofs; @@ -941,7 +941,7 @@ static int iterate_user_manifest_parts(CephContext * const cct, } handled_end = found_end; - start_time = ceph_clock_now(cct); + start_time = ceph_clock_now(); } } while (is_truncated); @@ -987,7 +987,7 @@ static int iterate_slo_parts(CephContext *cct, return 0; } - utime_t start_time = ceph_clock_now(cct); + utime_t start_time = ceph_clock_now(); map::iterator iter = slo_parts.upper_bound(ofs); if (iter != slo_parts.begin()) { @@ -1020,7 +1020,7 @@ static int iterate_slo_parts(CephContext *cct, } perfcounter->tinc(l_rgw_get_lat, - (ceph_clock_now(cct) - start_time)); + (ceph_clock_now() - start_time)); if (found_start) { if (cb) { @@ -1030,7 +1030,7 @@ static int iterate_slo_parts(CephContext *cct, } } - start_time = ceph_clock_now(cct); + start_time = ceph_clock_now(); } return 0; @@ -1263,7 +1263,7 @@ int RGWGetObj::handle_slo_manifest(bufferlist& bl) int RGWGetObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len) { /* garbage collection related handling */ - utime_t start_time = ceph_clock_now(s->cct); + utime_t start_time = ceph_clock_now(); if (start_time > gc_invalidate_time) { int r = store->defer_gc(s->obj_ctx, obj); if (r < 0) { @@ -1318,7 +1318,7 @@ static bool object_is_expired(map& attrs) { return false; } - if (delete_at <= ceph_clock_now(g_ceph_context)) { + if (delete_at <= ceph_clock_now()) { return true; } } @@ -1330,7 +1330,7 @@ void RGWGetObj::execute() { utime_t start_time = s->time; bufferlist bl; - gc_invalidate_time = ceph_clock_now(s->cct); + gc_invalidate_time = ceph_clock_now(); gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2); bool need_decompress; @@ -1467,7 +1467,7 @@ void RGWGetObj::execute() op_ret = filter->flush(); perfcounter->tinc(l_rgw_get_lat, - (ceph_clock_now(s->cct) - start_time)); + (ceph_clock_now() - start_time)); if (op_ret < 0) { goto done_err; } @@ -3188,7 +3188,7 @@ void RGWPutObj::execute() done: dispose_processor(processor); perfcounter->tinc(l_rgw_put_lat, - (ceph_clock_now(s->cct) - s->time)); + (ceph_clock_now() - s->time)); } int RGWPostObj::verify_permission() diff --git a/src/rgw/rgw_orphan.cc b/src/rgw/rgw_orphan.cc index 49a602d880644..4739950d36f57 100644 --- a/src/rgw/rgw_orphan.cc +++ b/src/rgw/rgw_orphan.cc @@ -206,7 +206,7 @@ int RGWOrphanSearch::init(const string& job_name, RGWOrphanSearchInfo *info) { search_info = *info; 
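The RGW worker threads in these hunks (RGWGC::GCWorker, RGWLC::LCWorker, RGWObjectExpirer::OEWorker above, RGWRadosThread::Worker below) share one loop shape: time a pass, then sleep out the remainder of the period with the now two-argument Cond::WaitInterval(). A sketch of a single pass; process_once and period_secs are hypothetical stand-ins:

    #include "common/Mutex.h"
    #include "common/Cond.h"
    #include "common/Clock.h"

    void worker_pass(Mutex &lock, Cond &cond, int period_secs,
                     void (*process_once)())
    {
      utime_t start = ceph_clock_now();
      process_once();
      utime_t end = ceph_clock_now();
      end -= start;                      // elapsed, as in GCWorker::entry()
      if (period_secs > end.sec()) {
        int secs = period_secs - end.sec();
        lock.Lock();
        cond.WaitInterval(lock, utime_t(secs, 0));  // no CephContext* argument
        lock.Unlock();
      }
    }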
search_info.job_name = job_name; search_info.num_shards = (info->num_shards ? info->num_shards : DEFAULT_NUM_SHARDS); - search_info.start_time = ceph_clock_now(store->ctx()); + search_info.start_time = ceph_clock_now(); search_stage = RGWOrphanSearchStage(ORPHAN_SEARCH_STAGE_INIT); r = save_state(); diff --git a/src/rgw/rgw_policy_s3.cc b/src/rgw/rgw_policy_s3.cc index 8af70a832223a..99001f2f71d57 100644 --- a/src/rgw/rgw_policy_s3.cc +++ b/src/rgw/rgw_policy_s3.cc @@ -182,7 +182,7 @@ int RGWPolicy::add_condition(const string& op, const string& first, const string int RGWPolicy::check(RGWPolicyEnv *env, string& err_msg) { - uint64_t now = ceph_clock_now(NULL).sec(); + uint64_t now = ceph_clock_now().sec(); if (expires <= now) { dout(0) << "NOTICE: policy calculated as expired: " << expiration_str << dendl; err_msg = "Policy expired"; diff --git a/src/rgw/rgw_quota.cc b/src/rgw/rgw_quota.cc index 82041e48c1890..f1062121788f5 100644 --- a/src/rgw/rgw_quota.cc +++ b/src/rgw/rgw_quota.cc @@ -172,7 +172,7 @@ template void RGWQuotaCache::set_stats(const rgw_user& user, rgw_bucket& bucket, RGWQuotaCacheStats& qs, RGWStorageStats& stats) { qs.stats = stats; - qs.expiration = ceph_clock_now(store->ctx()); + qs.expiration = ceph_clock_now(); qs.async_refresh_time = qs.expiration; qs.expiration += store->ctx()->_conf->rgw_bucket_quota_ttl; qs.async_refresh_time += store->ctx()->_conf->rgw_bucket_quota_ttl / 2; @@ -183,7 +183,7 @@ void RGWQuotaCache::set_stats(const rgw_user& user, rgw_bucket& bucket, RGWQu template int RGWQuotaCache::get_stats(const rgw_user& user, rgw_bucket& bucket, RGWStorageStats& stats, RGWQuotaInfo& quota) { RGWQuotaCacheStats qs; - utime_t now = ceph_clock_now(store->ctx()); + utime_t now = ceph_clock_now(); if (map_find(user, bucket, qs)) { if (qs.async_refresh_time.sec() > 0 && now >= qs.async_refresh_time) { int r = async_refresh(user, bucket, qs); @@ -194,7 +194,8 @@ int RGWQuotaCache::get_stats(const rgw_user& user, rgw_bucket& bucket, RGWSto } } - if (can_use_cached_stats(quota, qs.stats) && qs.expiration > ceph_clock_now(store->ctx())) { + if (can_use_cached_stats(quota, qs.stats) && qs.expiration > + ceph_clock_now()) { stats = qs.stats; return 0; } @@ -426,7 +427,7 @@ class RGWUserStatsCache : public RGWQuotaCache { break; lock.Lock(); - cond.WaitInterval(cct, lock, utime_t(cct->_conf->rgw_user_quota_bucket_sync_interval, 0)); + cond.WaitInterval(lock, utime_t(cct->_conf->rgw_user_quota_bucket_sync_interval, 0)); lock.Unlock(); } while (!stats->going_down()); ldout(cct, 20) << "BucketsSyncThread: done" << dendl; @@ -466,7 +467,7 @@ class RGWUserStatsCache : public RGWQuotaCache { } lock.Lock(); - cond.WaitInterval(cct, lock, utime_t(cct->_conf->rgw_user_quota_sync_interval, 0)); + cond.WaitInterval(lock, utime_t(cct->_conf->rgw_user_quota_sync_interval, 0)); lock.Unlock(); } while (!stats->going_down()); ldout(cct, 20) << "UserSyncThread: done" << dendl; diff --git a/src/rgw/rgw_rados.cc b/src/rgw/rgw_rados.cc index 4f7af0f4785ea..db12138e05b7e 100644 --- a/src/rgw/rgw_rados.cc +++ b/src/rgw/rgw_rados.cc @@ -2855,7 +2855,7 @@ void *RGWRadosThread::Worker::entry() { utime_t interval = utime_t(msec / 1000, (msec % 1000) * 1000000); do { - utime_t start = ceph_clock_now(cct); + utime_t start = ceph_clock_now(); int r = processor->process(); if (r < 0) { dout(0) << "ERROR: processor->process() returned error r=" << r << dendl; @@ -2864,7 +2864,7 @@ void *RGWRadosThread::Worker::entry() { if (processor->going_down()) break; - utime_t end = ceph_clock_now(cct); + utime_t 
end = ceph_clock_now(); end -= start; uint64_t cur_msec = processor->interval_msec(); @@ -2881,7 +2881,7 @@ void *RGWRadosThread::Worker::entry() { wait_time -= end; lock.Lock(); - cond.WaitInterval(cct, lock, wait_time); + cond.WaitInterval(lock, wait_time); lock.Unlock(); } else { lock.Lock(); @@ -5360,7 +5360,7 @@ int RGWRados::create_bucket(RGWUserInfo& owner, rgw_bucket& bucket, info.bucket_index_shard_hash_type = RGWBucketInfo::MOD; info.requester_pays = false; if (real_clock::is_zero(creation_time)) { - info.creation_time = ceph::real_clock::now(cct); + info.creation_time = ceph::real_clock::now(); } else { info.creation_time = creation_time; } @@ -11809,7 +11809,7 @@ int RGWRados::process_lc() int RGWRados::process_expire_objects() { - obj_expirer->inspect_all_shards(utime_t(), ceph_clock_now(cct)); + obj_expirer->inspect_all_shards(utime_t(), ceph_clock_now()); return 0; } @@ -12645,7 +12645,7 @@ int RGWStateLog::store_entry(const string& client_id, const string& op_id, const if (check_state) { cls_statelog_check_state(op, client_id, op_id, object, *check_state); } - utime_t ts = ceph_clock_now(store->ctx()); + utime_t ts = ceph_clock_now(); bufferlist nobl; cls_statelog_add(op, client_id, op_id, object, ts, state, (bl ? *bl : nobl)); r = ioctx.operate(oid, &op); diff --git a/src/rgw/rgw_request.cc b/src/rgw/rgw_request.cc index ed5335f7ab7e1..e51b3d7e36be1 100644 --- a/src/rgw/rgw_request.cc +++ b/src/rgw/rgw_request.cc @@ -21,7 +21,7 @@ void RGWRequest::log_format(struct req_state *s, const char *fmt, ...) } /* RGWRequest::log_format */ void RGWRequest::log_init() { - ts = ceph_clock_now(g_ceph_context); + ts = ceph_clock_now(); } void RGWRequest::log(struct req_state *s, const char *msg) { @@ -30,7 +30,7 @@ void RGWRequest::log(struct req_state *s, const char *msg) { req_str.append(" "); req_str.append(s->info.request_uri); } - utime_t t = ceph_clock_now(g_ceph_context) - ts; + utime_t t = ceph_clock_now() - ts; dout(2) << "req " << id << ":" << t << ":" << s->dialect << ":" << req_str << ":" << (op ? 
op->name() : "") << ":" << msg << dendl; diff --git a/src/rgw/rgw_rest_client.cc b/src/rgw/rgw_rest_client.cc index 24ea21dcdb5a1..1fe03c6fd1533 100644 --- a/src/rgw/rgw_rest_client.cc +++ b/src/rgw/rgw_rest_client.cc @@ -95,9 +95,9 @@ int RGWRESTSimpleRequest::receive_header(void *ptr, size_t len) return 0; } -static void get_new_date_str(CephContext *cct, string& date_str) +static void get_new_date_str(string& date_str) { - utime_t tm = ceph_clock_now(cct); + utime_t tm = ceph_clock_now(); stringstream s; tm.asctime(s); date_str = s.str(); @@ -117,7 +117,7 @@ int RGWRESTSimpleRequest::execute(RGWAccessKey& key, const char *method, const c new_url.append(new_resource); string date_str; - get_new_date_str(cct, date_str); + get_new_date_str(date_str); headers.push_back(pair("HTTP_DATE", date_str)); string canonical_header; @@ -248,7 +248,7 @@ int RGWRESTSimpleRequest::forward_request(RGWAccessKey& key, req_info& info, siz { string date_str; - get_new_date_str(cct, date_str); + get_new_date_str(date_str); RGWEnv new_env; req_info new_info(cct, &new_env); @@ -433,7 +433,7 @@ int RGWRESTStreamWriteRequest::put_obj_init(RGWAccessKey& key, rgw_obj& obj, uin new_url.append("/"); string date_str; - get_new_date_str(cct, date_str); + get_new_date_str(date_str); RGWEnv new_env; req_info new_info(cct, &new_env); @@ -631,7 +631,7 @@ int RGWRESTStreamRWRequest::get_resource(RGWAccessKey& key, map& new_url.append("/"); string date_str; - get_new_date_str(cct, date_str); + get_new_date_str(date_str); RGWEnv new_env; req_info new_info(cct, &new_env); diff --git a/src/rgw/rgw_rest_s3.cc b/src/rgw/rgw_rest_s3.cc index a6417c51c9f3a..63b100e19a69d 100644 --- a/src/rgw/rgw_rest_s3.cc +++ b/src/rgw/rgw_rest_s3.cc @@ -3526,7 +3526,7 @@ int RGW_Auth_S3::authorize_v4(RGWRados *store, struct req_state *s, bool force_b bool using_qs; uint64_t now_req = 0; - uint64_t now = ceph_clock_now(s->cct); + uint64_t now = ceph_clock_now(); /* v4 requires rados auth */ if (!store->ctx()->_conf->rgw_s3_auth_use_rados) { diff --git a/src/rgw/rgw_rest_swift.cc b/src/rgw/rgw_rest_swift.cc index 463dcd6bebe39..2ce570306bb95 100644 --- a/src/rgw/rgw_rest_swift.cc +++ b/src/rgw/rgw_rest_swift.cc @@ -80,7 +80,7 @@ static void dump_account_metadata(struct req_state * const s, const RGWAccessControlPolicy_SWIFTAcct &policy) { /* Adding X-Timestamp to keep align with Swift API */ - dump_header(s, "X-Timestamp", ceph_clock_now(g_ceph_context)); + dump_header(s, "X-Timestamp", ceph_clock_now()); dump_header(s, "X-Account-Container-Count", buckets_count); dump_header(s, "X-Account-Object-Count", buckets_object_count); @@ -1545,7 +1545,7 @@ void RGWInfo_ObjStore_SWIFT::list_slo_data(Formatter& formatter, bool RGWInfo_ObjStore_SWIFT::is_expired(const std::string& expires, CephContext* cct) { string err; - const utime_t now = ceph_clock_now(cct); + const utime_t now = ceph_clock_now(); const uint64_t expiration = (uint64_t)strict_strtoll(expires.c_str(), 10, &err); if (!err.empty()) { diff --git a/src/rgw/rgw_swift_auth.cc b/src/rgw/rgw_swift_auth.cc index 80c2f483f7733..5b8cec9c0264c 100644 --- a/src/rgw/rgw_swift_auth.cc +++ b/src/rgw/rgw_swift_auth.cc @@ -116,7 +116,7 @@ void RGWTempURLAuthEngine::get_owner_info(RGWUserInfo& owner_info) const bool RGWTempURLAuthEngine::is_expired(const std::string& expires) const { string err; - const utime_t now = ceph_clock_now(g_ceph_context); + const utime_t now = ceph_clock_now(); const uint64_t expiration = (uint64_t)strict_strtoll(expires.c_str(), 10, &err); if (!err.empty()) { @@ -373,7 +373,7 @@ 
static int encode_token(CephContext *cct, string& swift_user, string& key, if (ret < 0) return ret; - utime_t expiration = ceph_clock_now(cct); + utime_t expiration = ceph_clock_now(); expiration += cct->_conf->rgw_swift_token_expiration; return build_token(swift_user, key, nonce, expiration, bl); @@ -426,7 +426,7 @@ RGWAuthApplier::aplptr_t RGWSignedTokenAuthEngine::authenticate() const throw -EINVAL; } - const utime_t now = ceph_clock_now(cct); + const utime_t now = ceph_clock_now(); if (expiration < now) { ldout(cct, 0) << "NOTICE: old timed out token was used now=" << now << " token.expiration=" << expiration diff --git a/src/test/TestTimers.cc b/src/test/TestTimers.cc index 19f67f7b51575..69be6921ceff1 100644 --- a/src/test/TestTimers.cc +++ b/src/test/TestTimers.cc @@ -94,7 +94,7 @@ static int basic_timer_test(T &timer, Mutex *lock) if (lock) lock->Lock(); utime_t inc(2 * i, 0); - utime_t t = ceph_clock_now(g_ceph_context) + inc; + utime_t t = ceph_clock_now() + inc; timer.add_event_at(t, test_contexts[i]); if (lock) lock->Unlock(); @@ -133,7 +133,7 @@ static int test_out_of_order_insertion(SafeTimer &timer, Mutex *lock) { utime_t inc(100, 0); - utime_t t = ceph_clock_now(g_ceph_context) + inc; + utime_t t = ceph_clock_now() + inc; lock->Lock(); timer.add_event_at(t, test_contexts[0]); lock->Unlock(); @@ -141,7 +141,7 @@ static int test_out_of_order_insertion(SafeTimer &timer, Mutex *lock) { utime_t inc(2, 0); - utime_t t = ceph_clock_now(g_ceph_context) + inc; + utime_t t = ceph_clock_now() + inc; lock->Lock(); timer.add_event_at(t, test_contexts[1]); lock->Unlock(); @@ -182,7 +182,7 @@ static int safe_timer_cancel_all_test(SafeTimer &safe_timer, Mutex& safe_timer_l safe_timer_lock.Lock(); for (int i = 0; i < MAX_TEST_CONTEXTS; ++i) { utime_t inc(4 * i, 0); - utime_t t = ceph_clock_now(g_ceph_context) + inc; + utime_t t = ceph_clock_now() + inc; safe_timer.add_event_at(t, test_contexts[i]); } safe_timer_lock.Unlock(); @@ -220,7 +220,7 @@ static int safe_timer_cancellation_test(SafeTimer &safe_timer, Mutex& safe_timer safe_timer_lock.Lock(); for (int i = 0; i < MAX_TEST_CONTEXTS; ++i) { utime_t inc(4 * i, 0); - utime_t t = ceph_clock_now(g_ceph_context) + inc; + utime_t t = ceph_clock_now() + inc; safe_timer.add_event_at(t, test_contexts[i]); } safe_timer_lock.Unlock(); diff --git a/src/test/bench_log.cc b/src/test/bench_log.cc index 9f5892f1e23e8..0caa06c82a6bc 100644 --- a/src/test/bench_log.cc +++ b/src/test/bench_log.cc @@ -42,7 +42,7 @@ int main(int argc, const char **argv) auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_OSD, CODE_ENVIRONMENT_UTILITY, 0); - utime_t start = ceph_clock_now(NULL); + utime_t start = ceph_clock_now(); list ls; for (int i=0; i_log->flush(); - utime_t end = ceph_clock_now(NULL); + utime_t end = ceph_clock_now(); utime_t dur = end - start; cout << dur << std::endl; diff --git a/src/test/bufferlist.cc b/src/test/bufferlist.cc index 6042c515ff78f..253de0106dad5 100644 --- a/src/test/bufferlist.cc +++ b/src/test/bufferlist.cc @@ -219,12 +219,12 @@ TEST(Buffer, constructors) { void bench_buffer_alloc(int size, int num) { - utime_t start = ceph_clock_now(NULL); + utime_t start = ceph_clock_now(); for (int i=0; i start + dur); + ASSERT_TRUE(ceph_clock_now() > start + dur); } else { ASSERT_EQ(-EEXIST, r); } diff --git a/src/test/cls_log/test_cls_log.cc b/src/test/cls_log/test_cls_log.cc index 6b8a31d7262cd..ca5ebafaf9bc2 100644 --- a/src/test/cls_log/test_cls_log.cc +++ b/src/test/cls_log/test_cls_log.cc @@ -122,7 +122,7 @@ TEST(cls_rgw, 
test_log_add_same_time) ASSERT_EQ(0, ioctx.create(oid, true)); /* generate log */ - utime_t start_time = ceph_clock_now(g_ceph_context); + utime_t start_time = ceph_clock_now(); generate_log(ioctx, oid, 10, start_time, false); librados::ObjectReadOperation *rop = new_rop(); @@ -209,7 +209,7 @@ TEST(cls_rgw, test_log_add_different_time) ASSERT_EQ(0, ioctx.create(oid, true)); /* generate log */ - utime_t start_time = ceph_clock_now(g_ceph_context); + utime_t start_time = ceph_clock_now(); generate_log(ioctx, oid, 10, start_time, true); librados::ObjectReadOperation *rop = new_rop(); @@ -305,7 +305,7 @@ TEST(cls_rgw, test_log_trim) ASSERT_EQ(0, ioctx.create(oid, true)); /* generate log */ - utime_t start_time = ceph_clock_now(g_ceph_context); + utime_t start_time = ceph_clock_now(); generate_log(ioctx, oid, 10, start_time, true); librados::ObjectReadOperation *rop = new_rop(); diff --git a/src/test/cls_statelog/test_cls_statelog.cc b/src/test/cls_statelog/test_cls_statelog.cc index accab956c1909..9cc6175ad785c 100644 --- a/src/test/cls_statelog/test_cls_statelog.cc +++ b/src/test/cls_statelog/test_cls_statelog.cc @@ -38,7 +38,7 @@ void add_log(librados::ObjectWriteOperation *op, const string& client_id, const bufferlist bl; ::encode(state, bl); - utime_t ts = ceph_clock_now(g_ceph_context); + utime_t ts = ceph_clock_now(); cls_statelog_add(*op, client_id, op_id, obj, ts, state, bl); } diff --git a/src/test/common/test_crc32c.cc b/src/test/common/test_crc32c.cc index 54200b3a0dbaf..93bdd4385e68a 100644 --- a/src/test/common/test_crc32c.cc +++ b/src/test/common/test_crc32c.cc @@ -50,42 +50,42 @@ TEST(Crc32c, Performance) { std::cout << "calculating crc" << std::endl; { - utime_t start = ceph_clock_now(NULL); + utime_t start = ceph_clock_now(); unsigned val = ceph_crc32c(0, (unsigned char *)a, len); - utime_t end = ceph_clock_now(NULL); + utime_t end = ceph_clock_now(); float rate = (float)len / (float)(1024*1024) / (float)(end - start); std::cout << "best choice = " << rate << " MB/sec" << std::endl; ASSERT_EQ(261108528u, val); } { - utime_t start = ceph_clock_now(NULL); + utime_t start = ceph_clock_now(); unsigned val = ceph_crc32c(0xffffffff, (unsigned char *)a, len); - utime_t end = ceph_clock_now(NULL); + utime_t end = ceph_clock_now(); float rate = (float)len / (float)(1024*1024) / (float)(end - start); std::cout << "best choice 0xffffffff = " << rate << " MB/sec" << std::endl; ASSERT_EQ(3895876243u, val); } { - utime_t start = ceph_clock_now(NULL); + utime_t start = ceph_clock_now(); unsigned val = ceph_crc32c_sctp(0, (unsigned char *)a, len); - utime_t end = ceph_clock_now(NULL); + utime_t end = ceph_clock_now(); float rate = (float)len / (float)(1024*1024) / (float)(end - start); std::cout << "sctp = " << rate << " MB/sec" << std::endl; ASSERT_EQ(261108528u, val); } { - utime_t start = ceph_clock_now(NULL); + utime_t start = ceph_clock_now(); unsigned val = ceph_crc32c_intel_baseline(0, (unsigned char *)a, len); - utime_t end = ceph_clock_now(NULL); + utime_t end = ceph_clock_now(); float rate = (float)len / (float)(1024*1024) / (float)(end - start); std::cout << "intel baseline = " << rate << " MB/sec" << std::endl; ASSERT_EQ(261108528u, val); } if (ceph_arch_aarch64_crc32) // Skip if CRC32C instructions are not defined. 
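The Crc32c performance test above times each implementation the same way: two clock samples around the work, then a cast of the utime_t difference to seconds to get MB/sec. The same bracketing extracted into a sketch; do_work and len are hypothetical stand-ins:

    #include "common/Clock.h"

    float mb_per_sec(void (*do_work)(), size_t len)
    {
      utime_t start = ceph_clock_now();
      do_work();
      utime_t end = ceph_clock_now();
      // utime_t converts to double seconds, exactly as the test's
      // (float)(end - start) does.
      return (float)len / (float)(1024 * 1024) / (float)(end - start);
    }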
{ - utime_t start = ceph_clock_now(NULL); + utime_t start = ceph_clock_now(); unsigned val = ceph_crc32c_aarch64(0, (unsigned char *)a, len); - utime_t end = ceph_clock_now(NULL); + utime_t end = ceph_clock_now(); float rate = (float)len / (float)(1024*1024) / (float)(end - start); std::cout << "aarch64 = " << rate << " MB/sec" << std::endl; ASSERT_EQ(261108528u, val); diff --git a/src/test/crypto.cc b/src/test/crypto.cc index 03530c18168aa..9e33d233d0e74 100644 --- a/src/test/crypto.cc +++ b/src/test/crypto.cc @@ -158,14 +158,14 @@ TEST(AES, Loop) { TEST(AES, LoopKey) { bufferptr k(16); get_random_bytes(k.c_str(), k.length()); - CryptoKey key(CEPH_CRYPTO_AES, ceph_clock_now(NULL), k); + CryptoKey key(CEPH_CRYPTO_AES, ceph_clock_now(), k); bufferlist data; bufferptr r(128); get_random_bytes(r.c_str(), r.length()); data.append(r); - utime_t start = ceph_clock_now(NULL); + utime_t start = ceph_clock_now(); int n = 100000; for (int i=0; i encoded; code = erasure_code->encode(want_to_encode, in, &encoded); if (code) return code; } - utime_t end_time = ceph_clock_now(g_ceph_context); + utime_t end_time = ceph_clock_now(); cout << (end_time - begin_time) << "\t" << (max_iterations * (in_size / 1024)) << endl; return 0; } @@ -295,7 +295,7 @@ int ErasureCodeBench::decode() display_chunks(encoded, erasure_code->get_chunk_count()); } - utime_t begin_time = ceph_clock_now(g_ceph_context); + utime_t begin_time = ceph_clock_now(); for (int i = 0; i < max_iterations; i++) { if (exhaustive_erasures) { code = decode_erasures(encoded, encoded, 0, erasures, erasure_code); @@ -321,7 +321,7 @@ int ErasureCodeBench::decode() return code; } } - utime_t end_time = ceph_clock_now(g_ceph_context); + utime_t end_time = ceph_clock_now(); cout << (end_time - begin_time) << "\t" << (max_iterations * (in_size / 1024)) << endl; return 0; } diff --git a/src/test/journal/RadosTestFixture.cc b/src/test/journal/RadosTestFixture.cc index a066559c92330..0a71a85904389 100644 --- a/src/test/journal/RadosTestFixture.cc +++ b/src/test/journal/RadosTestFixture.cc @@ -119,8 +119,7 @@ bool RadosTestFixture::wait_for_update(journal::JournalMetadataPtr metadata) { Mutex::Locker locker(m_listener.mutex); while (m_listener.updates[metadata.get()] == 0) { if (m_listener.cond.WaitInterval( - reinterpret_cast(m_ioctx.cct()), - m_listener.mutex, utime_t(10, 0)) != 0) { + m_listener.mutex, utime_t(10, 0)) != 0) { return false; } } diff --git a/src/test/journal/test_JournalPlayer.cc b/src/test/journal/test_JournalPlayer.cc index 03255d06b7662..6888ee06bb52c 100644 --- a/src/test/journal/test_JournalPlayer.cc +++ b/src/test/journal/test_JournalPlayer.cc @@ -101,7 +101,6 @@ public: if (m_replay_hander.entries_available) { m_replay_hander.entries_available = false; } else if (m_replay_hander.cond.WaitInterval( - reinterpret_cast(m_ioctx.cct()), m_replay_hander.lock, utime_t(10, 0)) != 0) { break; } @@ -117,7 +116,6 @@ public: player->try_pop_front(&entry, &commit_tid); if (m_replay_hander.cond.WaitInterval( - reinterpret_cast(m_ioctx.cct()), m_replay_hander.lock, utime_t(10, 0)) != 0) { return false; } diff --git a/src/test/journal/test_ObjectRecorder.cc b/src/test/journal/test_ObjectRecorder.cc index e2113365af9c7..1e0a366d7b410 100644 --- a/src/test/journal/test_ObjectRecorder.cc +++ b/src/test/journal/test_ObjectRecorder.cc @@ -372,7 +372,6 @@ TEST_F(TestObjectRecorder, Close) { Mutex::Locker locker(m_handler.lock); while (!m_handler.is_closed) { if (m_handler.cond.WaitInterval( - reinterpret_cast(m_ioctx.cct()), m_handler.lock, utime_t(10, 
0)) != 0) { break; } @@ -423,7 +422,6 @@ TEST_F(TestObjectRecorder, Overflow) { Mutex::Locker locker(m_handler.lock); while (m_handler.overflows == 0) { if (m_handler.cond.WaitInterval( - reinterpret_cast(m_ioctx.cct()), m_handler.lock, utime_t(10, 0)) != 0) { break; } diff --git a/src/test/kv_store_bench.cc b/src/test/kv_store_bench.cc index 6bf7d3883e995..3cdc7cfe01a79 100644 --- a/src/test/kv_store_bench.cc +++ b/src/test/kv_store_bench.cc @@ -326,7 +326,7 @@ void KvStoreBench::aio_callback_timed(int * err, void *arg) { //throughput args->kvsb->data.throughput_jf.open_object_section("throughput"); args->kvsb->data.throughput_jf.dump_unsigned(string(1, args->op).c_str(), - ceph_clock_now(g_ceph_context)); + ceph_clock_now()); args->kvsb->data.throughput_jf.close_section(); data_lock->Unlock(); diff --git a/src/test/kv_store_bench.h b/src/test/kv_store_bench.h index d12c5e850c0b1..da23c402bc161 100644 --- a/src/test/kv_store_bench.h +++ b/src/test/kv_store_bench.h @@ -50,10 +50,10 @@ struct StopWatch { utime_t end_time; void start_time() { - begin_time = ceph_clock_now(g_ceph_context); + begin_time = ceph_clock_now(); } void stop_time() { - end_time = ceph_clock_now(g_ceph_context); + end_time = ceph_clock_now(); } double get_time() { return (end_time - begin_time) * 1000; diff --git a/src/test/librados/tier.cc b/src/test/librados/tier.cc index 6a1d21ab64011..0a1dbf22c4925 100755 --- a/src/test/librados/tier.cc +++ b/src/test/librados/tier.cc @@ -2162,11 +2162,11 @@ TEST_F(LibRadosTwoPoolsPP, HitSetRead) { cache_ioctx.set_namespace(""); // keep reading until we see our object appear in the HitSet - utime_t start = ceph_clock_now(NULL); + utime_t start = ceph_clock_now(); utime_t hard_stop = start + utime_t(600, 0); while (true) { - utime_t now = ceph_clock_now(NULL); + utime_t now = ceph_clock_now(); ASSERT_TRUE(now < hard_stop); string name = "foo"; @@ -2345,7 +2345,7 @@ TEST_F(LibRadosTwoPoolsPP, HitSetTrim) { cache_ioctx.set_namespace(""); // do a bunch of writes and make sure the hitsets rotate - utime_t start = ceph_clock_now(NULL); + utime_t start = ceph_clock_now(); utime_t hard_stop = start + utime_t(count * period * 50, 0); time_t first = 0; @@ -2378,7 +2378,7 @@ TEST_F(LibRadosTwoPoolsPP, HitSetTrim) { } } - utime_t now = ceph_clock_now(NULL); + utime_t now = ceph_clock_now(); ASSERT_TRUE(now < hard_stop); sleep(1); @@ -4828,11 +4828,11 @@ TEST_F(LibRadosTwoPoolsECPP, HitSetRead) { cache_ioctx.set_namespace(""); // keep reading until we see our object appear in the HitSet - utime_t start = ceph_clock_now(NULL); + utime_t start = ceph_clock_now(); utime_t hard_stop = start + utime_t(600, 0); while (true) { - utime_t now = ceph_clock_now(NULL); + utime_t now = ceph_clock_now(); ASSERT_TRUE(now < hard_stop); string name = "foo"; @@ -4971,7 +4971,7 @@ TEST_F(LibRadosTwoPoolsECPP, HitSetTrim) { cache_ioctx.set_namespace(""); // do a bunch of writes and make sure the hitsets rotate - utime_t start = ceph_clock_now(NULL); + utime_t start = ceph_clock_now(); utime_t hard_stop = start + utime_t(count * period * 50, 0); time_t first = 0; @@ -5008,7 +5008,7 @@ TEST_F(LibRadosTwoPoolsECPP, HitSetTrim) { } } - utime_t now = ceph_clock_now(NULL); + utime_t now = ceph_clock_now(); ASSERT_TRUE(now < hard_stop); sleep(1); diff --git a/src/test/librados_test_stub/TestMemIoCtxImpl.cc b/src/test/librados_test_stub/TestMemIoCtxImpl.cc index ae04c86971e96..239b0779dabc4 100644 --- a/src/test/librados_test_stub/TestMemIoCtxImpl.cc +++ b/src/test/librados_test_stub/TestMemIoCtxImpl.cc @@ -614,7 +614,7 
@@ TestMemRadosClient::SharedFile TestMemIoCtxImpl::get_file( if (new_version) { file->snap_id = snapc.seq; - file->mtime = ceph_clock_now(m_client->cct()).sec(); + file->mtime = ceph_clock_now().sec(); m_pool->files[oid].push_back(file); } return file; diff --git a/src/test/librbd/journal/test_Entries.cc b/src/test/librbd/journal/test_Entries.cc index cea3f93dfadc4..3b9aa98140ea6 100644 --- a/src/test/librbd/journal/test_Entries.cc +++ b/src/test/librbd/journal/test_Entries.cc @@ -93,9 +93,9 @@ public: bool wait_for_entries_available(librbd::ImageCtx *ictx) { Mutex::Locker locker(m_replay_handler.lock); while (!m_replay_handler.entries_available) { - if (m_replay_handler.cond.WaitInterval(ictx->cct, m_replay_handler.lock, - utime_t(10, 0)) != 0) { - return false; + if (m_replay_handler.cond.WaitInterval(m_replay_handler.lock, + utime_t(10, 0)) != 0) { + return false; } } m_replay_handler.entries_available = false; diff --git a/src/test/librbd/test_ImageWatcher.cc b/src/test/librbd/test_ImageWatcher.cc index 52f52f2bd7c8d..c2f7d8f12cf91 100644 --- a/src/test/librbd/test_ImageWatcher.cc +++ b/src/test/librbd/test_ImageWatcher.cc @@ -135,8 +135,8 @@ public: bool wait_for_notifies(librbd::ImageCtx &ictx) { Mutex::Locker l(m_callback_lock); while (m_notifies.size() < m_notify_acks.size()) { - int r = m_callback_cond.WaitInterval(ictx.cct, m_callback_lock, - utime_t(10, 0)); + int r = m_callback_cond.WaitInterval(m_callback_lock, + utime_t(10, 0)); if (r != 0) { break; } @@ -238,7 +238,7 @@ struct ProgressContext : public librbd::ProgressContext { bool wait(librbd::ImageCtx *ictx, uint64_t offset_, uint64_t total_) { Mutex::Locker l(mutex); while (!received) { - int r = cond.WaitInterval(ictx->cct, mutex, utime_t(10, 0)); + int r = cond.WaitInterval(mutex, utime_t(10, 0)); if (r != 0) { break; } diff --git a/src/test/librbd/test_librbd.cc b/src/test/librbd/test_librbd.cc index 155b615a1ee11..0b2e0b2128117 100644 --- a/src/test/librbd/test_librbd.cc +++ b/src/test/librbd/test_librbd.cc @@ -633,8 +633,7 @@ TEST_F(TestLibRBD, UpdateWatchAndResize) void wait_for_size(size_t size) { Mutex::Locker locker(m_lock); while (m_size != size) { - CephContext* cct = reinterpret_cast(_rados.cct()); - ASSERT_EQ(0, m_cond.WaitInterval(cct, m_lock, seconds(5))); + ASSERT_EQ(0, m_cond.WaitInterval(m_lock, seconds(5))); } } rbd_image_t &m_image; @@ -685,8 +684,7 @@ TEST_F(TestLibRBD, UpdateWatchAndResizePP) void wait_for_size(size_t size) { Mutex::Locker locker(m_lock); while (m_size != size) { - CephContext* cct = reinterpret_cast(_rados.cct()); - ASSERT_EQ(0, m_cond.WaitInterval(cct, m_lock, seconds(5))); + ASSERT_EQ(0, m_cond.WaitInterval(m_lock, seconds(5))); } } librbd::Image &m_image; diff --git a/src/test/librbd/test_mock_ObjectWatcher.cc b/src/test/librbd/test_mock_ObjectWatcher.cc index f62661468e8ff..81b4c545f2443 100644 --- a/src/test/librbd/test_mock_ObjectWatcher.cc +++ b/src/test/librbd/test_mock_ObjectWatcher.cc @@ -153,8 +153,7 @@ public: bool wait_for_watch(MockImageCtx &mock_image_ctx, size_t count) { Mutex::Locker locker(m_lock); while (m_watch_count < count) { - if (m_cond.WaitInterval(mock_image_ctx.cct, m_lock, - utime_t(10, 0)) != 0) { + if (m_cond.WaitInterval(m_lock, utime_t(10, 0)) != 0) { return false; } } diff --git a/src/test/mon/test-mon-msg.cc b/src/test/mon/test-mon-msg.cc index 33ec9d0d1f6c7..9a5558dc2f1b9 100644 --- a/src/test/mon/test-mon-msg.cc +++ b/src/test/mon/test-mon-msg.cc @@ -262,9 +262,9 @@ public: if (timeout > 0) { utime_t cond_timeout; 
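The wait_for_* helpers in these test hunks reduce to the same bounded wait once WaitInterval() no longer takes a CephContext*: loop on a predicate, give up when the interval expires. A sketch with a hypothetical ready flag; WaitInterval() returns non-zero (ETIMEDOUT) on timeout:

    #include "common/Mutex.h"
    #include "common/Cond.h"

    bool wait_ready(Mutex &lock, Cond &cond, const bool &ready)
    {
      Mutex::Locker locker(lock);       // WaitInterval expects the lock held
      while (!ready) {
        if (cond.WaitInterval(lock, utime_t(10, 0)) != 0) {
          return false;                 // timed out before 'ready' was set
        }
      }
      return true;
    }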
      cond_timeout.set_from_double(timeout);
-      utime_t s = ceph_clock_now(g_ceph_context);
-      err = cond.WaitInterval(g_ceph_context, lock, cond_timeout);
-      utime_t e = ceph_clock_now(g_ceph_context);
+      utime_t s = ceph_clock_now();
+      err = cond.WaitInterval(lock, cond_timeout);
+      utime_t e = ceph_clock_now();
       dout(20) << __func__ << " took " << (e-s) << " seconds" << dendl;
     } else {
       err = cond.Wait(lock);
diff --git a/src/test/mon/test_mon_workloadgen.cc b/src/test/mon/test_mon_workloadgen.cc
index c1b26efa5573b..0da95b368f10a 100644
--- a/src/test/mon/test_mon_workloadgen.cc
+++ b/src/test/mon/test_mon_workloadgen.cc
@@ -449,7 +449,7 @@ class OSDStub : public TestStub
   void boot() {
     dout(1) << __func__ << " boot?" << dendl;
 
-    utime_t now = ceph_clock_now(messenger->cct);
+    utime_t now = ceph_clock_now();
     if ((last_boot_attempt > 0.0)
         && ((now - last_boot_attempt)) <= STUB_BOOT_INTERVAL) {
       dout(1) << __func__ << " backoff and try again later." << dendl;
@@ -465,7 +465,7 @@ class OSDStub : public TestStub
 
   void add_pg(pg_t pgid, epoch_t epoch, pg_t parent) {
 
-    utime_t now = ceph_clock_now(messenger->cct);
+    utime_t now = ceph_clock_now();
 
     pg_stat_t s;
     s.created = epoch;
@@ -543,7 +543,7 @@ class OSDStub : public TestStub
   void send_pg_stats() {
     dout(10) << __func__
             << " pgs " << pgs.size() << " osdmap " << osdmap << dendl;
-    utime_t now = ceph_clock_now(messenger->cct);
+    utime_t now = ceph_clock_now();
     MPGStats *mstats = new MPGStats(monc.get_fsid(), osdmap.get_epoch(), now);
 
     mstats->set_tid(1);
@@ -577,7 +577,7 @@ class OSDStub : public TestStub
     assert(pgs.count(pgid) > 0);
 
     pg_stat_t &s = pgs[pgid];
-    utime_t now = ceph_clock_now(messenger->cct);
+    utime_t now = ceph_clock_now();
 
     if (now - s.last_change < 10.0) {
       dout(10) << __func__
@@ -702,7 +702,7 @@ class OSDStub : public TestStub
     dout(10) << __func__
             << " send " << num_entries << " log messages" << dendl;
 
-    utime_t now = ceph_clock_now(messenger->cct);
+    utime_t now = ceph_clock_now();
     int seq = 0;
     for (; num_entries > 0; --num_entries) {
       LogEntry e;
diff --git a/src/test/msgr/test_async_networkstack.cc b/src/test/msgr/test_async_networkstack.cc
index d1f8f0a0960f5..af39f79c47c7f 100644
--- a/src/test/msgr/test_async_networkstack.cc
+++ b/src/test/msgr/test_async_networkstack.cc
@@ -124,12 +124,12 @@ class C_poll : public EventCallback {
     woken = true;
   }
   bool poll(int milliseconds) {
-    auto start = ceph::coarse_real_clock::now(g_ceph_context);
+    auto start = ceph::coarse_real_clock::now();
     while (!woken) {
       center->process_events(sleepus);
       usleep(sleepus);
       auto r = std::chrono::duration_cast<std::chrono::milliseconds>(
-        ceph::coarse_real_clock::now(g_ceph_context) - start);
+        ceph::coarse_real_clock::now() - start);
       if (r >= std::chrono::milliseconds(milliseconds))
         break;
     }
diff --git a/src/test/msgr/test_msgr.cc b/src/test/msgr/test_msgr.cc
index ac57c303d60ea..9aa5332b096bc 100644
--- a/src/test/msgr/test_msgr.cc
+++ b/src/test/msgr/test_msgr.cc
@@ -738,7 +738,7 @@ TEST_P(MessengerTest, MessageTest) {
     t += 1000*1000*500;
     Mutex::Locker l(cli_dispatcher.lock);
     while (!cli_dispatcher.got_new)
-      cli_dispatcher.cond.WaitInterval(g_ceph_context, cli_dispatcher.lock, t);
+      cli_dispatcher.cond.WaitInterval(cli_dispatcher.lock, t);
     ASSERT_TRUE(cli_dispatcher.got_new);
     cli_dispatcher.got_new = false;
   }
@@ -756,7 +756,7 @@ TEST_P(MessengerTest, MessageTest) {
     t += 1000*1000*500;
     Mutex::Locker l(cli_dispatcher.lock);
     while (!cli_dispatcher.got_new)
-      cli_dispatcher.cond.WaitInterval(g_ceph_context, cli_dispatcher.lock, t);
+      cli_dispatcher.cond.WaitInterval(cli_dispatcher.lock, t);
     ASSERT_TRUE(cli_dispatcher.got_new);
     cli_dispatcher.got_new = false;
   }
diff --git a/src/test/objectstore/test_kv.cc b/src/test/objectstore/test_kv.cc
index d55841c9434ed..b233a32b2f683 100644
--- a/src/test/objectstore/test_kv.cc
+++ b/src/test/objectstore/test_kv.cc
@@ -159,7 +159,7 @@ TEST_P(KVTest, PutReopen) {
 TEST_P(KVTest, BenchCommit) {
   int n = 1024;
   ASSERT_EQ(0, db->create_and_open(cout));
-  utime_t start = ceph_clock_now(NULL);
+  utime_t start = ceph_clock_now();
   {
     cout << "priming" << std::endl;
     // prime
@@ -183,7 +183,7 @@ TEST_P(KVTest, BenchCommit) {
     t->set("prefix", "key" + stringify(i), data);
     db->submit_transaction_sync(t);
   }
-  utime_t end = ceph_clock_now(NULL);
+  utime_t end = ceph_clock_now();
   utime_t dur = end - start;
   cout << n << " commits in " << dur << ", avg latency "
        << (dur / (double)n) << std::endl;
diff --git a/src/test/objectstore/test_transaction.cc b/src/test/objectstore/test_transaction.cc
index 3f73f3564e3be..6d7c9fe0f984b 100644
--- a/src/test/objectstore/test_transaction.cc
+++ b/src/test/objectstore/test_transaction.cc
@@ -186,7 +186,7 @@ void bench_num_bytes(bool legacy)
     cout << "get_encoded_bytes: ";
   }
 
-  utime_t start = ceph_clock_now(NULL);
+  utime_t start = ceph_clock_now();
   if (legacy) {
     for (int i = 0; i < max; ++i) {
       a.get_encoded_bytes_test();
@@ -197,7 +197,7 @@ void bench_num_bytes(bool legacy)
     }
   }
 
-  utime_t end = ceph_clock_now(NULL);
+  utime_t end = ceph_clock_now();
   cout << max << " encodes in " << (end - start) << std::endl;
 }
diff --git a/src/test/objectstore/workload_generator.cc b/src/test/objectstore/workload_generator.cc
index 2c55fb27bc0f6..5b0d1c1e84fdd 100644
--- a/src/test/objectstore/workload_generator.cc
+++ b/src/test/objectstore/workload_generator.cc
@@ -387,7 +387,7 @@ TestObjectStoreState::coll_entry_t
 
 void WorkloadGenerator::do_stats()
 {
-  utime_t now = ceph_clock_now(NULL);
+  utime_t now = ceph_clock_now();
   m_stats_lock.Lock();
 
   utime_t duration = (now - m_stats_begin);
@@ -412,7 +412,7 @@ void WorkloadGenerator::run()
   int ops_run = 0;
 
   utime_t stats_interval(m_stats_show_secs, 0);
-  utime_t now = ceph_clock_now(NULL);
+  utime_t now = ceph_clock_now();
   utime_t stats_time = now;
   m_stats_begin = now;
 
@@ -441,7 +441,7 @@ void WorkloadGenerator::run()
 
     if (m_do_stats) {
-      utime_t now = ceph_clock_now(NULL);
+      utime_t now = ceph_clock_now();
       utime_t elapsed = now - stats_time;
       if (elapsed >= stats_interval) {
         do_stats();
diff --git a/src/test/old/test_disk_bw.cc b/src/test/old/test_disk_bw.cc
index 63b2a5f7b1d27..d509a510dd670 100644
--- a/src/test/old/test_disk_bw.cc
+++ b/src/test/old/test_disk_bw.cc
@@ -39,7 +39,7 @@ int main(int argc, char **argv)
   }
 
-  utime_t start = ceph_clock_now(g_ceph_context);
+  utime_t start = ceph_clock_now();
 
   while (loop++ < count) {
     int ret = safe_write(fd, buf, bsize);
     if (ret)
@@ -49,7 +49,7 @@ int main(int argc, char **argv)
   }
   ::fsync(fd);
   ::close(fd);
-  utime_t end = ceph_clock_now(g_ceph_context);
+  utime_t end = ceph_clock_now();
 
   end -= start;
diff --git a/src/test/old/test_seek_read.c b/src/test/old/test_seek_read.c
index 1ea3b750b455f..c63ed34d77eba 100644
--- a/src/test/old/test_seek_read.c
+++ b/src/test/old/test_seek_read.c
@@ -36,7 +36,7 @@ int main(int argc, char **argv)
 
   int s = blocks*4096;
 
-  utime_t start = ceph_clock_now(g_ceph_context);
+  utime_t start = ceph_clock_now();
   for (int i=0; i fn = []() {}) {
     Mutex::Locker locker(mock_object_copy_request.lock);
     while (mock_object_copy_request.object_contexts.count(object_num) == 0) {
-      if (mock_object_copy_request.cond.WaitInterval(m_local_image_ctx->cct,
-                                                     mock_object_copy_request.lock,
+      if (mock_object_copy_request.cond.WaitInterval(mock_object_copy_request.lock,
                                                      utime_t(10, 0)) != 0) {
         return false;
       }
@@ -153,8 +152,7 @@ public:
   MockImageCopyRequest::SnapMap wait_for_snap_map(MockObjectCopyRequest &mock_object_copy_request) {
     Mutex::Locker locker(mock_object_copy_request.lock);
     while (mock_object_copy_request.snap_map == nullptr) {
-      if (mock_object_copy_request.cond.WaitInterval(m_local_image_ctx->cct,
-                                                     mock_object_copy_request.lock,
+      if (mock_object_copy_request.cond.WaitInterval(mock_object_copy_request.lock,
                                                      utime_t(10, 0)) != 0) {
         return MockImageCopyRequest::SnapMap();
       }
diff --git a/src/test/rbd_mirror/random_write.cc b/src/test/rbd_mirror/random_write.cc
index 9ad4cd01899f8..deed9ec21a542 100644
--- a/src/test/rbd_mirror/random_write.cc
+++ b/src/test/rbd_mirror/random_write.cc
@@ -71,7 +71,7 @@ struct rbd_bencher {
     while (in_flight > max) {
       utime_t dur;
       dur.set_from_double(.2);
-      cond.WaitInterval(g_ceph_context, lock, dur);
+      cond.WaitInterval(lock, dur);
     }
   }
 
diff --git a/src/test/rbd_mirror/test_ImageReplayer.cc b/src/test/rbd_mirror/test_ImageReplayer.cc
index b2e5b16dc2666..58f8deefe0dff 100644
--- a/src/test/rbd_mirror/test_ImageReplayer.cc
+++ b/src/test/rbd_mirror/test_ImageReplayer.cc
@@ -262,7 +262,7 @@ public:
 
     Mutex::Locker locker(m_watch_ctx->lock);
     while (!m_watch_ctx->notified) {
-      if (m_watch_ctx->cond.WaitInterval(g_ceph_context, m_watch_ctx->lock,
+      if (m_watch_ctx->cond.WaitInterval(m_watch_ctx->lock,
                                          utime_t(seconds, 0)) != 0) {
         return false;
       }
diff --git a/src/test/test_rewrite_latency.cc b/src/test/test_rewrite_latency.cc
index 67e7c675a2977..fd76e8d147bd8 100644
--- a/src/test/test_rewrite_latency.cc
+++ b/src/test/test_rewrite_latency.cc
@@ -23,10 +23,10 @@ int main(int argc, const char **argv)
   }
 
   while (true) {
-    utime_t now = ceph_clock_now(NULL);
+    utime_t now = ceph_clock_now();
     int r = ::pwrite(fd, fn, strlen(fn), 0);
     assert(r >= 0);
-    utime_t lat = ceph_clock_now(NULL);
+    utime_t lat = ceph_clock_now();
     lat -= now;
     utime_t oldmin;
     if (!latency.empty())
diff --git a/src/test/testcrypto.cc b/src/test/testcrypto.cc
index 60f5905b536d2..c91fb1413a605 100644
--- a/src/test/testcrypto.cc
+++ b/src/test/testcrypto.cc
@@ -13,7 +13,7 @@ int main(int argc, char *argv[])
   char aes_key[AES_KEY_LEN];
   memset(aes_key, 0x77, sizeof(aes_key));
   bufferptr keybuf(aes_key, sizeof(aes_key));
-  CryptoKey key(CEPH_CRYPTO_AES, ceph_clock_now(g_ceph_context), keybuf);
+  CryptoKey key(CEPH_CRYPTO_AES, ceph_clock_now(), keybuf);
 
   const char *msg="hello! this is a message\n";
   char pad[16];
diff --git a/src/test/testkeys.cc b/src/test/testkeys.cc
index 93bed6b6e9c9c..8892942bd532f 100644
--- a/src/test/testkeys.cc
+++ b/src/test/testkeys.cc
@@ -26,7 +26,7 @@ int main(int argc, const char **argv)
   char aes_key[AES_KEY_LEN];
   memset(aes_key, 0x77, sizeof(aes_key));
   bufferptr keybuf(aes_key, sizeof(aes_key));
-  CryptoKey key(CEPH_CRYPTO_AES, ceph_clock_now(g_ceph_context), keybuf);
+  CryptoKey key(CEPH_CRYPTO_AES, ceph_clock_now(), keybuf);
 
   const char *msg="hello! this is a message\n";
   char pad[16];
diff --git a/src/tools/ceph_kvstore_tool.cc b/src/tools/ceph_kvstore_tool.cc
index 751bf106c7041..e0efe39e7d4cb 100644
--- a/src/tools/ceph_kvstore_tool.cc
+++ b/src/tools/ceph_kvstore_tool.cc
@@ -161,7 +161,7 @@ class StoreTool
     uint64_t total_size = 0;
     uint64_t total_txs = 0;
 
-    utime_t started_at = ceph_clock_now(g_ceph_context);
+    utime_t started_at = ceph_clock_now();
 
     do {
       int num_keys = 0;
@@ -186,14 +186,14 @@ class StoreTool
       if (num_keys > 0)
        other->submit_transaction_sync(tx);
 
-      utime_t cur_duration = ceph_clock_now(g_ceph_context) - started_at;
+      utime_t cur_duration = ceph_clock_now() - started_at;
       std::cout << "ts = " << cur_duration << "s, copied " << total_keys
                 << " keys so far (" << stringify(si_t(total_size)) << ")"
                 << std::endl;
 
     } while (it->valid());
 
-    utime_t time_taken = ceph_clock_now(g_ceph_context) - started_at;
+    utime_t time_taken = ceph_clock_now() - started_at;
 
     std::cout << "summary:" << std::endl;
     std::cout << "  copied " << total_keys << " keys" << std::endl;
diff --git a/src/tools/ceph_monstore_tool.cc b/src/tools/ceph_monstore_tool.cc
index 5ffb3d167273f..8f0bf77333f10 100644
--- a/src/tools/ceph_monstore_tool.cc
+++ b/src/tools/ceph_monstore_tool.cc
@@ -494,7 +494,7 @@ int inflate_pgmap(MonitorDBStore& st, unsigned n, bool can_be_trimmed) {
     }
     ::encode(ps->second, dirty_pgs);
   }
-  utime_t inc_stamp = ceph_clock_now(NULL);
+  utime_t inc_stamp = ceph_clock_now();
   ::encode(inc_stamp, trans_bl);
   ::encode_destructively(dirty_pgs, trans_bl);
   bufferlist dirty_osds;
@@ -635,7 +635,7 @@ static int update_pgmap_meta(MonitorDBStore& st)
   // the first pgmap_meta
   t->put(prefix, "version", 1);
   {
-    auto stamp = ceph_clock_now(g_ceph_context);
+    auto stamp = ceph_clock_now();
     bufferlist bl;
     ::encode(stamp, bl);
     t->put(prefix, "stamp", bl);
diff --git a/src/tools/cephfs/DataScan.cc b/src/tools/cephfs/DataScan.cc
index 951a01713539b..7ff86ef3c202c 100644
--- a/src/tools/cephfs/DataScan.cc
+++ b/src/tools/cephfs/DataScan.cc
@@ -342,8 +342,8 @@ int MetadataDriver::inject_unlinked_inode(
   // (we won't actually give the *correct* dirstat here though)
   inode.inode.dirstat.nfiles = 1;
 
-  inode.inode.ctime =
-    inode.inode.mtime = ceph_clock_now(g_ceph_context);
+  inode.inode.ctime =
+    inode.inode.mtime = ceph_clock_now();
   inode.inode.nlink = 1;
   inode.inode.truncate_size = -1ull;
   inode.inode.truncate_seq = 1;
diff --git a/src/tools/cephfs/Dumper.cc b/src/tools/cephfs/Dumper.cc
index af04bf6169978..c2cd44a3ccf9c 100644
--- a/src/tools/cephfs/Dumper.cc
+++ b/src/tools/cephfs/Dumper.cc
@@ -258,7 +258,7 @@ int Dumper::undump(const char *dump_file)
   C_SaferCond header_cond;
   lock.Lock();
   objecter->write_full(oid, oloc, snapc, hbl,
-                       ceph::real_clock::now(g_ceph_context), 0,
+                       ceph::real_clock::now(), 0,
                        NULL, &header_cond);
   lock.Unlock();
 
@@ -285,7 +285,7 @@ int Dumper::undump(const char *dump_file)
     cout << "Purging " << purge_count << " objects from " << last_obj << std::endl;
     lock.Lock();
     filer.purge_range(ino, &h.layout, snapc, last_obj, purge_count,
-                      ceph::real_clock::now(g_ceph_context), 0, &purge_cond);
+                      ceph::real_clock::now(), 0, &purge_cond);
     lock.Unlock();
     purge_cond.wait();
   }
@@ -305,7 +305,7 @@ int Dumper::undump(const char *dump_file)
       C_SaferCond write_cond;
       lock.Lock();
       filer.write(ino, &h.layout, snapc, pos, l, j,
-                  ceph::real_clock::now(g_ceph_context), 0, NULL, &write_cond);
+                  ceph::real_clock::now(), 0, NULL, &write_cond);
       lock.Unlock();
 
       r = write_cond.wait();
diff --git a/src/tools/monmaptool.cc b/src/tools/monmaptool.cc
index b600f5a5393ec..1fac4c10e6840 100644
--- a/src/tools/monmaptool.cc
+++ b/src/tools/monmaptool.cc
@@ -307,7 +307,7 @@ int main(int argc, const char **argv)
 
   if (create) {
     monmap.epoch = 0;
-    monmap.created = ceph_clock_now(g_ceph_context);
+    monmap.created = ceph_clock_now();
     monmap.last_changed = monmap.created;
     srand(getpid() + time(0));
     if (g_conf->fsid.is_zero()) {
diff --git a/src/tools/rados/rados.cc b/src/tools/rados/rados.cc
index a334ada42a9b1..949cc17fb737b 100644
--- a/src/tools/rados/rados.cc
+++ b/src/tools/rados/rados.cc
@@ -612,7 +612,7 @@ public:
   }
 
   float time_passed() {
-    utime_t now = ceph_clock_now(g_ceph_context);
+    utime_t now = ceph_clock_now();
     now -= start_time;
     uint64_t ns = now.nsec();
     float total = (float) ns / 1000000000.0;
@@ -805,7 +805,7 @@ uint64_t LoadGen::gen_next_op()
 
 int LoadGen::run()
 {
-  start_time = ceph_clock_now(g_ceph_context);
+  start_time = ceph_clock_now();
   utime_t end_time = start_time;
   end_time += run_length;
   utime_t stamp_time = start_time;
@@ -814,14 +814,14 @@ int LoadGen::run()
   while (1) {
     lock.Lock();
     utime_t one_second(1, 0);
-    cond.WaitInterval(g_ceph_context, lock, one_second);
+    cond.WaitInterval(lock, one_second);
     lock.Unlock();
 
-    utime_t now = ceph_clock_now(g_ceph_context);
+    utime_t now = ceph_clock_now();
     if (now > end_time)
       break;
 
-    uint64_t expected = total_expected();
+    uint64_t expected = total_expected();
     lock.Lock();
     uint64_t sent = total_sent;
     uint64_t completed = total_completed;
diff --git a/src/tools/rbd/action/Bench.cc b/src/tools/rbd/action/Bench.cc
index 42d05dd16ce3d..fae1db45ec3da 100644
--- a/src/tools/rbd/action/Bench.cc
+++ b/src/tools/rbd/action/Bench.cc
@@ -156,7 +156,7 @@ struct rbd_bencher {
     while (in_flight > max) {
       utime_t dur;
       dur.set_from_double(.2);
-      cond.WaitInterval(g_ceph_context, lock, dur);
+      cond.WaitInterval(lock, dur);
     }
   }
 
@@ -208,7 +208,7 @@ int do_bench(librbd::Image& image, io_type_t io_type,
 
   srand(time(NULL) % (unsigned long) -1);
 
-  utime_t start = ceph_clock_now(NULL);
+  utime_t start = ceph_clock_now();
   utime_t last;
   unsigned ios = 0;
 
@@ -268,7 +268,7 @@ int do_bench(librbd::Image& image, io_type_t io_type,
       cur_off += io_size;
     }
 
-    utime_t now = ceph_clock_now(NULL);
+    utime_t now = ceph_clock_now();
     utime_t elapsed = now - start;
     if (last.is_zero()) {
       last = elapsed;
@@ -295,7 +295,7 @@ int do_bench(librbd::Image& image, io_type_t io_type,
              << std::endl;
   }
 
-  utime_t now = ceph_clock_now(NULL);
+  utime_t now = ceph_clock_now();
   double elapsed = now - start;
 
   printf("elapsed: %5d ops: %8d ops/sec: %8.2lf bytes/sec: %8.2lf\n",
diff --git a/src/tools/rbd_mirror/Mirror.cc b/src/tools/rbd_mirror/Mirror.cc
index 8d5b83a08ae53..0463a81d139d2 100644
--- a/src/tools/rbd_mirror/Mirror.cc
+++ b/src/tools/rbd_mirror/Mirror.cc
@@ -234,8 +234,9 @@ void Mirror::run()
     if (!m_manual_stop) {
       update_replayers(m_local_cluster_watcher->get_pool_peers());
     }
-    m_cond.WaitInterval(g_ceph_context, m_lock,
-        utime_t(m_cct->_conf->rbd_mirror_pool_replayers_refresh_interval, 0));
+    m_cond.WaitInterval(
+      m_lock,
+      utime_t(m_cct->_conf->rbd_mirror_pool_replayers_refresh_interval, 0));
   }
 
   // stop all replayers in parallel
diff --git a/src/tools/rbd_mirror/Replayer.cc b/src/tools/rbd_mirror/Replayer.cc
index 78c2091e9e80b..6c70c9b78dffd 100644
--- a/src/tools/rbd_mirror/Replayer.cc
+++ b/src/tools/rbd_mirror/Replayer.cc
@@ -457,8 +457,9 @@ void Replayer::run()
     if (m_blacklisted) {
       break;
     }
-    m_cond.WaitInterval(g_ceph_context, m_lock,
-        utime_t(g_ceph_context->_conf->rbd_mirror_image_state_check_interval, 0));
+    m_cond.WaitInterval(m_lock,
+                        utime_t(g_ceph_context->_conf
+                                ->rbd_mirror_image_state_check_interval, 0));
   }
 
   ImageIds empty_sources;
@@ -468,7 +469,7 @@ void Replayer::run()
     if (m_image_replayers.empty()) {
       break;
     }
-    m_cond.WaitInterval(g_ceph_context, m_lock, seconds(1));
+    m_cond.WaitInterval(m_lock, seconds(1));
   }
 }
-- 
2.39.5