From b0ab76a80fce5dd592709f9671bc969c4976b5b0 Mon Sep 17 00:00:00 2001
From: Kefu Chai
Date: Fri, 30 Jun 2017 15:13:48 +0800
Subject: [PATCH] OSD: s/PGQueueable/OpQueueItem/g

PGQueueable was always a pretty bad name, and going forward, we won't
necessarily be scheduling things ordered only by the PG lock, so let's
change it.

Signed-off-by: Samuel Just
Signed-off-by: Kefu Chai
Signed-off-by: Myoungwon Oh
---
 src/osd/CMakeLists.txt                     |  2 +-
 src/osd/OSD.cc                             | 16 ++++----
 src/osd/OSD.h                              | 45 +++++++++++-----------
 src/osd/{PGQueueable.cc => OpQueueItem.cc} | 10 ++---
 src/osd/{PGQueueable.h => OpQueueItem.h}   | 16 ++++----
 src/osd/PG.cc                              |  6 +--
 src/osd/PrimaryLogPG.cc                    |  4 +-
 src/osd/mClockClientQueue.h                |  5 +--
 src/osd/mClockOpClassQueue.h               |  5 +--
 src/test/osd/TestMClockClientQueue.cc      |  8 ++--
 src/test/osd/TestMClockOpClassQueue.cc     |  8 ++--
 11 files changed, 61 insertions(+), 64 deletions(-)
 rename src/osd/{PGQueueable.cc => OpQueueItem.cc} (70%)
 rename src/osd/{PGQueueable.h => OpQueueItem.h} (94%)

diff --git a/src/osd/CMakeLists.txt b/src/osd/CMakeLists.txt
index 3ec6f31a604..8c7b2e8f9f6 100644
--- a/src/osd/CMakeLists.txt
+++ b/src/osd/CMakeLists.txt
@@ -31,7 +31,7 @@ set(osd_srcs
   ExtentCache.cc
   mClockOpClassQueue.cc
   mClockClientQueue.cc
-  PGQueueable.cc
+  OpQueueItem.cc
   ${CMAKE_SOURCE_DIR}/src/common/TrackedOp.cc
   ${osd_cyg_functions_src}
   ${osdc_osd_srcs})
diff --git a/src/osd/OSD.cc b/src/osd/OSD.cc
index 9bd7566401f..6a69997d6a0 100644
--- a/src/osd/OSD.cc
+++ b/src/osd/OSD.cc
@@ -1674,12 +1674,12 @@ void OSDService::handle_misdirected_op(PG *pg, OpRequestRef op)
           << " in e" << m->get_map_epoch()
           << "/" << osdmap->get_epoch();
 }
 
-void OSDService::enqueue_back(spg_t pgid, PGQueueable qi)
+void OSDService::enqueue_back(spg_t pgid, OpQueueItem qi)
 {
   osd->op_shardedwq.queue(make_pair(pgid, qi));
 }
 
-void OSDService::enqueue_front(spg_t pgid, PGQueueable qi)
+void OSDService::enqueue_front(spg_t pgid, OpQueueItem qi)
 {
   osd->op_shardedwq.queue_front(make_pair(pgid, qi));
 }
@@ -1695,7 +1695,7 @@ void OSDService::queue_for_snap_trim(PG *pg)
   osd->op_shardedwq.queue(
     make_pair(
       pg->pg_id,
-      PGQueueable(
+      OpQueueItem(
         PGSnapTrim(pg->get_osdmap()->get_epoch()),
         cct->_conf->osd_snap_trim_cost,
         cct->_conf->osd_snap_trim_priority,
@@ -8909,7 +8909,7 @@ void OSD::enqueue_op(spg_t pg, OpRequestRef& op, epoch_t epoch)
   op->osd_trace.keyval("cost", op->get_req()->get_cost());
   op->mark_queued_for_pg();
   logger->tinc(l_osd_op_before_queue_op_lat, latency);
-  op_shardedwq.queue(make_pair(pg, PGQueueable(op, epoch)));
+  op_shardedwq.queue(make_pair(pg, OpQueueItem(op, epoch)));
 }
 
@@ -9569,7 +9569,7 @@ void OSD::ShardedOpWQ::_process(uint32_t thread_index, heartbeat_handle_d *hb)
       return;
     }
   }
-  pair<spg_t, PGQueueable> item = sdata->pqueue->dequeue();
+  pair<spg_t, OpQueueItem> item = sdata->pqueue->dequeue();
   if (osd->is_stopping()) {
     sdata->sdata_op_ordering_lock.Unlock();
     return;    // OSD shutdown, discard.
@@ -9609,7 +9609,7 @@ void OSD::ShardedOpWQ::_process(uint32_t thread_index, heartbeat_handle_d *hb)
 
   osd->service.maybe_inject_dispatch_delay();
 
-  boost::optional<PGQueueable> qi;
+  boost::optional<OpQueueItem> qi;
 
   // we don't use a Mutex::Locker here because of the
   // osd->service.release_reserved_pushes() call below
@@ -9743,7 +9743,7 @@ void OSD::ShardedOpWQ::_process(uint32_t thread_index, heartbeat_handle_d *hb)
   pg->unlock();
 }
 
-void OSD::ShardedOpWQ::_enqueue(pair<spg_t, PGQueueable> item) {
+void OSD::ShardedOpWQ::_enqueue(pair<spg_t, OpQueueItem> item) {
 
   uint32_t shard_index = item.first.hash_to_shard(shard_list.size());
@@ -9769,7 +9769,7 @@ void OSD::ShardedOpWQ::_enqueue(pair<spg_t, PGQueueable> item) {
 }
 
-void OSD::ShardedOpWQ::_enqueue_front(pair<spg_t, PGQueueable> item)
+void OSD::ShardedOpWQ::_enqueue_front(pair<spg_t, OpQueueItem> item)
 {
   uint32_t shard_index = item.first.hash_to_shard(shard_list.size());
   ShardData* sdata = shard_list[shard_index];
diff --git a/src/osd/OSD.h b/src/osd/OSD.h
index 03e414e1425..f76091ff2cc 100644
--- a/src/osd/OSD.h
+++ b/src/osd/OSD.h
@@ -19,6 +19,7 @@
 
 #include "msg/Dispatcher.h"
 
+#include "common/backport14.h"
 #include "common/Mutex.h"
 #include "common/RWLock.h"
 #include "common/Timer.h"
@@ -40,7 +41,7 @@
 #include "OpRequest.h"
 #include "Session.h"
 
-#include "osd/PGQueueable.h"
+#include "osd/OpQueueItem.h"
 
 #include <atomic>
 #include <map>
@@ -366,8 +367,8 @@ public:
   GenContextWQ recovery_gen_wq;
   ClassHandler  *&class_handler;
 
-  void enqueue_back(spg_t pgid, PGQueueable qi);
-  void enqueue_front(spg_t pgid, PGQueueable qi);
+  void enqueue_back(spg_t pgid, OpQueueItem qi);
+  void enqueue_front(spg_t pgid, OpQueueItem qi);
 
   void maybe_inject_dispatch_delay() {
     if (g_conf->osd_debug_inject_dispatch_delay_probability > 0) {
@@ -866,7 +867,7 @@ public:
     }
     enqueue_back(
       pg->get_pgid(),
-      PGQueueable(
+      OpQueueItem(
         PGScrub(pg->get_osdmap()->get_epoch()),
         cct->_conf->osd_scrub_cost,
         scrub_queue_priority,
@@ -894,7 +895,7 @@ private:
     assert(recovery_lock.is_locked_by_me());
     enqueue_back(
       p.second->get_pgid(),
-      PGQueueable(
+      OpQueueItem(
         PGRecovery(p.first, reserved_pushes),
         cct->_conf->osd_recovery_cost,
         cct->_conf->osd_recovery_priority,
@@ -1617,10 +1618,10 @@ private:
    * wake_pg_waiters, and (2) when wake_pg_waiters just ran, waiting_for_pg
    * and already requeued the items.
    */
-  friend class PGQueueable;
+  friend class OpQueueItem;
 
   class ShardedOpWQ
-    : public ShardedThreadPool::ShardedWQ<pair<spg_t, PGQueueable>>
+    : public ShardedThreadPool::ShardedWQ<pair<spg_t, OpQueueItem>>
   {
     struct ShardData {
       Mutex sdata_lock;
@@ -1631,7 +1632,7 @@ private:
       OSDMapRef waiting_for_pg_osdmap;
       struct pg_slot {
         PGRef pg;                      ///< cached pg reference [optional]
-        deque<PGQueueable> to_process; ///< order items for this slot
+        deque<OpQueueItem> to_process; ///< order items for this slot
         int num_running = 0;           ///< _process threads doing pg lookup/lock
         /// true if pg does/did not exist. if so all new items go directly to
@@ -1649,9 +1650,9 @@ private:
       unordered_map<spg_t, pg_slot> pg_slots;
 
       /// priority queue
-      std::unique_ptr<OpQueue<pair<spg_t, PGQueueable>, uint64_t>> pqueue;
+      std::unique_ptr<OpQueue<pair<spg_t, OpQueueItem>, uint64_t>> pqueue;
 
-      void _enqueue_front(pair<spg_t, PGQueueable> item, unsigned cutoff) {
+      void _enqueue_front(pair<spg_t, OpQueueItem> item, unsigned cutoff) {
         unsigned priority = item.second.get_priority();
         unsigned cost = item.second.get_cost();
         if (priority >= cutoff)
@@ -1673,20 +1674,18 @@ private:
             false, cct) {
         if (opqueue == io_queue::weightedpriority) {
           pqueue = std::unique_ptr
-            <WeightedPriorityQueue<pair<spg_t, PGQueueable>, uint64_t>>(
-            new WeightedPriorityQueue<pair<spg_t, PGQueueable>, uint64_t>(
+            <WeightedPriorityQueue<pair<spg_t, OpQueueItem>, uint64_t>>(
+            new WeightedPriorityQueue<pair<spg_t, OpQueueItem>, uint64_t>(
               max_tok_per_prio, min_cost));
         } else if (opqueue == io_queue::prioritized) {
           pqueue = std::unique_ptr
-            <PrioritizedQueue<pair<spg_t, PGQueueable>, uint64_t>>(
-            new PrioritizedQueue<pair<spg_t, PGQueueable>, uint64_t>(
+            <PrioritizedQueue<pair<spg_t, OpQueueItem>, uint64_t>>(
+            new PrioritizedQueue<pair<spg_t, OpQueueItem>, uint64_t>(
               max_tok_per_prio, min_cost));
         } else if (opqueue == io_queue::mclock_opclass) {
-          pqueue = std::unique_ptr<ceph::mClockOpClassQueue>
-            (new ceph::mClockOpClassQueue(cct));
+          pqueue = ceph::make_unique<ceph::mClockOpClassQueue>(cct);
         } else if (opqueue == io_queue::mclock_client) {
-          pqueue = std::unique_ptr<ceph::mClockClientQueue>
-            (new ceph::mClockClientQueue(cct));
+          pqueue = ceph::make_unique<ceph::mClockClientQueue>(cct);
         }
       }
     }; // struct ShardData
@@ -1701,7 +1700,7 @@ private:
             time_t ti,
             time_t si,
             ShardedThreadPool* tp)
-      : ShardedThreadPool::ShardedWQ<pair<spg_t, PGQueueable>>(ti, si, tp),
+      : ShardedThreadPool::ShardedWQ<pair<spg_t, OpQueueItem>>(ti, si, tp),
         osd(o),
         num_shards(pnum_shards) {
       for (uint32_t i = 0; i < num_shards; i++) {
@@ -1740,10 +1739,10 @@ private:
     void _process(uint32_t thread_index, heartbeat_handle_d *hb) override;
 
     /// enqueue a new item
-    void _enqueue(pair<spg_t, PGQueueable> item) override;
+    void _enqueue(pair<spg_t, OpQueueItem> item) override;
 
     /// requeue an old item (at the front of the line)
-    void _enqueue_front(pair<spg_t, PGQueueable> item) override;
+    void _enqueue_front(pair<spg_t, OpQueueItem> item) override;
 
     void return_waiting_threads() override {
       for(uint32_t i = 0; i < num_shards; i++) {
@@ -1776,7 +1775,7 @@ private:
       uint64_t reserved_pushes_to_free;
       Pred(spg_t pg, list<OpRequestRef> *out_ops = 0)
         : pgid(pg), out_ops(out_ops), reserved_pushes_to_free(0) {}
-      void accumulate(const PGQueueable &op) {
+      void accumulate(const OpQueueItem &op) {
         reserved_pushes_to_free += op.get_reserved_pushes();
         if (out_ops) {
           boost::optional<OpRequestRef> mop = op.maybe_get_op();
@@ -1784,7 +1783,7 @@ private:
             out_ops->push_front(*mop);
         }
       }
-      bool operator()(const pair<spg_t, PGQueueable> &op) {
+      bool operator()(const pair<spg_t, OpQueueItem> &op) {
         if (op.first == pgid) {
           accumulate(op.second);
           return true;
diff --git a/src/osd/PGQueueable.cc b/src/osd/OpQueueItem.cc
similarity index 70%
rename from src/osd/PGQueueable.cc
rename to src/osd/OpQueueItem.cc
index 844cdfc350b..bf944161866 100644
--- a/src/osd/PGQueueable.cc
+++ b/src/osd/OpQueueItem.cc
@@ -14,22 +14,22 @@
 
 #include "PG.h"
 
-#include "PGQueueable.h"
+#include "OpQueueItem.h"
 #include "OSD.h"
 
-void PGQueueable::RunVis::operator()(const OpRequestRef &op) {
+void OpQueueItem::RunVis::operator()(const OpRequestRef &op) {
   osd->dequeue_op(pg, op, handle);
 }
 
-void PGQueueable::RunVis::operator()(const PGSnapTrim &op) {
+void OpQueueItem::RunVis::operator()(const PGSnapTrim &op) {
   pg->snap_trimmer(op.epoch_queued);
 }
 
-void PGQueueable::RunVis::operator()(const PGScrub &op) {
+void OpQueueItem::RunVis::operator()(const PGScrub &op) {
   pg->scrub(op.epoch_queued, handle);
 }
 
-void PGQueueable::RunVis::operator()(const PGRecovery &op) {
+void OpQueueItem::RunVis::operator()(const PGRecovery &op) {
   osd->do_recovery(pg.get(), op.epoch_queued, op.reserved_pushes, handle);
 }
diff --git a/src/osd/PGQueueable.h b/src/osd/OpQueueItem.h
similarity index 94%
rename from src/osd/PGQueueable.h
rename to src/osd/OpQueueItem.h
index 9eeadc038e6..97c4c5ae22f 100644
--- a/src/osd/PGQueueable.h
+++ b/src/osd/OpQueueItem.h
@@ -54,7 +54,7 @@ struct PGRecovery {
 };
 
-class PGQueueable {
+class OpQueueItem {
   typedef boost::variant<
     OpRequestRef,
     PGSnapTrim,
@@ -95,33 +95,33 @@ class PGQueueable {
     }
   };
 
-  friend ostream& operator<<(ostream& out, const PGQueueable& q) {
+  friend ostream& operator<<(ostream& out, const OpQueueItem& q) {
     StringifyVis v;
-    return out << "PGQueueable(" << boost::apply_visitor(v, q.qvariant)
+    return out << "OpQueueItem(" << boost::apply_visitor(v, q.qvariant)
                << " prio " << q.priority
                << " cost " << q.cost
                << " e" << q.map_epoch << ")";
   }
 
 public:
-  PGQueueable(OpRequestRef op, epoch_t e)
+  OpQueueItem(OpRequestRef op, epoch_t e)
     : qvariant(op), cost(op->get_req()->get_cost()),
       priority(op->get_req()->get_priority()),
       start_time(op->get_req()->get_recv_stamp()),
       owner(op->get_req()->get_source().num()),
       map_epoch(e)
     {}
-  PGQueueable(
+  OpQueueItem(
     const PGSnapTrim &op, int cost, unsigned priority, utime_t start_time,
     uint64_t owner, epoch_t e)
     : qvariant(op), cost(cost), priority(priority), start_time(start_time),
       owner(owner), map_epoch(e) {}
-  PGQueueable(
+  OpQueueItem(
    const PGScrub &op, int cost, unsigned priority, utime_t start_time,
    uint64_t owner, epoch_t e)
    : qvariant(op), cost(cost), priority(priority), start_time(start_time),
      owner(owner), map_epoch(e) {}
-  PGQueueable(
+  OpQueueItem(
    const PGRecovery &op, int cost, unsigned priority, utime_t start_time,
    uint64_t owner, epoch_t e)
    : qvariant(op), cost(cost), priority(priority), start_time(start_time),
@@ -145,4 +145,4 @@ public:
   uint64_t get_owner() const { return owner; }
   epoch_t get_map_epoch() const { return map_epoch; }
   const QVariant& get_variant() const { return qvariant; }
-}; // struct PGQueueable
+}; // struct OpQueueItem
diff --git a/src/osd/PG.cc b/src/osd/PG.cc
index 04893e987fd..509537d9953 100644
--- a/src/osd/PG.cc
+++ b/src/osd/PG.cc
@@ -3457,7 +3457,7 @@ void PG::requeue_op(OpRequestRef op)
     p->second.push_front(op);
   } else {
     dout(20) << __func__ << " " << op << dendl;
-    osd->enqueue_front(info.pgid, PGQueueable(op, get_osdmap()->get_epoch()));
+    osd->enqueue_front(info.pgid, OpQueueItem(op, get_osdmap()->get_epoch()));
   }
 }
 
@@ -3473,7 +3473,7 @@ void PG::requeue_ops(list<OpRequestRef> &ls)
       p->second.push_front(*i);
     } else {
       dout(20) << __func__ << " " << *i << dendl;
-      osd->enqueue_front(info.pgid, PGQueueable(*i, get_osdmap()->get_epoch()));
+      osd->enqueue_front(info.pgid, OpQueueItem(*i, get_osdmap()->get_epoch()));
     }
   }
   ls.clear();
@@ -3492,7 +3492,7 @@ void PG::requeue_map_waiters()
   } else {
     dout(20) << __func__ << " " << p->first << " " << p->second << dendl;
     for (auto q = p->second.rbegin(); q != p->second.rend(); ++q) {
-      osd->enqueue_front(info.pgid, PGQueueable(*q, epoch));
+      osd->enqueue_front(info.pgid, OpQueueItem(*q, epoch));
     }
     p = waiting_for_map.erase(p);
   }
diff --git a/src/osd/PrimaryLogPG.cc b/src/osd/PrimaryLogPG.cc
index a73b8dd7bb9..15231f2c101 100644
--- a/src/osd/PrimaryLogPG.cc
+++ b/src/osd/PrimaryLogPG.cc
@@ -9062,7 +9062,7 @@ void PrimaryLogPG::op_applied(const eversion_t &applied_version)
           scrubber.active_rep_scrub->get_req())->scrub_to) {
       osd->enqueue_back(
         info.pgid,
-        PGQueueable(scrubber.active_rep_scrub, get_osdmap()->get_epoch()));
+        OpQueueItem(scrubber.active_rep_scrub, get_osdmap()->get_epoch()));
       scrubber.active_rep_scrub = OpRequestRef();
     }
   }
@@ -10299,7 +10299,7 @@ void PrimaryLogPG::_applied_recovered_object_replica()
           scrubber.active_rep_scrub->get_req())->chunky) {
       osd->enqueue_back(
         info.pgid,
-        PGQueueable(scrubber.active_rep_scrub, get_osdmap()->get_epoch()));
+        OpQueueItem(scrubber.active_rep_scrub, get_osdmap()->get_epoch()));
       scrubber.active_rep_scrub = OpRequestRef();
     }
     unlock();
diff --git a/src/osd/mClockClientQueue.h b/src/osd/mClockClientQueue.h
index 3d8824914eb..f14c7d647d3 100644
--- a/src/osd/mClockClientQueue.h
+++ b/src/osd/mClockClientQueue.h
@@ -21,17 +21,16 @@
 
 #include "common/config.h"
 #include "common/ceph_context.h"
-#include "osd/PGQueueable.h"
+#include "osd/OpQueueItem.h"
 #include "common/mClockPriorityQueue.h"
 
 namespace ceph {
 
-  using Request = std::pair<spg_t, PGQueueable>;
+  using Request = std::pair<spg_t, OpQueueItem>;
   using Client = uint64_t;
 
-
   // This class exists to bridge the ceph code, which treats the class
   // as the client, and the queue, where the class is
   // osd_op_type_t. So this adapter class will transform calls
diff --git a/src/osd/mClockOpClassQueue.h b/src/osd/mClockOpClassQueue.h
index 6b87335b109..ccf3f578e67 100644
--- a/src/osd/mClockOpClassQueue.h
+++ b/src/osd/mClockOpClassQueue.h
@@ -21,17 +21,16 @@
 
 #include "common/config.h"
 #include "common/ceph_context.h"
-#include "osd/PGQueueable.h"
+#include "osd/OpQueueItem.h"
 #include "common/mClockPriorityQueue.h"
 
 namespace ceph {
 
-  using Request = std::pair<spg_t, PGQueueable>;
+  using Request = std::pair<spg_t, OpQueueItem>;
   using Client = uint64_t;
 
-
   // This class exists to bridge the ceph code, which treats the class
   // as the client, and the queue, where the class is
   // osd_op_type_t. So this adapter class will transform calls
diff --git a/src/test/osd/TestMClockClientQueue.cc b/src/test/osd/TestMClockClientQueue.cc
index 498acd9ddda..6bb9b9f0ee1 100644
--- a/src/test/osd/TestMClockClientQueue.cc
+++ b/src/test/osd/TestMClockClientQueue.cc
@@ -38,27 +38,27 @@ public:
 
 #if 0 // more work needed here
   Request create_client_op(epoch_t e, uint64_t owner) {
-    return Request(spg_t(), PGQueueable(OpRequestRef(), e));
+    return Request(spg_t(), OpQueueItem(OpRequestRef(), e));
   }
 #endif
 
   Request create_snaptrim(epoch_t e, uint64_t owner) {
     return Request(spg_t(),
-                   PGQueueable(PGSnapTrim(e),
+                   OpQueueItem(PGSnapTrim(e),
                                12, 12,
                                utime_t(), owner, e));
   }
 
   Request create_scrub(epoch_t e, uint64_t owner) {
     return Request(spg_t(),
-                   PGQueueable(PGScrub(e),
+                   OpQueueItem(PGScrub(e),
                                12, 12,
                                utime_t(), owner, e));
   }
 
   Request create_recovery(epoch_t e, uint64_t owner) {
     return Request(spg_t(),
-                   PGQueueable(PGRecovery(e, 64),
+                   OpQueueItem(PGRecovery(e, 64),
                                12, 12,
                                utime_t(), owner, e));
   }
diff --git a/src/test/osd/TestMClockOpClassQueue.cc b/src/test/osd/TestMClockOpClassQueue.cc
index b18587f491a..44a0346d5f3 100644
--- a/src/test/osd/TestMClockOpClassQueue.cc
+++ b/src/test/osd/TestMClockOpClassQueue.cc
@@ -40,27 +40,27 @@ public:
 
 #if 0 // more work needed here
   Request create_client_op(epoch_t e, uint64_t owner) {
-    return Request(spg_t(), PGQueueable(OpRequestRef(), e));
+    return Request(spg_t(), OpQueueItem(OpRequestRef(), e));
   }
 #endif
 
   Request create_snaptrim(epoch_t e, uint64_t owner) {
     return Request(spg_t(),
-                   PGQueueable(PGSnapTrim(e),
+                   OpQueueItem(PGSnapTrim(e),
                                12, 12,
                                utime_t(), owner, e));
   }
 
   Request create_scrub(epoch_t e, uint64_t owner) {
     return Request(spg_t(),
-                   PGQueueable(PGScrub(e),
+                   OpQueueItem(PGScrub(e),
                                12, 12,
                                utime_t(), owner, e));
   }
 
   Request create_recovery(epoch_t e, uint64_t owner) {
     return Request(spg_t(),
-                   PGQueueable(PGRecovery(e, 64),
+                   OpQueueItem(PGRecovery(e, 64),
                                12, 12,
                                utime_t(), owner, e));
   }
-- 
2.39.5
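
For readers outside the Ceph tree, the following standalone sketch (not part
of the patch) illustrates the pattern the renamed class implements: a single
queue-entry type that wraps several kinds of OSD work behind a variant,
carries the cost/priority/epoch metadata the operator queues order by, and
dispatches to the right handler when dequeued. It uses std::variant in place
of Ceph's boost::variant, and every name below (ClientOp, SnapTrim, Scrub,
run(), the sample costs and priorities) is a hypothetical stand-in, not the
real Ceph API.

// opqueueitem_sketch.cc -- illustrative only; build with: g++ -std=c++17
#include <cstdint>
#include <iostream>
#include <queue>
#include <string>
#include <type_traits>
#include <utility>
#include <variant>
#include <vector>

using epoch_t = uint32_t;

// Hypothetical stand-ins for the work types the real item wraps
// (OpRequestRef, PGSnapTrim, PGScrub, PGRecovery in the patch).
struct ClientOp { std::string oid; };
struct SnapTrim { epoch_t epoch_queued; };
struct Scrub    { epoch_t epoch_queued; };

class OpQueueItem {
  using QVariant = std::variant<ClientOp, SnapTrim, Scrub>;
  QVariant qvariant;
  int cost;
  unsigned priority;
  epoch_t map_epoch;

public:
  OpQueueItem(QVariant q, int cost, unsigned priority, epoch_t e)
    : qvariant(std::move(q)), cost(cost), priority(priority), map_epoch(e) {}
  unsigned get_priority() const { return priority; }
  int get_cost() const { return cost; }
  epoch_t get_map_epoch() const { return map_epoch; }

  // Plays the role of OpQueueItem::RunVis: dispatch on the held alternative.
  void run() const {
    std::visit([](const auto &op) {
      using T = std::decay_t<decltype(op)>;
      if constexpr (std::is_same_v<T, ClientOp>)
        std::cout << "dequeue client op on " << op.oid << "\n";
      else if constexpr (std::is_same_v<T, SnapTrim>)
        std::cout << "snap trim queued at e" << op.epoch_queued << "\n";
      else
        std::cout << "scrub queued at e" << op.epoch_queued << "\n";
    }, qvariant);
  }
};

int main() {
  // Higher priority dequeues first, loosely mirroring how the OSD's
  // operator queues order (spg_t, OpQueueItem) pairs by priority and cost.
  auto cmp = [](const OpQueueItem &a, const OpQueueItem &b) {
    return a.get_priority() < b.get_priority();
  };
  std::priority_queue<OpQueueItem, std::vector<OpQueueItem>, decltype(cmp)> q(cmp);
  q.push(OpQueueItem(ClientOp{"rbd_data.1234"}, 4096, 63, 42));
  q.push(OpQueueItem(SnapTrim{42}, 1 << 20, 5, 42));
  q.push(OpQueueItem(Scrub{42}, 50 << 20, 5, 42));
  while (!q.empty()) {
    q.top().run();  // the client op runs first, then the background items
    q.pop();
  }
  return 0;
}

Keeping one variant-based item type is what lets a single sharded queue
interleave client I/O with background work (snap trim, scrub, recovery) under
one priority scheme; the rename makes explicit that such an item is a generic
operator-queue entry rather than something tied to the PG lock.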