common,osd: remove _process(T *t) to silence warnings
author     Kefu Chai <kchai@redhat.com>
           Sat, 30 Jan 2016 17:49:47 +0000 (01:49 +0800)
committer  Kefu Chai <kchai@redhat.com>
           Mon, 1 Feb 2016 06:33:40 +0000 (14:33 +0800)
* make WorkQueue::_process(T *, TPHandle&) a pure virtual method, so
  every subclass must override it (a minimal sketch of the pattern
  follows below)
* mark all overriding methods with the "override" specifier.

Signed-off-by: Kefu Chai <kchai@redhat.com>
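
A minimal sketch of the pattern this commit applies, using simplified
stand-in types (TPHandle below is a placeholder for ThreadPool::TPHandle,
and Item/MyQueue are hypothetical, not Ceph classes): the assert-only
_process(T *) stub and the overload that forwarded to it are replaced by a
single pure virtual _process(T *, TPHandle&), and every subclass override
carries the "override" specifier, so a mismatched signature becomes a
compile error instead of a silently hidden overload (presumably the
overloaded-virtual warnings the subject line refers to).

// sketch.cc -- simplified stand-ins, not the real Ceph ThreadPool types
#include <cassert>

struct TPHandle {};  // placeholder for ThreadPool::TPHandle

template <typename T>
struct WorkQueue {
  virtual ~WorkQueue() {}
  // Before this change there was also a virtual _process(T *) stub that
  // only asserted, plus this overload forwarding to it.  Now the TPHandle
  // overload is the single, pure virtual entry point, so every subclass
  // has to implement it.
  virtual void _process(T *t, TPHandle &handle) = 0;
};

struct Item { int val; };  // hypothetical work item

struct MyQueue : public WorkQueue<Item> {
  // "override" makes the compiler check that this really overrides the
  // base method rather than hiding it with a different signature.
  void _process(Item *item, TPHandle &) override {
    assert(item);
    item->val += 1;  // stand-in for the actual work
  }
};

int main() {
  MyQueue q;
  Item i{41};
  TPHandle h;
  q._process(&i, h);
  assert(i.val == 42);
  return 0;
}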
src/common/WorkQueue.h
src/compressor/AsyncCompressor.h
src/os/bluestore/BlueStore.h
src/os/filestore/FileStore.h
src/osd/OSD.h
src/rgw/rgw_main.cc
src/test/bench/dumb_backend.h
src/test/bench/tp_bench.cc
src/test/msgr/perf_msgr_server.cc

diff --git a/src/common/WorkQueue.h b/src/common/WorkQueue.h
index 67c1a87b783b8d3cbeb170f82c7c88b7dd0d6229..655575ebec5de7f8585cae4293f2bcb6cb90d8ba 100644
@@ -126,10 +126,7 @@ public:
     }
 
   protected:
-    virtual void _process(const list<T*> &) { assert(0); }
-    virtual void _process(const list<T*> &items, TPHandle &handle) {
-      _process(items);
-    }
+    virtual void _process(const list<T*> &items, TPHandle &handle) = 0;
 
   public:
     BatchWorkQueue(string n, time_t ti, time_t sti, ThreadPool* p)
@@ -257,10 +254,7 @@ public:
     void unlock() {
       pool->unlock();
     }
-    virtual void _process(U) { assert(0); }
-    virtual void _process(U u, TPHandle &) {
-      _process(u);
-    }
+    virtual void _process(U u, TPHandle &) = 0;
   };
 
   /** @brief Template by-pointer work queue.
@@ -293,10 +287,7 @@ public:
 
   protected:
     /// Process a work item. Called from the worker threads.
-    virtual void _process(T *t) { assert(0); }
-    virtual void _process(T *t, TPHandle &) {
-      _process(t);
-    }
+    virtual void _process(T *t, TPHandle &) = 0;
 
   public:
     WorkQueue(string n, time_t ti, time_t sti, ThreadPool* p) : WorkQueue_(n, ti, sti), pool(p) {
@@ -555,8 +546,8 @@ public:
     _queue.pop_front();
     return c;
   }
-  using ThreadPool::WorkQueueVal<GenContext<ThreadPool::TPHandle&>*>::_process;
-  void _process(GenContext<ThreadPool::TPHandle&> *c, ThreadPool::TPHandle &tp) {
+  void _process(GenContext<ThreadPool::TPHandle&> *c,
+               ThreadPool::TPHandle &tp) override {
     c->complete(tp);
   }
 };
diff --git a/src/compressor/AsyncCompressor.h b/src/compressor/AsyncCompressor.h
index cec2e961d4e3e4a5f59b9e8a9f052138803bdef8..cdeff6c4789023b3404aafc20b46a953f14cceb0 100644
@@ -85,7 +85,7 @@ class AsyncCompressor {
       }
       return item;
     }
-    void _process(Job *item, ThreadPool::TPHandle &handle) {
+    void _process(Job *item, ThreadPool::TPHandle &) override {
       assert(item->status.read() == WORKING);
       bufferlist out;
       int r;
diff --git a/src/os/bluestore/BlueStore.h b/src/os/bluestore/BlueStore.h
index f466590ce4c1ef6115e28b3f66f64bb634c81627..7b297f0cedac4434b42c89e437ebe098da036754 100644
@@ -427,11 +427,10 @@ public:
       i->osr->wal_apply_lock.Lock();
       return i;
     }
-    void _process(TransContext *i, ThreadPool::TPHandle &handle) {
+    void _process(TransContext *i, ThreadPool::TPHandle &) override {
       store->_wal_apply(i);
       i->osr->wal_apply_lock.Unlock();
     }
-    using ThreadPool::WorkQueue<TransContext>::_process;
     void _clear() {
       assert(wal_queue.empty());
     }
diff --git a/src/os/filestore/FileStore.h b/src/os/filestore/FileStore.h
index fec7600a3b777b0894c4cbea57030e70819f43d7..e6dec9fae03acea4b8581054ad8a4a639fe87a60 100644
@@ -370,10 +370,9 @@ private:
       store->op_queue.pop_front();
       return osr;
     }
-    void _process(OpSequencer *osr, ThreadPool::TPHandle &handle) {
+    void _process(OpSequencer *osr, ThreadPool::TPHandle &handle) override {
       store->_do_op(osr, handle);
     }
-    using ThreadPool::WorkQueue<OpSequencer>::_process;
     void _process_finish(OpSequencer *osr) {
       store->_finish_op(osr);
     }
diff --git a/src/osd/OSD.h b/src/osd/OSD.h
index 4d274cfb13993873c75fae6e7b7a758352ff36e2..5657eb23be96ab2eb21dfaa47ff9351addc6ad1f 100644
@@ -1783,7 +1783,7 @@ private:
     void _dequeue(list<PG*> *out);
     void _process(
       const list<PG *> &pgs,
-      ThreadPool::TPHandle &handle) {
+      ThreadPool::TPHandle &handle) override {
       osd->process_peering_events(pgs, handle);
       for (list<PG *>::const_iterator i = pgs.begin();
           i != pgs.end();
@@ -1791,7 +1791,6 @@ private:
        (*i)->put("PeeringWQ");
       }
     }
-    using ThreadPool::BatchWorkQueue<PG>::_process;
     void _process_finish(const list<PG *> &pgs) {
       for (list<PG*>::const_iterator i = pgs.begin();
           i != pgs.end();
@@ -2135,7 +2134,7 @@ protected:
       osd->command_queue.pop_front();
       return c;
     }
-    void _process(Command *c) {
+    void _process(Command *c, ThreadPool::TPHandle &) override {
       osd->osd_lock.Lock();
       if (osd->is_stopping()) {
        osd->osd_lock.Unlock();
@@ -2146,9 +2145,6 @@ protected:
       osd->osd_lock.Unlock();
       delete c;
     }
-    void _process(Command *c, ThreadPool::TPHandle &tp) {
-      _process(c);
-    }
     void _clear() {
       while (!osd->command_queue.empty()) {
        Command *c = osd->command_queue.front();
@@ -2200,11 +2196,10 @@ protected:
        osd->recovery_queue.push_front(&pg->recovery_item);
       }
     }
-    void _process(PG *pg, ThreadPool::TPHandle &handle) {
+    void _process(PG *pg, ThreadPool::TPHandle &handle) override {
       osd->do_recovery(pg, handle);
       pg->put("RecoveryWQ");
     }
-    using ThreadPool::WorkQueue<PG>::_process;
     void _clear() {
       while (!osd->recovery_queue.empty()) {
        PG *pg = osd->recovery_queue.front();
@@ -2259,8 +2254,8 @@ protected:
       remove_queue.pop_front();
       return item;
     }
-    using ThreadPool::WorkQueueVal<pair<PGRef, DeletingStateRef> >::_process;
-    void _process(pair<PGRef, DeletingStateRef>, ThreadPool::TPHandle &);
+    void _process(pair<PGRef, DeletingStateRef>,
+                 ThreadPool::TPHandle &) override;
     void _clear() {
       remove_queue.clear();
     }
diff --git a/src/rgw/rgw_main.cc b/src/rgw/rgw_main.cc
index 181d943c921a1a8d2af7aad7f84e32ab6a47c440..40b01ee4830935a3b511523121157d7d0638a9f5 100644
@@ -215,8 +215,7 @@ protected:
       perfcounter->inc(l_rgw_qlen, -1);
       return req;
     }
-    using ThreadPool::WorkQueue<RGWRequest>::_process;
-    void _process(RGWRequest *req) {
+    void _process(RGWRequest *req, ThreadPool::TPHandle &) override {
       perfcounter->inc(l_rgw_qactive);
       process->handle_request(req);
       process->req_throttle.put(1);
diff --git a/src/test/bench/dumb_backend.h b/src/test/bench/dumb_backend.h
index 38e160db76f7ac5bd7721c9729333f411df332bd..b8cddd1175f63cda2bcd6bef160907710768ed79 100644
@@ -84,8 +84,7 @@ class DumbBackend : public Backend {
     bool _empty() {
       return item_queue.empty();
     }
-    using ThreadPool::WorkQueue<write_item>::_process;
-    void _process(write_item *item) {
+    void _process(write_item *item, ThreadPool::TPHandle &) override {
       return backend->_write(
        item->oid,
        item->offset,
diff --git a/src/test/bench/tp_bench.cc b/src/test/bench/tp_bench.cc
index 6bc6be10a18118c8aa26f0a5a3f5579056f7cf58..6a50b6e085556148d9089cb95153d1d4f0386a44 100644
@@ -93,8 +93,7 @@ class PassAlong : public ThreadPool::WorkQueue<unsigned> {
     q.pop_front();
     return val;
   }
-  using ThreadPool::WorkQueue<unsigned>::_process;
-  void _process(unsigned *item) {
+  void _process(unsigned *item, ThreadPool::TPHandle &) override {
     next->queue(item);
   }
   void _clear() { q.clear(); }
diff --git a/src/test/msgr/perf_msgr_server.cc b/src/test/msgr/perf_msgr_server.cc
index ac7bc0c9d520ee257ad94ed787e3b220eb738074..3eae9a513cce317ace69fd97653e3654703d9bce 100644
@@ -58,8 +58,7 @@ class ServerDispatcher : public Dispatcher {
       messages.pop_front();
       return m;
     }
-    using ThreadPool::WorkQueue<Message>::_process;
-    void _process(Message *m, ThreadPool::TPHandle &handle) {
+    void _process(Message *m, ThreadPool::TPHandle &handle) override {
       MOSDOp *osd_op = static_cast<MOSDOp*>(m);
       MOSDOpReply *reply = new MOSDOpReply(osd_op, 0, 0, 0, false);
       m->get_connection()->send_message(reply);