git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
osdc: build without "using namespace std"
author: Kefu Chai <kchai@redhat.com>
Wed, 11 Aug 2021 03:55:49 +0000 (11:55 +0800)
committer: Kefu Chai <kchai@redhat.com>
Fri, 13 Aug 2021 04:23:37 +0000 (12:23 +0800)
* add "std::" prefix in headers
* add "using" declarations in .cc files.

so we don't rely on "using namespace std" in one or more included
headers.

Signed-off-by: Kefu Chai <kchai@redhat.com>
src/osdc/Journaler.cc
src/osdc/Journaler.h
src/osdc/Objecter.cc
src/osdc/Objecter.h

index 6e1cbd930ac2cf4500cc2f2dab8ce24e7095ba69..8084a661d7d34be5cc04986dea37cc83a27d2fb6 100644 (file)
@@ -26,6 +26,7 @@
 #define dout_prefix *_dout << objecter->messenger->get_myname() \
   << ".journaler." << name << (readonly ? "(ro) ":"(rw) ")
 
+using namespace std;
 using std::chrono::seconds;
 
 
index 3e8f0f6650b8f01f79e275891f47a4b166b5f402..2dcc1197e61119a841228edd6aaca5fad157b5eb 100644 (file)
@@ -135,7 +135,7 @@ public:
     uint64_t expire_pos;
     uint64_t unused_field;
     uint64_t write_pos;
-    string magic;
+    std::string magic;
     file_layout_t layout; //< The mapping from byte stream offsets
                             //  to RADOS objects
     stream_format_t stream_format; //< The encoding of LogEvents
@@ -186,7 +186,7 @@ public:
       f->close_section(); // journal_header
     }
 
-    static void generate_test_instances(list<Header*> &ls)
+    static void generate_test_instances(std::list<Header*> &ls)
     {
       ls.push_back(new Header());
 
@@ -269,7 +269,7 @@ private:
 
   void _reread_head(Context *onfinish);
   void _set_layout(file_layout_t const *l);
-  list<Context*> waitfor_recover;
+  std::list<Context*> waitfor_recover;
   void _read_head(Context *on_finish, bufferlist *bl);
   void _finish_read_head(int r, bufferlist& bl);
   void _finish_reread_head(int r, bufferlist& bl, Context *finish);
@@ -311,7 +311,7 @@ private:
 
   uint64_t waiting_for_zero_pos;
   interval_set<uint64_t> pending_zero;  // non-contig bits we've zeroed
-  list<Context*> waitfor_prezero;
+  std::list<Context*> waitfor_prezero;
 
   std::map<uint64_t, uint64_t> pending_safe; // flush_pos -> safe_pos
   // when safe through given offset
@@ -330,7 +330,7 @@ private:
   // read buffer.  unused_field + read_buf.length() == prefetch_pos.
   bufferlist read_buf;
 
-  map<uint64_t,bufferlist> prefetch_buf;
+  std::map<uint64_t,bufferlist> prefetch_buf;
 
   uint64_t fetch_len;     // how much to read at a time
   uint64_t temp_fetch_len;
index 6bfebfcc95f883400eb0e50474ab690409c65f60..9565f4326cac930e8cbeaf757c96550c451f02a8 100644 (file)
@@ -75,6 +75,9 @@ using ceph::encode;
 using ceph::Formatter;
 
 using std::defer_lock;
+using std::scoped_lock;
+using std::shared_lock;
+using std::unique_lock;
 
 using ceph::real_time;
 using ceph::real_clock;
index d349ef20670ae81dde483eae9d4c74167598990a..cd7bd2ab59d5b626bbef539f88bda899176ee5e8 100644 (file)
@@ -69,6 +69,7 @@ class MGetPoolStatsReply;
 class MStatfsReply;
 class MCommandReply;
 class MWatchNotify;
+struct ObjectOperation;
 template<typename T>
 struct EnumerationContext;
 template<typename t>
@@ -401,22 +402,22 @@ struct ObjectOperation {
     set_handler(CB_ObjectOperation_stat(psize, nullptr, nullptr, pts, prval, nullptr));
     out_rval.back() = prval;
   }
-  void stat(uint64_t *psize, ceph::real_time *pmtime, nullptr_t) {
+  void stat(uint64_t *psize, ceph::real_time *pmtime, std::nullptr_t) {
     add_op(CEPH_OSD_OP_STAT);
     set_handler(CB_ObjectOperation_stat(psize, pmtime, nullptr, nullptr, nullptr,
                                        nullptr));
   }
-  void stat(uint64_t *psize, time_t *ptime, nullptr_t) {
+  void stat(uint64_t *psize, time_t *ptime, std::nullptr_t) {
     add_op(CEPH_OSD_OP_STAT);
     set_handler(CB_ObjectOperation_stat(psize, nullptr, ptime, nullptr, nullptr,
                                        nullptr));
   }
-  void stat(uint64_t *psize, struct timespec *pts, nullptr_t) {
+  void stat(uint64_t *psize, struct timespec *pts, std::nullptr_t) {
     add_op(CEPH_OSD_OP_STAT);
     set_handler(CB_ObjectOperation_stat(psize, nullptr, nullptr, pts, nullptr,
                                        nullptr));
   }
-  void stat(uint64_t *psize, nullptr_t, nullptr_t) {
+  void stat(uint64_t *psize, std::nullptr_t, std::nullptr_t) {
     add_op(CEPH_OSD_OP_STAT);
     set_handler(CB_ObjectOperation_stat(psize, nullptr, nullptr, nullptr,
                                        nullptr, nullptr));
@@ -868,7 +869,7 @@ struct ObjectOperation {
     ceph::buffer::list bl;
     add_xattr(CEPH_OSD_OP_RMXATTR, name, bl);
   }
-  void setxattrs(map<string, ceph::buffer::list>& attrs) {
+  void setxattrs(std::map<std::string, ceph::buffer::list>& attrs) {
     using ceph::encode;
     ceph::buffer::list bl;
     encode(attrs, bl);
@@ -996,7 +997,7 @@ struct ObjectOperation {
     out_ec.back() = ec;
   }
 
-  void omap_cmp(const std::map<std::string, pair<ceph::buffer::list,int> > &assertions,
+  void omap_cmp(const std::map<std::string, std::pair<ceph::buffer::list,int> > &assertions,
                int *prval) {
     using ceph::encode;
     OSDOp &op = add_op(CEPH_OSD_OP_OMAP_CMP);
@@ -1012,7 +1013,7 @@ struct ObjectOperation {
   }
 
   void omap_cmp(const boost::container::flat_map<
-                 std::string, pair<ceph::buffer::list, int>>& assertions,
+               std::string, std::pair<ceph::buffer::list, int>>& assertions,
                boost::system::error_code *ec) {
     OSDOp &op = add_op(CEPH_OSD_OP_OMAP_CMP);
     ceph::buffer::list bl;
@@ -1293,13 +1294,13 @@ struct ObjectOperation {
     out_ec.back() = ec;
   }
 
-  void omap_set(const map<string, ceph::buffer::list> &map) {
+  void omap_set(const std::map<std::string, ceph::buffer::list> &map) {
     ceph::buffer::list bl;
     encode(map, bl);
     add_data(CEPH_OSD_OP_OMAPSETVALS, 0, bl.length(), bl);
   }
 
-  void omap_set(const boost::container::flat_map<string, ceph::buffer::list>& map) {
+  void omap_set(const boost::container::flat_map<std::string, ceph::buffer::list>& map) {
     ceph::buffer::list bl;
     encode(map, bl);
     add_data(CEPH_OSD_OP_OMAPSETVALS, 0, bl.length(), bl);
@@ -1405,7 +1406,7 @@ struct ObjectOperation {
       out_rval.back() = prval;
     }
   }
-  void list_watchers(vector<neorados::ObjWatcher>* out,
+  void list_watchers(std::vector<neorados::ObjWatcher>* out,
                     boost::system::error_code* ec) {
     add_op(CEPH_OSD_OP_LIST_WATCHERS);
     set_handler(CB_ObjectOperation_decodewatchersneo(out, nullptr, ec));
@@ -2224,7 +2225,7 @@ public:
 
     CommandOp(
       int target_osd,
-      std::vector<string>&& cmd,
+      std::vector<std::string>&& cmd,
       ceph::buffer::list&& inbl,
       decltype(onfinish)&& onfinish)
       : cmd(std::move(cmd)),
@@ -2234,7 +2235,7 @@ public:
 
     CommandOp(
       pg_t pgid,
-      std::vector<string>&& cmd,
+      std::vector<std::string>&& cmd,
       ceph::buffer::list&& inbl,
       decltype(onfinish)&& onfinish)
       : cmd(std::move(cmd)),
@@ -2305,7 +2306,7 @@ public:
       watch_pending_async.push_back(ceph::coarse_mono_clock::now());
     }
     void finished_async() {
-      unique_lock l(watch_lock);
+      std::unique_lock l(watch_lock);
       ceph_assert(!watch_pending_async.empty());
       watch_pending_async.pop_front();
     }
@@ -2598,7 +2599,7 @@ private:
 
   template<typename Callback, typename...Args>
   decltype(auto) with_osdmap(Callback&& cb, Args&&... args) {
-    shared_lock l(rwlock);
+    std::shared_lock l(rwlock);
     return std::forward<Callback>(cb)(*osdmap, std::forward<Args>(args)...);
   }
 
@@ -2661,7 +2662,7 @@ private:
   template<typename CompletionToken>
   auto wait_for_osd_map(CompletionToken&& token) {
     boost::asio::async_completion<CompletionToken, void()> init(token);
-    unique_lock l(rwlock);
+    std::unique_lock l(rwlock);
     if (osdmap->get_epoch()) {
       l.unlock();
       boost::asio::post(std::move(init.completion_handler));