git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
crimson/osd: suppress warnings about unused future<> return values (branch: 30474/head)
author: Samuel Just <sjust@redhat.com>
Wed, 18 Sep 2019 19:17:56 +0000 (12:17 -0700)
committer: Kefu Chai <kchai@redhat.com>
Thu, 19 Sep 2019 10:23:20 +0000 (18:23 +0800)
These are dropped on purpose, suppress warnings.

Signed-off-by: Samuel Just <sjust@redhat.com>
src/crimson/osd/heartbeat.cc
src/crimson/osd/osd.cc
src/crimson/osd/pg.cc
src/crimson/osd/pg.h

index 374d2e3e73f44b33c6f91c36edf9a6ef9c6e69b4..b778c08dd26b760abcfa8031fac5168a26c2e573 100644 (file)
@@ -29,7 +29,8 @@ Heartbeat::Heartbeat(const ceph::osd::ShardServices& service,
     monc{monc},
     front_msgr{front_msgr},
     back_msgr{back_msgr},
-    timer{[this] {send_heartbeats();}}
+    // do this in background
+    timer{[this] { (void)send_heartbeats(); }}
 {}
 
 seastar::future<> Heartbeat::start(entity_addrvec_t front_addrs,
index 7d2a5ad49757426d1e17adde3f2ec95dba3fc95d..ddd8742d1a74d1f88ea168df6afe7ae824c1f553 100644 (file)
@@ -60,7 +60,8 @@ OSD::OSD(int id, uint32_t nonce,
          ceph::net::Messenger& hb_back_msgr)
   : whoami{id},
     nonce{nonce},
-    beacon_timer{[this] { send_beacon(); }},
+    // do this in background
+    beacon_timer{[this] { (void)send_beacon(); }},
     cluster_msgr{cluster_msgr},
     public_msgr{public_msgr},
     monc{new ceph::mon::Client{public_msgr, *this}},
@@ -70,7 +71,8 @@ OSD::OSD(int id, uint32_t nonce,
       local_conf().get_val<std::string>("osd_data"))},
     shard_services{*this, cluster_msgr, public_msgr, *monc, *mgrc, *store},
     heartbeat{new Heartbeat{shard_services, *monc, hb_front_msgr, hb_back_msgr}},
-    heartbeat_timer{[this] { update_heartbeat_peers(); }},
+    // do this in background
+    heartbeat_timer{[this] { (void)update_heartbeat_peers(); }},
     osdmap_gate("OSD::osdmap_gate", std::make_optional(std::ref(shard_services)))
 {
   osdmaps[0] = boost::make_local_shared<OSDMap>();
@@ -1034,7 +1036,7 @@ OSD::get_or_create_pg(
   auto [fut, creating] = pg_map.get_pg(pgid, bool(info));
   if (!creating && info) {
     pg_map.set_creating(pgid);
-    handle_pg_create_info(std::move(info));
+    (void)handle_pg_create_info(std::move(info));
   }
   return std::move(fut);
 }
index d9814108b797bd851b849a5a33a0c04a3d72b83a..42a7688c3778a6646ddbb2271f2f530f3646dfaf 100644 (file)
@@ -113,7 +113,7 @@ PG::PG(
 PG::~PG() {}
 
 bool PG::try_flush_or_schedule_async() {
-  shard_services.get_store().do_transaction(
+  (void)shard_services.get_store().do_transaction(
     coll_ref,
     ObjectStore::Transaction()).then(
       [this, epoch=get_osdmap_epoch()]() {
index 689d78dd251f602413565bc55ba907667e3c850f..25d4a3b909808477862079c4d4fe474d0dc84240 100644 (file)
@@ -158,11 +158,11 @@ public:
   void send_cluster_message(
     int osd, Message *m,
     epoch_t epoch, bool share_map_update=false) final {
-    shard_services.send_to_osd(osd, m, epoch);
+    (void)shard_services.send_to_osd(osd, m, epoch);
   }
 
   void send_pg_created(pg_t pgid) final {
-    shard_services.send_pg_created(pgid);
+    (void)shard_services.send_pg_created(pgid);
   }
 
   bool try_flush_or_schedule_async() final;