git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
osd/: move append_log into PeeringState
author	sjust@redhat.com <sjust@redhat.com>
Thu, 4 Apr 2019 22:33:53 +0000 (15:33 -0700)
committer	sjust@redhat.com <sjust@redhat.com>
Wed, 1 May 2019 18:22:26 +0000 (11:22 -0700)
Signed-off-by: sjust@redhat.com <sjust@redhat.com>
src/osd/PG.cc
src/osd/PG.h
src/osd/PeeringState.cc
src/osd/PeeringState.h
src/osd/PrimaryLogPG.h

index 6db4b3d5aec97de233754a153b03ed1698e29b57..e63af28e2d931486226472e2282421afd40133d1 100644 (file)
@@ -1143,105 +1143,6 @@ int PG::peek_map_epoch(ObjectStore *store,
 #pragma GCC diagnostic pop
 #pragma GCC diagnostic warning "-Wpragmas"
 
-void PG::add_log_entry(const pg_log_entry_t& e, bool applied)
-{
-  // raise last_complete only if we were previously up to date
-  if (info.last_complete == info.last_update)
-    info.last_complete = e.version;
-  
-  // raise last_update.
-  ceph_assert(e.version > info.last_update);
-  info.last_update = e.version;
-
-  // raise user_version, if it increased (it may have not get bumped
-  // by all logged updates)
-  if (e.user_version > info.last_user_version)
-    info.last_user_version = e.user_version;
-
-  // log mutation
-  pg_log.add(e, applied);
-  dout(10) << "add_log_entry " << e << dendl;
-}
-
-
-void PG::append_log(
-  const vector<pg_log_entry_t>& logv,
-  eversion_t trim_to,
-  eversion_t roll_forward_to,
-  ObjectStore::Transaction &t,
-  bool transaction_applied,
-  bool async)
-{
-  if (transaction_applied)
-    update_snap_map(logv, t);
-
-  /* The primary has sent an info updating the history, but it may not
-   * have arrived yet.  We want to make sure that we cannot remember this
-   * write without remembering that it happened in an interval which went
-   * active in epoch history.last_epoch_started.
-   */
-  if (info.last_epoch_started != info.history.last_epoch_started) {
-    info.history.last_epoch_started = info.last_epoch_started;
-  }
-  if (info.last_interval_started != info.history.last_interval_started) {
-    info.history.last_interval_started = info.last_interval_started;
-  }
-  dout(10) << "append_log " << pg_log.get_log() << " " << logv << dendl;
-
-  PGLogEntryHandler handler{this, &t};
-  if (!transaction_applied) {
-     /* We must be a backfill or async recovery peer, so it's ok if we apply
-      * out-of-turn since we won't be considered when
-      * determining a min possible last_update.
-      *
-      * We skip_rollforward() here, which advances the crt, without
-      * doing an actual rollforward. This avoids cleaning up entries
-      * from the backend and we do not end up in a situation, where the
-      * object is deleted before we can _merge_object_divergent_entries().
-      */
-    pg_log.skip_rollforward();
-  }
-
-  for (vector<pg_log_entry_t>::const_iterator p = logv.begin();
-       p != logv.end();
-       ++p) {
-    add_log_entry(*p, transaction_applied);
-
-    /* We don't want to leave the rollforward artifacts around
-     * here past last_backfill.  It's ok for the same reason as
-     * above */
-    if (transaction_applied &&
-       p->soid > info.last_backfill) {
-      pg_log.roll_forward(&handler);
-    }
-  }
-  auto last = logv.rbegin();
-  if (is_primary() && last != logv.rend()) {
-    projected_log.skip_can_rollback_to_to_head();
-    projected_log.trim(cct, last->version, nullptr, nullptr, nullptr);
-  }
-
-  if (transaction_applied && roll_forward_to > pg_log.get_can_rollback_to()) {
-    pg_log.roll_forward_to(
-      roll_forward_to,
-      &handler);
-    last_rollback_info_trimmed_to_applied = roll_forward_to;
-  }
-
-  dout(10) << __func__ << " approx pg log length =  "
-           << pg_log.get_log().approx_size() << dendl;
-  dout(10) << __func__ << " transaction_applied = "
-           << transaction_applied << dendl;
-  if (!transaction_applied || async)
-    dout(10) << __func__ << " " << pg_whoami
-             << " is async_recovery or backfill target" << dendl;
-  pg_log.trim(trim_to, info, transaction_applied, async);
-
-  // update the local pg, pg log
-  dirty_info = true;
-  write_if_dirty(t);
-}
-
 bool PG::check_log_for_corruption(ObjectStore *store)
 {
   /// TODO: this method needs to work with the omap log
index 461078c874efa5afb4093b2127c2f10ead54d948..e3fbf1e65268d68b309e581a788aaf13a27347af 100644 (file)
@@ -1481,14 +1481,6 @@ protected:
     return at_version;
   }
 
-  void add_log_entry(const pg_log_entry_t& e, bool applied);
-  void append_log(
-    const vector<pg_log_entry_t>& logv,
-    eversion_t trim_to,
-    eversion_t roll_forward_to,
-    ObjectStore::Transaction &t,
-    bool transaction_applied = true,
-    bool async = false);
   bool check_log_for_corruption(ObjectStore *store);
 
   std::string get_corrupt_pg_log_name() const;
index 8b09dbe7e9461138cf707e63a3347317658a4a84..00ccd4b5dd3b5f617330ab66d2fe6a3332c0fe19 100644 (file)
@@ -3498,6 +3498,98 @@ void PeeringState::merge_new_log_entries(
       peer_info);
   }
 }
+
+void PeeringState::add_log_entry(const pg_log_entry_t& e, bool applied)
+{
+  // raise last_complete only if we were previously up to date
+  if (info.last_complete == info.last_update)
+    info.last_complete = e.version;
+
+  // raise last_update.
+  ceph_assert(e.version > info.last_update);
+  info.last_update = e.version;
+
+  // raise user_version, if it increased (it may have not get bumped
+  // by all logged updates)
+  if (e.user_version > info.last_user_version)
+    info.last_user_version = e.user_version;
+
+  // log mutation
+  pg_log.add(e, applied);
+  psdout(10) << "add_log_entry " << e << dendl;
+}
+
+
+void PeeringState::append_log(
+  const vector<pg_log_entry_t>& logv,
+  eversion_t trim_to,
+  eversion_t roll_forward_to,
+  ObjectStore::Transaction &t,
+  bool transaction_applied,
+  bool async)
+{
+  /* The primary has sent an info updating the history, but it may not
+   * have arrived yet.  We want to make sure that we cannot remember this
+   * write without remembering that it happened in an interval which went
+   * active in epoch history.last_epoch_started.
+   */
+  if (info.last_epoch_started != info.history.last_epoch_started) {
+    info.history.last_epoch_started = info.last_epoch_started;
+  }
+  if (info.last_interval_started != info.history.last_interval_started) {
+    info.history.last_interval_started = info.last_interval_started;
+  }
+  psdout(10) << "append_log " << pg_log.get_log() << " " << logv << dendl;
+
+  PGLog::LogEntryHandlerRef handler{pl->get_log_handler(&t)};
+  if (!transaction_applied) {
+     /* We must be a backfill or async recovery peer, so it's ok if we apply
+      * out-of-turn since we won't be considered when
+      * determining a min possible last_update.
+      *
+      * We skip_rollforward() here, which advances the crt, without
+      * doing an actual rollforward. This avoids cleaning up entries
+      * from the backend and we do not end up in a situation, where the
+      * object is deleted before we can _merge_object_divergent_entries().
+      */
+    pg_log.skip_rollforward();
+  }
+
+  for (vector<pg_log_entry_t>::const_iterator p = logv.begin();
+       p != logv.end();
+       ++p) {
+    add_log_entry(*p, transaction_applied);
+
+    /* We don't want to leave the rollforward artifacts around
+     * here past last_backfill.  It's ok for the same reason as
+     * above */
+    if (transaction_applied &&
+       p->soid > info.last_backfill) {
+      pg_log.roll_forward(handler.get());
+    }
+  }
+  if (transaction_applied && roll_forward_to > pg_log.get_can_rollback_to()) {
+    pg_log.roll_forward_to(
+      roll_forward_to,
+      handler.get());
+    last_rollback_info_trimmed_to_applied = roll_forward_to;
+  }
+
+  psdout(10) << __func__ << " approx pg log length =  "
+            << pg_log.get_log().approx_size() << dendl;
+  psdout(10) << __func__ << " transaction_applied = "
+            << transaction_applied << dendl;
+  if (!transaction_applied || async)
+    psdout(10) << __func__ << " " << pg_whoami
+              << " is async_recovery or backfill target" << dendl;
+  pg_log.trim(trim_to, info, transaction_applied, async);
+
+  // update the local pg, pg log
+  dirty_info = true;
+  write_if_dirty(t);
+}
+
+
 /*------------ Peering State Machine----------------*/
 #undef dout_prefix
 #define dout_prefix (context< PeeringMachine >().dpp->gen_prefix(*_dout) \
index 93bb82f53baa9902fd17b4db8866fde5b70e81d5..3857f6d58278ebaa2ba72efdd4cd39ecab8af2eb 100644 (file)
@@ -1428,6 +1428,15 @@ public:
     ObjectStore::Transaction &t,
     boost::optional<eversion_t> trim_to,
     boost::optional<eversion_t> roll_forward_to);
+
+  void add_log_entry(const pg_log_entry_t& e, bool applied);
+  void append_log(
+    const vector<pg_log_entry_t>& logv,
+    eversion_t trim_to,
+    eversion_t roll_forward_to,
+    ObjectStore::Transaction &t,
+    bool transaction_applied,
+    bool async);
 public:
   PeeringState(
     CephContext *cct,
index a55d224d058a93d24d8b2a93ef9ea6d27c3ae868..a596658661b1e11f98e9328f088ee9a665af97a6 100644 (file)
@@ -449,7 +449,16 @@ public:
     if (hset_history) {
       info.hit_set = *hset_history;
     }
-    append_log(logv, trim_to, roll_forward_to, t, transaction_applied, async);
+    if (transaction_applied) {
+      update_snap_map(logv, t);
+    }
+    auto last = logv.rbegin();
+    if (is_primary() && last != logv.rend()) {
+      projected_log.skip_can_rollback_to_to_head();
+      projected_log.trim(cct, last->version, nullptr, nullptr, nullptr);
+    }
+    recovery_state.append_log(
+      logv, trim_to, roll_forward_to, t, transaction_applied, async);
   }
 
   void op_applied(const eversion_t &applied_version) override;