const pg_stat_t &stats) {
return peering_state.update_complete_backfill_object_stats(hoid, stats);
}
+
+ PeeringFacade(PeeringState& peering_state)
+ : peering_state(peering_state) {
+ }
};
// PGFacade -- a facade (in the GoF-defined meaning) simplifying the huge
// interface of crimson's PG class.
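// As with PeeringFacade above, the indirection is there for dependency
// injection: unit tests can substitute a mock facade instead of a live PG.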
decltype(auto) get_projected_last_update() const {
return pg.projected_last_update;
}
+
+ PGFacade(PG& pg) : pg(pg) {}
};
} // namespace crimson::osd
std::unique_ptr<PGFacade> pg);
~BackfillState();
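+  // feed an external event into the backfill state machine; the event
+  // arrives as a ref-counted pointer, so it can outlive its submitter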
+ void process_event(
+ boost::intrusive_ptr<const sc::event_base> evt) {
+ backfill_machine.process_event(*std::move(evt));
+ }
+
private:
hobject_t last_backfill_started;
BackfillInterval backfill_info;
crimson::common::local_conf()->osd_recovery_max_single_start));
}
+seastar::future<bool> BackfillRecovery::do_recovery()
+{
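+  // the PG may have been reset (new peering interval) while this
+  // operation sat in the queue; if so, drop the event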
+ if (pg->has_reset_since(epoch_started))
+ return seastar::make_ready_future<bool>(false);
+ // FIXME: blocking future, limits
+ pg->get_recovery_handler()->dispatch_backfill_event(std::move(evt));
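+  // false is returned in either case: any further backfill work is
+  // driven by the state machine via follow-up events, not by
+  // rescheduling this operation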
+ return seastar::make_ready_future<bool>(false);
+}
+
+} // namespace crimson::osd
epoch_t epoch_started);
};
+class BackfillRecovery final : public BackgroundRecovery {
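+  // the triggering event, type-erased to the statechart base class and
+  // kept alive by ref-counting until do_recovery() dispatches it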
+ boost::intrusive_ptr<const boost::statechart::event_base> evt;
+ seastar::future<bool> do_recovery() override;
+
+public:
+ template <class EventT>
+ BackfillRecovery(
+ Ref<PG> pg,
+ ShardServices &ss,
+ epoch_t epoch_started,
+ const EventT& evt);
+};
+
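+// The constructor is templated so any concrete statechart event can be
+// accepted; intrusive_from_this() returns a ref-counted handle to it,
+// cloning the event if it was allocated on the stack.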
+template <class EventT>
+BackfillRecovery::BackfillRecovery(
+ Ref<PG> pg,
+ ShardServices &ss,
+ const epoch_t epoch_started,
+ const EventT& evt)
+ : BackgroundRecovery(
+ std::move(pg),
+ ss,
+ epoch_started,
+ crimson::osd::scheduler::scheduler_class_t::background_best_effort),
+ evt(evt.intrusive_from_this())
+{}
+
}
}
void on_backfill_reserved() final {
- ceph_assert(0 == "Not implemented");
+ recovery_handler->on_backfill_reserved();
}
void on_backfill_canceled() final {
ceph_assert(0 == "Not implemented");
#include <fmt/ostream.h>
#include "crimson/common/type_helpers.h"
+#include "crimson/osd/backfill_facades.h"
#include "crimson/osd/osd_operations/background_recovery.h"
#include "crimson/osd/osd_operations/peering_event.h"
#include "crimson/osd/pg.h"
{
ceph_abort_msg("Not implemented");
}
+
+void PGRecovery::dispatch_backfill_event(
+ boost::intrusive_ptr<const boost::statechart::event_base> evt)
+{
+ logger().debug("{}", __func__);
+ backfill_state->process_event(evt);
+}
+
+void PGRecovery::on_backfill_reserved()
+{
+ logger().debug("{}", __func__);
+  // PImpl and dependency injection for the sake of unit-testability.
+  // I'm not worried about the performance here.
+ using BackfillState = crimson::osd::BackfillState;
+ backfill_state = std::make_unique<BackfillState>(
+ *this,
+ std::make_unique<BackfillState::PeeringFacade>(pg->get_peering_state()),
+ std::make_unique<BackfillState::PGFacade>(
+ *static_cast<crimson::osd::PG*>(pg)));
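+  // kick the machine off by scheduling the initial Triggered event as a
+  // fresh BackfillRecovery operation instead of processing it inline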
+ using BackfillRecovery = crimson::osd::BackfillRecovery;
+ pg->get_shard_services().start_operation<BackfillRecovery>(
+ static_cast<crimson::osd::PG*>(pg),
+ pg->get_shard_services(),
+ pg->get_osdmap_epoch(),
+ BackfillState::Triggered{});
+}
void start_pglogbased_recovery();
crimson::osd::blocking_future<bool> start_recovery_ops(size_t max_to_start);
+ void on_backfill_reserved();
+ void dispatch_backfill_event(
+ boost::intrusive_ptr<const boost::statechart::event_base> evt);
+
seastar::future<> stop() { return seastar::now(); }
private:
PGRecoveryListener* pg;
seastar::future<> handle_scan(MOSDPGScan& m);
// backfill begin
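+  // owns the backfill state machine; instantiated in on_backfill_reserved()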
+ std::unique_ptr<crimson::osd::BackfillState> backfill_state;
+
void request_replica_scan(
const pg_shard_t& target,
const hobject_t& begin,