scheduler_class(scheduler_class)
{}
-seastar::future<bool> BackgroundRecovery::do_recovery()
-{
- if (pg->has_reset_since(epoch_started))
- return seastar::make_ready_future<bool>(false);
- return with_blocking_future(
- pg->get_recovery_handler()->start_recovery_ops(
- crimson::common::local_conf()->osd_recovery_max_single_start));
-}
-
void BackgroundRecovery::print(std::ostream &lhs) const
{
lhs << "BackgroundRecovery(" << pg->get_pgid() << ")";
}
+PglogBasedRecovery::PglogBasedRecovery(
+ Ref<PG> pg,
+ ShardServices &ss,
+ const epoch_t epoch_started)
+ : BackgroundRecovery(
+ std::move(pg),
+ ss,
+ epoch_started,
+ crimson::osd::scheduler::scheduler_class_t::background_recovery)
+{}
+
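+// Bail out if the PG has been reset since this op was queued; otherwise start
+// up to osd_recovery_max_single_start recovery ops via the PG's recovery handler.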
+seastar::future<bool> PglogBasedRecovery::do_recovery()
+{
+ if (pg->has_reset_since(epoch_started))
+ return seastar::make_ready_future<bool>(false);
+ return with_blocking_future(
+ pg->get_recovery_handler()->start_recovery_ops(
+ crimson::common::local_conf()->osd_recovery_max_single_start));
+}
+
}
#pragma once
+#include <boost/statechart/event_base.hpp>
+
#include "crimson/net/Connection.h"
#include "crimson/osd/osd_operation.h"
#include "crimson/common/type_helpers.h"
scheduler_class
};
}
- virtual seastar::future<bool> do_recovery();
+ virtual seastar::future<bool> do_recovery() = 0;
};
class UrgentRecovery final : public BackgroundRecovery {
const eversion_t& need,
Ref<PG> pg,
ShardServices& ss,
- epoch_t epoch_started,
- crimson::osd::scheduler::scheduler_class_t scheduler_class)
- : BackgroundRecovery{pg, ss, epoch_started, scheduler_class},
+ epoch_t epoch_started)
+    : BackgroundRecovery{
+        pg, ss, epoch_started,
+        crimson::osd::scheduler::scheduler_class_t::immediate},
soid{soid}, need(need) {}
void print(std::ostream&) const final;
void dump_detail(Formatter* f) const final;
seastar::future<bool> do_recovery() override;
};
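+// PG-log-driven recovery of objects the PG knows are missing; scheduled under
+// the background_recovery class, in contrast to UrgentRecovery above, which
+// runs at immediate priority.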
+class PglogBasedRecovery final : public BackgroundRecovery {
+ seastar::future<bool> do_recovery() override;
+
+public:
+ PglogBasedRecovery(
+ Ref<PG> pg,
+ ShardServices &ss,
+ epoch_t epoch_started);
+};
+
}
const hobject_t& soid = m->get_hobj();
if (pg.is_unreadable_object(soid, &ver)) {
auto [op, fut] = osd.get_shard_services().start_operation<UrgentRecovery>(
- soid, ver, pgref, osd.get_shard_services(), m->get_min_epoch(),
- crimson::osd::scheduler::scheduler_class_t::immediate);
+ soid, ver, pgref, osd.get_shard_services(), m->get_min_epoch());
return std::move(fut);
}
return seastar::now();
ceph_assert(0 == "Not implemented");
}
void on_recovery_reserved() final {
- recovery_handler->start_background_recovery(
- crimson::osd::scheduler::scheduler_class_t::background_recovery);
+ recovery_handler->start_pglogbased_recovery();
}
}
}
-void PGRecovery::start_background_recovery(
- crimson::osd::scheduler::scheduler_class_t klass)
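+// Queue a PglogBasedRecovery op for this PG at the current OSDMap epoch.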
+void PGRecovery::start_pglogbased_recovery()
{
- using BackgroundRecovery = crimson::osd::BackgroundRecovery;
- (void) pg->get_shard_services().start_operation<BackgroundRecovery>(
+ using PglogBasedRecovery = crimson::osd::PglogBasedRecovery;
+ (void) pg->get_shard_services().start_operation<PglogBasedRecovery>(
static_cast<crimson::osd::PG*>(pg),
pg->get_shard_services(),
- pg->get_osdmap_epoch(),
- klass);
+ pg->get_osdmap_epoch());
}
crimson::osd::blocking_future<bool>
public:
PGRecovery(PGRecoveryListener* pg) : pg(pg) {}
virtual ~PGRecovery() {}
- void start_background_recovery(
- crimson::osd::scheduler::scheduler_class_t klass);
+ void start_pglogbased_recovery();
crimson::osd::blocking_future<bool> start_recovery_ops(size_t max_to_start);
seastar::future<> stop() { return seastar::now(); }