#include "messages/MOSDPGRecoveryDelete.h"
#include "messages/MOSDPGRecoveryDeleteReply.h"
#include "messages/MOSDRepOpReply.h"
+#include "messages/MOSDScrub2.h"
#include "messages/MPGStats.h"
#include "os/Transaction.h"
return handle_rep_op(conn, boost::static_pointer_cast<MOSDRepOp>(m));
case MSG_OSD_REPOPREPLY:
return handle_rep_op_reply(conn, boost::static_pointer_cast<MOSDRepOpReply>(m));
+ case MSG_OSD_SCRUB2:
+ return handle_scrub(conn, boost::static_pointer_cast<MOSDScrub2>(m));
default:
logger().info("ms_dispatch unhandled message {}", *m);
return seastar::now();
return seastar::now();
}
// Handle an incoming MOSDScrub2 request: for every PG named in the message,
// inject a RequestScrub event into that PG's peering state machine so the
// scrub is scheduled locally.
seastar::future<> OSD::handle_scrub(crimson::net::Connection* conn,
                                    Ref<MOSDScrub2> m)
{
  if (m->fsid != superblock.cluster_fsid) {
    // message is addressed to a different cluster; ignore it
    logger().warn("fsid mismatched");
    return seastar::now();
  }
  // scrub_pgs is moved out of the message, but `m` itself is kept alive by
  // the by-value Ref capture below, so the fields read inside the lambda
  // (deep/repair/epoch/source) remain valid for the whole async operation.
  return seastar::parallel_for_each(std::move(m->scrub_pgs),
    // conn->get_shared() extends the connection's lifetime past this call
    [m, conn=conn->get_shared(), this](spg_t pgid) {
    // the requester is the OSD that sent this message, on the same shard
    pg_shard_t from_shard{static_cast<int>(m->get_source().num()),
                          pgid.shard};
    PeeringState::RequestScrub scrub_request{m->deep, m->repair};
    return shard_services.start_operation<RemotePeeringEvent>(
      *this,
      conn,
      shard_services,
      from_shard,
      pgid,
      // NOTE(review): m->epoch is used as both epoch_sent and epoch_requested
      // of the peering event — presumably intentional; confirm against
      // PGPeeringEvent's constructor.
      PGPeeringEvent{m->epoch, m->epoch, scrub_request}).second;
  });
}
+
seastar::future<> OSD::handle_mark_me_down(crimson::net::Connection* conn,
Ref<MOSDMarkMeDown> m)
{
class MOSDMap;
class MOSDRepOpReply;
class MOSDRepOp;
+class MOSDScrub2;
class OSDMap;
class OSDMeta;
class Heartbeat;
Ref<MOSDPeeringOp> m);
seastar::future<> handle_recovery_subreq(crimson::net::Connection* conn,
Ref<MOSDFastDispatchOp> m);
+ seastar::future<> handle_scrub(crimson::net::Connection* conn,
+ Ref<MOSDScrub2> m);
seastar::future<> handle_mark_me_down(crimson::net::Connection* conn,
Ref<MOSDMarkMeDown> m);
shard_services.dec_pg_num();
}
+void PG::scrub_requested(bool deep, bool repair, bool need_auto)
+{
+ // TODO: should update the stats upon finishing the scrub
+ peering_state.update_stats([deep, this](auto& history, auto& stats) {
+ const utime_t now = ceph_clock_now();
+ history.last_scrub = peering_state.get_info().last_update;
+ history.last_scrub_stamp = now;
+ history.last_clean_scrub_stamp = now;
+ if (deep) {
+ history.last_deep_scrub = history.last_scrub;
+ history.last_deep_scrub_stamp = now;
+ }
+ // yes, please publish the stats
+ return true;
+ });
+}
+
// Peering state machine hook: log every state entered, at info level.
void PG::log_state_enter(const char *state)
{
  logger().info("Entering state: {}", state);
}
// Not needed yet -- mainly for scrub scheduling
}
- void scrub_requested(bool deep, bool repair, bool need_auto = false) final {
- ceph_assert(0 == "Not implemented");
- }
+ void scrub_requested(bool deep, bool repair, bool need_auto = false) final;
uint64_t get_snap_trimq_size() const final {
return 0;