From 18c83bb97ecb5a001921c0d55e6f4efd4b255bd4 Mon Sep 17 00:00:00 2001 From: xie xingguo Date: Mon, 20 Mar 2017 10:17:37 +0800 Subject: [PATCH] osd/PG: kill PG_STATE_REPLAY The replay state of a PG is not functional and hence should not be made visible to the user. Signed-off-by: xie xingguo --- doc/rados/operations/pg-states.rst | 3 --- src/mon/MonCommands.h | 8 ++++---- src/osd/PG.cc | 1 - src/osd/osd_types.cc | 4 ---- src/osd/osd_types.h | 2 +- 5 files changed, 5 insertions(+), 13 deletions(-) diff --git a/doc/rados/operations/pg-states.rst b/doc/rados/operations/pg-states.rst index 94088546943..03a92426de5 100644 --- a/doc/rados/operations/pg-states.rst +++ b/doc/rados/operations/pg-states.rst @@ -19,9 +19,6 @@ map is ``active + clean``. *Down* A replica with necessary data is down, so the placement group is offline. -*Replay* - The placement group is waiting for clients to replay operations after an OSD crashed. - *Scrubbing* Ceph is checking the placement group for inconsistencies. diff --git a/src/mon/MonCommands.h b/src/mon/MonCommands.h index cefb2102cfe..7589054bfa9 100644 --- a/src/mon/MonCommands.h +++ b/src/mon/MonCommands.h @@ -139,21 +139,21 @@ COMMAND("pg dump_stuck " \ "pg", "r", "cli,rest") COMMAND("pg ls-by-pool " \ "name=poolstr,type=CephString " \ - "name=states,type=CephChoices,strings=active|clean|down|replay|scrubbing|degraded|inconsistent|peering|repair|recovering|backfill_wait|incomplete|stale|remapped|deep_scrub|backfill|backfill_toofull|recovery_wait|undersized|activating|peered,n=N,req=false ", \ + "name=states,type=CephChoices,strings=active|clean|down|scrubbing|degraded|inconsistent|peering|repair|recovering|backfill_wait|incomplete|stale|remapped|deep_scrub|backfill|backfill_toofull|recovery_wait|undersized|activating|peered,n=N,req=false ", \ "list pg with pool = [poolname]", "pg", "r", "cli,rest") COMMAND("pg ls-by-primary " \ "name=osd,type=CephOsdName " \ "name=pool,type=CephInt,req=false " \ - 
"name=states,type=CephChoices,strings=active|clean|down|replay|scrubbing|degraded|inconsistent|peering|repair|recovering|backfill_wait|incomplete|stale|remapped|deep_scrub|backfill|backfill_toofull|recovery_wait|undersized|activating|peered,n=N,req=false ", \ + "name=states,type=CephChoices,strings=active|clean|down|scrubbing|degraded|inconsistent|peering|repair|recovering|backfill_wait|incomplete|stale|remapped|deep_scrub|backfill|backfill_toofull|recovery_wait|undersized|activating|peered,n=N,req=false ", \ "list pg with primary = [osd]", "pg", "r", "cli,rest") COMMAND("pg ls-by-osd " \ "name=osd,type=CephOsdName " \ "name=pool,type=CephInt,req=false " \ - "name=states,type=CephChoices,strings=active|clean|down|replay|scrubbing|degraded|inconsistent|peering|repair|recovering|backfill_wait|incomplete|stale|remapped|deep_scrub|backfill|backfill_toofull|recovery_wait|undersized|activating|peered,n=N,req=false ", \ + "name=states,type=CephChoices,strings=active|clean|down|scrubbing|degraded|inconsistent|peering|repair|recovering|backfill_wait|incomplete|stale|remapped|deep_scrub|backfill|backfill_toofull|recovery_wait|undersized|activating|peered,n=N,req=false ", \ "list pg on osd [osd]", "pg", "r", "cli,rest") COMMAND("pg ls " \ "name=pool,type=CephInt,req=false " \ - "name=states,type=CephChoices,strings=active|clean|down|replay|scrubbing|degraded|inconsistent|peering|repair|recovering|backfill_wait|incomplete|stale|remapped|deep_scrub|backfill|backfill_toofull|recovery_wait|undersized|activating|peered,n=N,req=false ", \ + "name=states,type=CephChoices,strings=active|clean|down|scrubbing|degraded|inconsistent|peering|repair|recovering|backfill_wait|incomplete|stale|remapped|deep_scrub|backfill|backfill_toofull|recovery_wait|undersized|activating|peered,n=N,req=false ", \ "list pg with specific pool, osd, state", "pg", "r", "cli,rest") COMMAND("pg map name=pgid,type=CephPgid", "show mapping of pg to osds", \ "pg", "r", "cli,rest") diff --git a/src/osd/PG.cc 
b/src/osd/PG.cc index 44ef72f0210..bc03bb5c133 100644 --- a/src/osd/PG.cc +++ b/src/osd/PG.cc @@ -7071,7 +7071,6 @@ void PG::RecoveryState::Active::exit() pg->state_clear(PG_STATE_BACKFILL_TOOFULL); pg->state_clear(PG_STATE_BACKFILL_WAIT); pg->state_clear(PG_STATE_RECOVERY_WAIT); - pg->state_clear(PG_STATE_REPLAY); utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_active_latency, dur); pg->agent_stop(); diff --git a/src/osd/osd_types.cc b/src/osd/osd_types.cc index c7671356a02..7eedcd3a2e4 100644 --- a/src/osd/osd_types.cc +++ b/src/osd/osd_types.cc @@ -793,8 +793,6 @@ std::string pg_state_string(int state) oss << "recovering+"; if (state & PG_STATE_DOWN) oss << "down+"; - if (state & PG_STATE_REPLAY) - oss << "replay+"; if (state & PG_STATE_UNDERSIZED) oss << "undersized+"; if (state & PG_STATE_DEGRADED) @@ -843,8 +841,6 @@ int pg_string_state(const std::string& state) type = PG_STATE_CLEAN; else if (state == "down") type = PG_STATE_DOWN; - else if (state == "replay") - type = PG_STATE_REPLAY; else if (state == "scrubbing") type = PG_STATE_SCRUBBING; else if (state == "degraded") diff --git a/src/osd/osd_types.h b/src/osd/osd_types.h index 7fe56fe707f..7e759884d18 100644 --- a/src/osd/osd_types.h +++ b/src/osd/osd_types.h @@ -954,7 +954,7 @@ inline ostream& operator<<(ostream& out, const osd_stat_t& s) { #define PG_STATE_ACTIVE (1<<1) // i am active. (primary: replicas too) #define PG_STATE_CLEAN (1<<2) // peers are complete, clean of stray replicas. #define PG_STATE_DOWN (1<<4) // a needed replica is down, PG offline -#define PG_STATE_REPLAY (1<<5) // crashed, waiting for replay +//#define PG_STATE_REPLAY (1<<5) // crashed, waiting for replay //#define PG_STATE_STRAY (1<<6) // i must notify the primary i exist. //#define PG_STATE_SPLITTING (1<<7) // i am splitting #define PG_STATE_SCRUBBING (1<<8) // scrubbing -- 2.39.5