Usage::
- ceph osd pool get <poolname> size|min_size|crash_replay_interval|pg_num|
+ ceph osd pool get <poolname> size|min_size|pg_num|
pgp_num|crush_ruleset|auid|write_fadvise_dontneed
Only for tiered pools::
Usage::
- ceph osd pool set <poolname> size|min_size|crash_replay_interval|pg_num|
+ ceph osd pool set <poolname> size|min_size|pg_num|
pgp_num|crush_ruleset|hashpspool|nodelete|nopgchange|nosizechange|
hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|debug_fake_ec_pool|
target_max_bytes|target_max_objects|cache_target_dirty_ratio|
"rename <srcpool> to <destpool>", "osd", "rw", "cli,rest")
COMMAND("osd pool get " \
"name=pool,type=CephPoolname " \
- "name=var,type=CephChoices,strings=size|min_size|crash_replay_interval|pg_num|pgp_num|crush_rule|hashpspool|nodelete|nopgchange|nosizechange|write_fadvise_dontneed|noscrub|nodeep-scrub|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|use_gmt_hitset|auid|target_max_objects|target_max_bytes|cache_target_dirty_ratio|cache_target_dirty_high_ratio|cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|erasure_code_profile|min_read_recency_for_promote|all|min_write_recency_for_promote|fast_read|hit_set_grade_decay_rate|hit_set_search_last_n|scrub_min_interval|scrub_max_interval|deep_scrub_interval|recovery_priority|recovery_op_priority|scrub_priority|compression_mode|compression_algorithm|compression_required_ratio|compression_max_blob_size|compression_min_blob_size|csum_type|csum_min_block|csum_max_block", \
+ "name=var,type=CephChoices,strings=size|min_size|pg_num|pgp_num|crush_rule|hashpspool|nodelete|nopgchange|nosizechange|write_fadvise_dontneed|noscrub|nodeep-scrub|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|use_gmt_hitset|auid|target_max_objects|target_max_bytes|cache_target_dirty_ratio|cache_target_dirty_high_ratio|cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|erasure_code_profile|min_read_recency_for_promote|all|min_write_recency_for_promote|fast_read|hit_set_grade_decay_rate|hit_set_search_last_n|scrub_min_interval|scrub_max_interval|deep_scrub_interval|recovery_priority|recovery_op_priority|scrub_priority|compression_mode|compression_algorithm|compression_required_ratio|compression_max_blob_size|compression_min_blob_size|csum_type|csum_min_block|csum_max_block", \
"get pool parameter <var>", "osd", "r", "cli,rest")
COMMAND("osd pool set " \
"name=pool,type=CephPoolname " \
- "name=var,type=CephChoices,strings=size|min_size|crash_replay_interval|pg_num|pgp_num|crush_rule|hashpspool|nodelete|nopgchange|nosizechange|write_fadvise_dontneed|noscrub|nodeep-scrub|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|use_gmt_hitset|target_max_bytes|target_max_objects|cache_target_dirty_ratio|cache_target_dirty_high_ratio|cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|auid|min_read_recency_for_promote|min_write_recency_for_promote|fast_read|hit_set_grade_decay_rate|hit_set_search_last_n|scrub_min_interval|scrub_max_interval|deep_scrub_interval|recovery_priority|recovery_op_priority|scrub_priority|compression_mode|compression_algorithm|compression_required_ratio|compression_max_blob_size|compression_min_blob_size|csum_type|csum_min_block|csum_max_block|allow_ec_overwrites " \
+ "name=var,type=CephChoices,strings=size|min_size|pg_num|pgp_num|crush_rule|hashpspool|nodelete|nopgchange|nosizechange|write_fadvise_dontneed|noscrub|nodeep-scrub|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|use_gmt_hitset|target_max_bytes|target_max_objects|cache_target_dirty_ratio|cache_target_dirty_high_ratio|cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|auid|min_read_recency_for_promote|min_write_recency_for_promote|fast_read|hit_set_grade_decay_rate|hit_set_search_last_n|scrub_min_interval|scrub_max_interval|deep_scrub_interval|recovery_priority|recovery_op_priority|scrub_priority|compression_mode|compression_algorithm|compression_required_ratio|compression_max_blob_size|compression_min_blob_size|csum_type|csum_min_block|csum_max_block|allow_ec_overwrites " \
"name=val,type=CephString " \
"name=force,type=CephChoices,strings=--yes-i-really-mean-it,req=false", \
"set pool parameter <var> to <val>", "osd", "rw", "cli,rest")
namespace {
enum osd_pool_get_choices {
- SIZE, MIN_SIZE, CRASH_REPLAY_INTERVAL,
+ SIZE, MIN_SIZE,
PG_NUM, PGP_NUM, CRUSH_RULE, HASHPSPOOL,
NODELETE, NOPGCHANGE, NOSIZECHANGE,
WRITE_FADVISE_DONTNEED, NOSCRUB, NODEEP_SCRUB,
const choices_map_t ALL_CHOICES = {
{"size", SIZE},
{"min_size", MIN_SIZE},
- {"crash_replay_interval", CRASH_REPLAY_INTERVAL},
{"pg_num", PG_NUM}, {"pgp_num", PGP_NUM},
{"crush_rule", CRUSH_RULE},
{"hashpspool", HASHPSPOOL}, {"nodelete", NODELETE},
case MIN_SIZE:
f->dump_int("min_size", p->get_min_size());
break;
- case CRASH_REPLAY_INTERVAL:
- f->dump_int("crash_replay_interval",
- p->get_crash_replay_interval());
- break;
case CRUSH_RULE:
if (osdmap.crush->rule_exists(p->get_crush_rule())) {
f->dump_string("crush_rule", osdmap.crush->get_rule_name(
case MIN_SIZE:
ss << "min_size: " << p->get_min_size() << "\n";
break;
- case CRASH_REPLAY_INTERVAL:
- ss << "crash_replay_interval: " <<
- p->get_crash_replay_interval() << "\n";
- break;
case CRUSH_RULE:
if (osdmap.crush->rule_exists(p->get_crush_rule())) {
ss << "crush_rule: " << osdmap.crush->get_rule_name(
return -EINVAL;
}
p.auid = n;
- } else if (var == "crash_replay_interval") {
- if (interr.length()) {
- ss << "error parsing integer value '" << val << "': " << interr;
- return -EINVAL;
- }
- p.crash_replay_interval = n;
} else if (var == "pg_num") {
if (p.has_flag(pg_pool_t::FLAG_NOPGCHANGE)) {
ss << "pool pg_num change is disabled; you must unset nopgchange flag for the pool first";
f->dump_int("object_hash", get_object_hash());
f->dump_unsigned("pg_num", get_pg_num());
f->dump_unsigned("pg_placement_num", get_pgp_num());
- f->dump_unsigned("crash_replay_interval", get_crash_replay_interval());
f->dump_stream("last_change") << get_last_change();
f->dump_stream("last_force_op_resend") << get_last_force_op_resend();
f->dump_stream("last_force_op_resend_preluminous")
::encode(removed_snaps, bl);
::encode(auid, bl);
::encode(flags, bl);
- ::encode(crash_replay_interval, bl);
+ ::encode((uint32_t)0, bl); // crash_replay_interval
return;
}
::encode(removed_snaps, bl);
::encode(auid, bl);
::encode(flags, bl);
- ::encode(crash_replay_interval, bl);
+ ::encode((uint32_t)0, bl); // crash_replay_interval
::encode(min_size, bl);
::encode(quota_max_bytes, bl);
::encode(quota_max_objects, bl);
::encode(removed_snaps, bl);
::encode(auid, bl);
::encode(flags, bl);
- ::encode(crash_replay_interval, bl);
+ ::encode((uint32_t)0, bl); // crash_replay_interval
::encode(min_size, bl);
::encode(quota_max_bytes, bl);
::encode(quota_max_objects, bl);
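All three encode hunks above follow the same wire-compatibility pattern: the member is gone from pg_pool_t, but a literal zero of the same width (__u32) is still written in its place, so every later field (min_size, quota_max_bytes, ...) keeps the byte offset that older decoders expect. A minimal standalone illustration of the idea in plain C++ (toy format, not Ceph's encoding.h)::

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Little-endian append of one 32-bit value, standing in for
    // what ::encode does with a __u32.
    static void put_u32(std::vector<uint8_t>& bl, uint32_t v) {
      for (int i = 0; i < 4; ++i)
        bl.push_back(static_cast<uint8_t>(v >> (8 * i)));
    }

    struct pool_v_new { uint32_t flags; uint32_t min_size; };

    std::vector<uint8_t> encode_new(const pool_v_new& p) {
      std::vector<uint8_t> bl;
      put_u32(bl, p.flags);
      put_u32(bl, 0);           // crash_replay_interval placeholder
      put_u32(bl, p.min_size);  // still at byte offset 8
      return bl;
    }

    int main() {
      auto bl = encode_new({7, 3});
      // An old decoder reading three u32s still finds min_size at
      // offset 8 (the memcpy assumes a little-endian host here).
      uint32_t min_size;
      std::memcpy(&min_size, bl.data() + 8, 4);
      return min_size == 3 ? 0 : 1;
    }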
if (struct_v >= 4) {
::decode(flags, bl);
+ uint32_t crash_replay_interval; // member removed; decode into a local so the old field's bytes are still consumed
::decode(crash_replay_interval, bl);
} else {
flags = 0;
-
- // if this looks like the 'data' pool, set the
- // crash_replay_interval appropriately. unfortunately, we can't
- // be precise here. this should be good enough to preserve replay
- // on the data pool for the majority of cluster upgrades, though.
- if (crush_rule == 0 && auid == 0)
- crash_replay_interval = 60;
- else
- crash_replay_interval = 0;
}
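The decode side is the mirror image: any pg_pool_t encoded before this change still carries those four bytes, so they must be read, here into a throwaway local, to keep the stream aligned for min_size and everything after it. Continuing the toy format from the previous sketch (the struct is repeated so this stands alone)::

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct pool_v_new { uint32_t flags; uint32_t min_size; };

    // Little-endian read of one 32-bit value, advancing the cursor.
    static uint32_t get_u32(const std::vector<uint8_t>& bl, size_t& off) {
      uint32_t v = 0;
      for (int i = 0; i < 4; ++i)
        v |= static_cast<uint32_t>(bl[off++]) << (8 * i);
      return v;
    }

    pool_v_new decode_new(const std::vector<uint8_t>& bl) {
      size_t off = 0;
      pool_v_new p;
      p.flags = get_u32(bl, off);
      uint32_t crash_replay_interval = get_u32(bl, off);
      (void)crash_replay_interval;   // consumed but ignored, as above
      p.min_size = get_u32(bl, off); // cursor is aligned again
      return p;
    }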
if (struct_v >= 7) {
::decode(min_size, bl);
a.snap_seq = 10;
a.snap_epoch = 11;
a.auid = 12;
- a.crash_replay_interval = 13;
a.quota_max_bytes = 473;
a.quota_max_objects = 474;
o.push_back(new pg_pool_t(a));
out << " owner " << p.get_auid();
if (p.flags)
out << " flags " << p.get_flags_string();
- if (p.crash_replay_interval)
- out << " crash_replay_interval " << p.crash_replay_interval;
if (p.quota_max_bytes)
out << " max_bytes " << p.quota_max_bytes;
if (p.quota_max_objects)
snapid_t snap_seq; ///< seq for per-pool snapshot
epoch_t snap_epoch; ///< osdmap epoch of last snap
uint64_t auid; ///< who owns the pg
- __u32 crash_replay_interval; ///< seconds to allow clients to replay ACKed but unCOMMITted requests
uint64_t quota_max_bytes; ///< maximum number of bytes for this pool
uint64_t quota_max_objects; ///< maximum number of objects for this pool
last_force_op_resend_preluminous(0),
snap_seq(0), snap_epoch(0),
auid(0),
- crash_replay_interval(0),
quota_max_bytes(0), quota_max_objects(0),
pg_num_mask(0), pgp_num_mask(0),
tier_of(-1), read_tier(-1), write_tier(-1),
epoch_t get_snap_epoch() const { return snap_epoch; }
snapid_t get_snap_seq() const { return snap_seq; }
uint64_t get_auid() const { return auid; }
- unsigned get_crash_replay_interval() const { return crash_replay_interval; }
void set_snap_seq(snapid_t s) { snap_seq = s; }
void set_snap_epoch(epoch_t e) { snap_epoch = e; }
# Valid values for the 'var' argument to 'ceph osd pool set'
POOL_PROPERTIES_1 = [
- 'size', 'min_size', 'crash_replay_interval', 'pg_num',
+ 'size', 'min_size', 'pg_num',
'crush_rule', 'hashpspool',
]
'toomany']))
def test_pool_get(self):
- for var in ('size', 'min_size', 'crash_replay_interval',
+ for var in ('size', 'min_size',
'pg_num', 'pgp_num', 'crush_rule', 'auid', 'fast_read',
'scrub_min_interval', 'scrub_max_interval',
'deep_scrub_interval', 'recovery_priority',
'invalid']))
def test_pool_set(self):
- for var in ('size', 'min_size', 'crash_replay_interval',
+ for var in ('size', 'min_size',
'pg_num', 'pgp_num', 'crush_rule',
'hashpspool', 'auid', 'fast_read',
'scrub_min_interval', 'scrub_max_interval',