ceph config set global mon_warn_on_pool_pg_num_not_power_of_two false
+* Ceph will issue a health warning if a RADOS pool's ``size`` is set to 1,
+  that is, if the pool is configured with no redundancy. This can be
+  fixed by setting the pool size to the recommended minimum with::
+
+    ceph osd pool set <pool-name> size <num-replicas>
+
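+  For example, assuming a hypothetical pool named ``foo``, the default
+  three-way replication can be restored with::
+
+    ceph osd pool set foo size 3
+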
+  The warning can be silenced with::
+
+    ceph config set global mon_warn_on_pool_no_redundancy false
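+
+  Alternatively, the warning can be muted without disabling the check, as
+  exercised by the qa test below, with::
+
+    ceph health mute POOL_NO_REDUNDANCY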
+
>=15.0.0
--------
:Default: ``0``
+``mon warn on pool no redundancy``
+
+:Description: Issue a ``HEALTH_WARN`` in cluster log if any pool is
+              configured with no replicas.
+:Type: Boolean
+:Default: ``True``
+
+
``mon cache target full warn ratio``
:Description: Position between pool's ``cache_target_full`` and
ceph osd pool application enable foo rbd --yes-i-really-mean-it
wait_for_clean || return 1
+ ceph -s
+ ceph health | grep HEALTH_OK || return 1
+ # test warning on setting pool size=1
+ ceph osd pool set foo size 1
+ ceph -s
+ ceph health | grep HEALTH_WARN || return 1
+ ceph health detail | grep POOL_NO_REDUNDANCY || return 1
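+ # mute the warning; the muted check code should remain visible on the summary line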
+ ceph health mute POOL_NO_REDUNDANCY
+ ceph -s
+ ceph health | grep HEALTH_OK | grep POOL_NO_REDUNDANCY || return 1
+ ceph health unmute POOL_NO_REDUNDANCY
+ ceph -s
+ ceph health | grep HEALTH_WARN || return 1
+ # restore pool size to default
+ ceph osd pool set foo size 3
ceph -s
ceph health | grep HEALTH_OK || return 1
ceph osd set noup
mon warn on osd down out interval zero = false
mon warn on too few osds = false
mon_warn_on_pool_pg_num_not_power_of_two = false
+ mon_warn_on_pool_no_redundancy = false
osd pool default erasure code profile = "plugin=jerasure technique=reed_sol_van k=2 m=1 ruleset-failure-domain=osd crush-failure-domain=osd"
.add_service("mon")
.set_description("issue POOL_PG_NUM_NOT_POWER_OF_TWO warning if pool has a non-power-of-two pg_num value"),
+ Option("mon_warn_on_pool_no_redundancy", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
+ .set_default(true)
+ .add_service("mon")
+ .set_description("Issue a health warning if any pool is configured with no replicas")
+ .add_see_also("osd_pool_default_size")
+ .add_see_also("osd_pool_default_min_size"),
+
Option("mon_warn_on_misplaced", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
.set_default(false)
.add_service("mgr")
      d.detail.swap(detail);
    }
  }
+
+  // POOL_NO_REDUNDANCY
+  if (cct->_conf.get_val<bool>("mon_warn_on_pool_no_redundancy")) {
+    list<string> detail;
+    // a pool with size 1 keeps a single copy of each object, i.e. no redundancy
+    for (const auto& it : get_pools()) {
+      if (it.second.get_size() == 1) {
+        ostringstream ss;
+        ss << "pool '" << get_pool_name(it.first)
+           << "' has no replicas configured";
+        detail.push_back(ss.str());
+      }
+    }
+    if (!detail.empty()) {
+      ostringstream ss;
+      ss << detail.size() << " pool(s) have no replicas configured";
+      auto& d = checks->add("POOL_NO_REDUNDANCY", HEALTH_WARN,
+                            ss.str(), detail.size());
+      d.detail.swap(detail);
+    }
+  }
}
int OSDMap::parse_osd_id_list(const vector<string>& ls, set<int> *out,