ceph osd tier remove data cache3
ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
+# check that the cache-pool "at/near target max" health warnings fire
+ceph osd pool create cache4 2
+# tiny limits so 5 small objects are enough to trip both thresholds
+ceph osd pool set cache4 target_max_objects 5
+ceph osd pool set cache4 target_max_bytes 1000
+for f in `seq 1 5` ; do
+ rados -p cache4 put foo$f /etc/passwd
+done
+# wait for the mon's pg stats to reflect all 5 objects before checking health
+while ! ceph df | grep cache4 | grep ' 5 ' ; do
+ echo waiting for pg stats to flush
+ sleep 2
+done
+# expect a HEALTH_WARN mentioning cache4, with detail lines for both the
+# object-count and byte-count 'target max' warnings
+ceph health | grep WARN | grep cache4
+ceph health detail | grep cache4 | grep 'target max' | grep objects
+ceph health detail | grep cache4 | grep 'target max' | grep 'B'
+ceph osd pool delete cache4 cache4 --yes-i-really-really-mean-it
+
# Assumes there are at least 3 MDSes and two OSDs
#
OPTION(mon_pg_warn_max_object_skew, OPT_FLOAT, 10.0) // max skew from average in objects per pg
OPTION(mon_pg_warn_min_objects, OPT_INT, 10000) // do not warn below this object #
OPTION(mon_pg_warn_min_pool_objects, OPT_INT, 1000) // do not warn on pools below this object #
+OPTION(mon_cache_target_full_warn_ratio, OPT_FLOAT, .66) // position between pool cache_target_full and max where we start warning
OPTION(mon_osd_full_ratio, OPT_FLOAT, .95) // what % full makes an OSD "full"
OPTION(mon_osd_nearfull_ratio, OPT_FLOAT, .85) // what % full makes an OSD near full
OPTION(mon_globalid_prealloc, OPT_INT, 100) // how many globalids to prealloc
stamp = s;
}
- pool_stat_t get_pg_pool_sum_stat(int64_t pool) {
- ceph::unordered_map<int,pool_stat_t>::iterator p = pg_pool_sum.find(pool);
+ // Return (by value) the aggregate stats for the given pool, or a
+ // default-constructed pool_stat_t if the pool is unknown.  Made const
+ // (with a const_iterator) so callers holding a const PGMap — e.g. the
+ // monitor health checks — can call it.
+ pool_stat_t get_pg_pool_sum_stat(int64_t pool) const {
+ ceph::unordered_map<int,pool_stat_t>::const_iterator p =
+ pg_pool_sum.find(pool);
if (p != pg_pool_sum.end())
return p->second;
return pool_stat_t();
check_full_osd_health(summary, detail, pg_map.full_osds, "full", HEALTH_ERR);
check_full_osd_health(summary, detail, pg_map.nearfull_osds, "near full", HEALTH_WARN);
+ // near-target max pools: warn when a cache pool's object or byte count
+ // crosses a threshold sitting mon_cache_target_full_warn_ratio of the way
+ // between the pool's cache_target_full_ratio and its configured target max.
+ const map<int64_t,pg_pool_t>& pools = mon->osdmon()->osdmap.get_pools();
+ for (map<int64_t,pg_pool_t>::const_iterator p = pools.begin();
+ p != pools.end(); ++p) {
+ // skip pools with no target limits configured, and pools we have no
+ // stats for yet
+ if ((!p->second.target_max_objects && !p->second.target_max_bytes) ||
+ !pg_map.pg_pool_sum.count(p->first))
+ continue;
+ bool nearfull = false;
+ const char *name = mon->osdmon()->osdmap.get_pool_name(p->first);
+ const pool_stat_t& st = pg_map.get_pg_pool_sum_stat(p->first);
+ // warn threshold in micro-units (parts per 1000000); the float product is
+ // truncated on assignment to uint64_t, which is fine at this granularity
+ uint64_t ratio = p->second.cache_target_full_ratio_micro +
+ ((1000000 - p->second.cache_target_full_ratio_micro) *
+ g_conf->mon_cache_target_full_warn_ratio);
+ // object-count check: detail line only when a detail list was requested
+ if (p->second.target_max_objects && (uint64_t)st.stats.sum.num_objects >
+ p->second.target_max_objects * ratio / 1000000) {
+ nearfull = true;
+ if (detail) {
+ ostringstream ss;
+ ss << "cache pool '" << name << "' with "
+ << si_t(st.stats.sum.num_objects)
+ << " objects at/near target max "
+ << si_t(p->second.target_max_objects) << " objects";
+ detail->push_back(make_pair(HEALTH_WARN, ss.str()));
+ }
+ }
+ // byte-count check, same threshold ratio
+ if (p->second.target_max_bytes && (uint64_t)st.stats.sum.num_bytes >
+ p->second.target_max_bytes * ratio / 1000000) {
+ nearfull = true;
+ if (detail) {
+ ostringstream ss;
+ // NOTE(review): re-fetches the pool name here; could reuse 'name'
+ // captured above for consistency with the objects branch
+ ss << "cache pool '" << mon->osdmon()->osdmap.get_pool_name(p->first)
+ << "' with " << si_t(st.stats.sum.num_bytes)
+ << "B at/near target max "
+ << si_t(p->second.target_max_bytes) << "B";
+ detail->push_back(make_pair(HEALTH_WARN, ss.str()));
+ }
+ }
+ // one summary entry per pool even if both limits tripped
+ if (nearfull) {
+ ostringstream ss;
+ ss << "'" << name << "' at/near target max";
+ summary.push_back(make_pair(HEALTH_WARN, ss.str()));
+ }
+ }
+
// scrub
if (pg_map.pg_sum.stats.sum.num_scrub_errors) {
ostringstream ss;