From b4ca5ae462c6f12ca48b787529938862646282cd Mon Sep 17 00:00:00 2001
From: xie xingguo <xie.xingguo@zte.com.cn>
Date: Mon, 28 Aug 2017 15:51:28 +0800
Subject: [PATCH] mon, osd: per pool space-full flag support

The newly introduced 'device-class' can be used to separate different
types of devices into different pools, e.g., an hdd pool for backup
data and an all-flash pool for DB applications. However, if any osd of
the cluster is currently running out of space (exceeding the predefined
'full' threshold), Ceph will mark the whole cluster as full and prevent
writes to all pools, which is clearly wrong.

This patch instead enforces the space 'full' control at pool
granularity. It leverages the existing pool quota logic and thereby
solves the above problem.

Signed-off-by: xie xingguo <xie.xingguo@zte.com.cn>
---
 .../all/admin_socket_output.yaml |   1 +
 src/crush/CrushWrapper.cc        |  38 +++
 src/crush/CrushWrapper.h         |   1 +
 src/mon/OSDMonitor.cc            | 244 +++++++++++++++---
 src/mon/OSDMonitor.h             |   3 +-
 src/osd/OSDMap.cc                | 103 ++++++--
 src/osd/OSDMap.h                 |  16 +-
 src/osd/osd_types.h              |  12 +
 8 files changed, 368 insertions(+), 50 deletions(-)
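Note for reviewers: the per-pool reconciliation rule implemented in
OSDMonitor::encode_pending() below can be summarized by this minimal,
self-contained C++ sketch. The names (PoolFlags, reconcile_pool_fullness)
are invented for illustration and are not part of the patch; the flag
values mirror pg_pool_t.

#include <cstdint>
#include <iostream>

// Hypothetical stand-ins for the pg_pool_t::FLAG_* bits this patch adds.
enum PoolFlags : uint64_t {
  FLAG_FULL          = 1 << 1,
  FLAG_FULL_NO_QUOTA = 1 << 10,
  FLAG_NEARFULL      = 1 << 11,
  FLAG_BACKFILLFULL  = 1 << 12,
};

// Reconcile a pool's flags the way encode_pending() does below:
// FULL wins over BACKFILLFULL, which wins over NEARFULL, and a
// quota-driven full pool stays FULL regardless of osd utilization.
uint64_t reconcile_pool_fullness(uint64_t flags,
                                 bool osd_full,
                                 bool osd_backfillfull,
                                 bool osd_nearfull) {
  if (flags & FLAG_FULL_NO_QUOTA)
    return flags | FLAG_FULL;  // quota-driven full takes precedence
  flags &= ~(FLAG_FULL | FLAG_BACKFILLFULL | FLAG_NEARFULL);
  if (osd_full)
    flags |= FLAG_FULL;
  else if (osd_backfillfull)
    flags |= FLAG_BACKFILLFULL;
  else if (osd_nearfull)
    flags |= FLAG_NEARFULL;
  return flags;
}

int main() {
  uint64_t f = reconcile_pool_fullness(0, /*osd_full=*/false,
                                       /*osd_backfillfull=*/true,
                                       /*osd_nearfull=*/true);
  std::cout << ((f & FLAG_BACKFILLFULL) ? "backfillfull\n" : "ok\n");
}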
diff --git a/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml b/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml
index 3aaca875940..bbf330b0ba1 100644
--- a/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml
+++ b/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml
@@ -9,6 +9,7 @@ overrides:
       - (OSDMAP_FLAGS)
       - (OSD_FULL)
       - (MDS_READ_ONLY)
+      - (POOL_FULL)
 tasks:
 - install:
 - ceph:
diff --git a/src/crush/CrushWrapper.cc b/src/crush/CrushWrapper.cc
index e1b0723a117..ed1f5242264 100644
--- a/src/crush/CrushWrapper.cc
+++ b/src/crush/CrushWrapper.cc
@@ -2052,6 +2052,44 @@ int CrushWrapper::get_rules_by_class(const string &class_name, set<int> *rules)
   return 0;
 }
 
+// return rules that might reference the given osd
+int CrushWrapper::get_rules_by_osd(int osd, set<int> *rules)
+{
+  assert(rules);
+  rules->clear();
+  if (osd < 0) {
+    return -EINVAL;
+  }
+  for (unsigned i = 0; i < crush->max_rules; ++i) {
+    crush_rule *r = crush->rules[i];
+    if (!r)
+      continue;
+    for (unsigned j = 0; j < r->len; ++j) {
+      if (r->steps[j].op == CRUSH_RULE_TAKE) {
+        int step_item = r->steps[j].arg1;
+        list<int> unordered;
+        int rc = _get_leaves(step_item, &unordered);
+        if (rc < 0) {
+          return rc; // propagate fatal errors!
+        }
+        bool match = false;
+        for (auto &o: unordered) {
+          assert(o >= 0);
+          if (o == osd) {
+            match = true;
+            break;
+          }
+        }
+        if (match) {
+          rules->insert(i);
+          break;
+        }
+      }
+    }
+  }
+  return 0;
+}
+
 bool CrushWrapper::_class_is_dead(int class_id)
 {
   for (auto &p: class_map) {
diff --git a/src/crush/CrushWrapper.h b/src/crush/CrushWrapper.h
index a8c017c29c5..3a99fc2c00e 100644
--- a/src/crush/CrushWrapper.h
+++ b/src/crush/CrushWrapper.h
@@ -1224,6 +1224,7 @@ public:
   int populate_classes(
     const std::map<int32_t, map<int32_t, int32_t>>& old_class_bucket);
   int get_rules_by_class(const string &class_name, set<int> *rules);
+  int get_rules_by_osd(int osd, set<int> *rules);
   bool _class_is_dead(int class_id);
   void cleanup_dead_classes();
   int rebuild_roots_with_classes();
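The new helper scans every rule's TAKE steps and expands each step to
its leaf osds. A toy model of that matching logic, with invented names
(ToyRule, rules_by_osd) and the leaf expansion pre-computed instead of
calling _get_leaves():

#include <algorithm>
#include <cassert>
#include <iostream>
#include <set>
#include <vector>

// One leaf list per TAKE step; in the real code _get_leaves() expands
// each step's subtree, here we just store the result.
struct ToyRule {
  std::vector<std::vector<int>> take_leaves;
};

// A rule matches if any of its TAKE steps can reach the given osd.
std::set<size_t> rules_by_osd(const std::vector<ToyRule> &rules, int osd) {
  assert(osd >= 0);
  std::set<size_t> out;
  for (size_t i = 0; i < rules.size(); ++i) {
    for (const auto &leaves : rules[i].take_leaves) {
      if (std::find(leaves.begin(), leaves.end(), osd) != leaves.end()) {
        out.insert(i);
        break;
      }
    }
  }
  return out;
}

int main() {
  std::vector<ToyRule> rules = {
    {{{0, 1, 2}}},   // rule 0: e.g. hdd root -> osd.0-2
    {{{3, 4, 5}}},   // rule 1: e.g. ssd root -> osd.3-5
  };
  for (auto r : rules_by_osd(rules, 4))
    std::cout << "rule " << r << " references osd.4\n";  // prints rule 1
}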
diff --git a/src/mon/OSDMonitor.cc b/src/mon/OSDMonitor.cc
index 23d0f182e46..64010e925f9 100644
--- a/src/mon/OSDMonitor.cc
+++ b/src/mon/OSDMonitor.cc
@@ -917,31 +917,190 @@ void OSDMonitor::encode_pending(MonitorDBStore::TransactionRef t)
   tmp.apply_incremental(pending_inc);
 
   if (tmp.require_osd_release >= CEPH_RELEASE_LUMINOUS) {
-    // set or clear full/nearfull?
-    int full, backfill, nearfull;
-    tmp.count_full_nearfull_osds(&full, &backfill, &nearfull);
-    if (full > 0) {
-      if (!tmp.test_flag(CEPH_OSDMAP_FULL)) {
-        dout(10) << __func__ << " setting full flag" << dendl;
-        add_flag(CEPH_OSDMAP_FULL);
-        remove_flag(CEPH_OSDMAP_NEARFULL);
-      }
-    } else {
-      if (tmp.test_flag(CEPH_OSDMAP_FULL)) {
-        dout(10) << __func__ << " clearing full flag" << dendl;
-        remove_flag(CEPH_OSDMAP_FULL);
-      }
-      if (nearfull > 0) {
-        if (!tmp.test_flag(CEPH_OSDMAP_NEARFULL)) {
-          dout(10) << __func__ << " setting nearfull flag" << dendl;
-          add_flag(CEPH_OSDMAP_NEARFULL);
-        }
-      } else {
-        if (tmp.test_flag(CEPH_OSDMAP_NEARFULL)) {
-          dout(10) << __func__ << " clearing nearfull flag" << dendl;
-          remove_flag(CEPH_OSDMAP_NEARFULL);
-        }
-      }
-    }
+    // remove any legacy osdmap nearfull/full flags
+    {
+      if (tmp.test_flag(CEPH_OSDMAP_FULL | CEPH_OSDMAP_NEARFULL)) {
+        dout(10) << __func__ << " clearing legacy osdmap nearfull/full flag"
+                 << dendl;
+        remove_flag(CEPH_OSDMAP_NEARFULL);
+        remove_flag(CEPH_OSDMAP_FULL);
+      }
+    }
+    // collect which pools are currently affected by
+    // the near/backfill/full osd(s),
+    // and set per-pool near/backfill/full flag instead
+    set<int64_t> full_pool_ids;
+    set<int64_t> backfillfull_pool_ids;
+    set<int64_t> nearfull_pool_ids;
+    tmp.get_full_pools(g_ceph_context,
+                       &full_pool_ids,
+                       &backfillfull_pool_ids,
+                       &nearfull_pool_ids);
+    if (full_pool_ids.empty() ||
+        backfillfull_pool_ids.empty() ||
+        nearfull_pool_ids.empty()) {
+      // at least one category has no affected osds - try to cancel
+      // any now-improper nearfull/backfillfull/full pool flags for
+      // the empty categories first
+      for (auto &pool: tmp.get_pools()) {
+        auto p = pool.first;
+        if (tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_NEARFULL) &&
+            nearfull_pool_ids.empty()) {
+          dout(10) << __func__ << " clearing pool '" << tmp.pool_name[p]
+                   << "'s nearfull flag" << dendl;
+          if (pending_inc.new_pools.count(p) == 0) {
+            // load original pool info first!
+            pending_inc.new_pools[p] = pool.second;
+          }
+          pending_inc.new_pools[p].flags &= ~pg_pool_t::FLAG_NEARFULL;
+        }
+        if (tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_BACKFILLFULL) &&
+            backfillfull_pool_ids.empty()) {
+          dout(10) << __func__ << " clearing pool '" << tmp.pool_name[p]
+                   << "'s backfillfull flag" << dendl;
+          if (pending_inc.new_pools.count(p) == 0) {
+            pending_inc.new_pools[p] = pool.second;
+          }
+          pending_inc.new_pools[p].flags &= ~pg_pool_t::FLAG_BACKFILLFULL;
+        }
+        if (tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_FULL) &&
+            full_pool_ids.empty()) {
+          if (tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_FULL_NO_QUOTA)) {
+            // set by EQUOTA, skipping
+            continue;
+          }
+          dout(10) << __func__ << " clearing pool '" << tmp.pool_name[p]
+                   << "'s full flag" << dendl;
+          if (pending_inc.new_pools.count(p) == 0) {
+            pending_inc.new_pools[p] = pool.second;
+          }
+          pending_inc.new_pools[p].flags &= ~pg_pool_t::FLAG_FULL;
+        }
+      }
+    }
+    if (!full_pool_ids.empty()) {
+      dout(10) << __func__ << " marking pool(s) " << full_pool_ids
+               << " as full" << dendl;
+      for (auto &p: full_pool_ids) {
+        if (tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_FULL)) {
+          continue;
+        }
+        if (pending_inc.new_pools.count(p) == 0) {
+          pending_inc.new_pools[p] = tmp.pools[p];
+        }
+        pending_inc.new_pools[p].flags |= pg_pool_t::FLAG_FULL;
+        pending_inc.new_pools[p].flags &= ~pg_pool_t::FLAG_BACKFILLFULL;
+        pending_inc.new_pools[p].flags &= ~pg_pool_t::FLAG_NEARFULL;
+      }
+      // cancel FLAG_FULL for pools which are no longer full too
+      for (auto &pool: tmp.get_pools()) {
+        auto p = pool.first;
+        if (full_pool_ids.count(p)) {
+          // skip pools we have just marked as full above
+          continue;
+        }
+        if (!tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_FULL) ||
+            tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_FULL_NO_QUOTA)) {
+          // don't touch pools that are not currently full,
+          // or that are out of quota (and hence considered full)
+          continue;
+        }
+        dout(10) << __func__ << " clearing pool '" << tmp.pool_name[p]
+                 << "'s full flag" << dendl;
+        if (pending_inc.new_pools.count(p) == 0) {
+          pending_inc.new_pools[p] = pool.second;
+        }
+        pending_inc.new_pools[p].flags &= ~pg_pool_t::FLAG_FULL;
+      }
+    }
+    if (!backfillfull_pool_ids.empty()) {
+      for (auto &p: backfillfull_pool_ids) {
+        if (full_pool_ids.count(p)) {
+          // skip pools we have already considered as full above
+          continue;
+        }
+        if (tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_FULL_NO_QUOTA)) {
+          // make sure FLAG_FULL is truly set, so it is safe not
+          // to set an extra (redundant) FLAG_BACKFILLFULL flag
+          assert(tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_FULL));
+          continue;
+        }
+        if (tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_BACKFILLFULL)) {
+          // don't bother if pool is already marked as backfillfull
+          continue;
+        }
+        dout(10) << __func__ << " marking pool '" << tmp.pool_name[p]
+                 << "' as backfillfull" << dendl;
+        if (pending_inc.new_pools.count(p) == 0) {
+          pending_inc.new_pools[p] = tmp.pools[p];
+        }
+        pending_inc.new_pools[p].flags |= pg_pool_t::FLAG_BACKFILLFULL;
+        pending_inc.new_pools[p].flags &= ~pg_pool_t::FLAG_NEARFULL;
+      }
+      // cancel FLAG_BACKFILLFULL for pools
+      // which are no longer backfillfull too
+      for (auto &pool: tmp.get_pools()) {
+        auto p = pool.first;
+        if (full_pool_ids.count(p) || backfillfull_pool_ids.count(p)) {
+          // skip pools we have just marked as backfillfull/full above
+          continue;
+        }
+        if (!tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_BACKFILLFULL)) {
+          // and don't touch pools that are not currently backfillfull
+          continue;
+        }
+        dout(10) << __func__ << " clearing pool '" << tmp.pool_name[p]
+                 << "'s backfillfull flag" << dendl;
+        if (pending_inc.new_pools.count(p) == 0) {
+          pending_inc.new_pools[p] = pool.second;
+        }
+        pending_inc.new_pools[p].flags &= ~pg_pool_t::FLAG_BACKFILLFULL;
+      }
+    }
+    if (!nearfull_pool_ids.empty()) {
+      for (auto &p: nearfull_pool_ids) {
+        if (full_pool_ids.count(p) || backfillfull_pool_ids.count(p)) {
+          continue;
+        }
+        if (tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_FULL_NO_QUOTA)) {
+          // make sure FLAG_FULL is truly set, so it is safe not
+          // to set an extra (redundant) FLAG_NEARFULL flag
+          assert(tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_FULL));
+          continue;
+        }
+        if (tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_NEARFULL)) {
+          // don't bother if pool is already marked as nearfull
+          continue;
+        }
+        dout(10) << __func__ << " marking pool '" << tmp.pool_name[p]
+                 << "' as nearfull" << dendl;
+        if (pending_inc.new_pools.count(p) == 0) {
+          pending_inc.new_pools[p] = tmp.pools[p];
+        }
+        pending_inc.new_pools[p].flags |= pg_pool_t::FLAG_NEARFULL;
+      }
+      // cancel FLAG_NEARFULL for pools
+      // which are no longer nearfull too
+      for (auto &pool: tmp.get_pools()) {
+        auto p = pool.first;
+        if (full_pool_ids.count(p) ||
+            backfillfull_pool_ids.count(p) ||
+            nearfull_pool_ids.count(p)) {
+          // skip pools we have just marked as
+          // nearfull/backfillfull/full above
+          continue;
+        }
+        if (!tmp.get_pg_pool(p)->has_flag(pg_pool_t::FLAG_NEARFULL)) {
+          // and don't touch pools that are not currently nearfull
+          continue;
+        }
+        dout(10) << __func__ << " clearing pool '" << tmp.pool_name[p]
+                 << "'s nearfull flag" << dendl;
+        if (pending_inc.new_pools.count(p) == 0) {
+          pending_inc.new_pools[p] = pool.second;
+        }
+        pending_inc.new_pools[p].flags &= ~pg_pool_t::FLAG_NEARFULL;
+      }
+    }
 
     // min_compat_client?
@@ -4828,10 +4987,20 @@ bool OSDMonitor::preprocess_command(MonOpRequestRef op)
   return true;
 }
 
-void OSDMonitor::update_pool_flags(int64_t pool_id, uint64_t flags)
+void OSDMonitor::set_pool_flags(int64_t pool_id, uint64_t flags)
 {
-  const pg_pool_t *pool = osdmap.get_pg_pool(pool_id);
-  pending_inc.get_new_pool(pool_id, pool)->flags = flags;
+  pg_pool_t *pool = pending_inc.get_new_pool(pool_id,
+                                             osdmap.get_pg_pool(pool_id));
+  assert(pool);
+  pool->set_flag(flags);
+}
+
+void OSDMonitor::clear_pool_flags(int64_t pool_id, uint64_t flags)
+{
+  pg_pool_t *pool = pending_inc.get_new_pool(pool_id,
+                                             osdmap.get_pg_pool(pool_id));
+  assert(pool);
+  pool->unset_flag(flags);
 }
 
 bool OSDMonitor::update_pools_status()
@@ -4854,14 +5023,16 @@ bool OSDMonitor::update_pools_status()
       (pool.quota_max_bytes > 0 && (uint64_t)sum.num_bytes >= pool.quota_max_bytes) ||
       (pool.quota_max_objects > 0 && (uint64_t)sum.num_objects >= pool.quota_max_objects);
 
-    if (pool.has_flag(pg_pool_t::FLAG_FULL)) {
+    if (pool.has_flag(pg_pool_t::FLAG_FULL_NO_QUOTA)) {
       if (pool_is_full)
         continue;
 
       mon->clog->info() << "pool '" << pool_name
-                        << "' no longer full; removing FULL flag";
-
-      update_pool_flags(it->first, pool.get_flags() & ~pg_pool_t::FLAG_FULL);
+                        << "' no longer out of quota; removing NO_QUOTA flag";
+      // below we also cancel FLAG_FULL; it will be set again in
+      // OSDMonitor::encode_pending if the pool still fails the osd-full check.
+      clear_pool_flags(it->first,
+                       pg_pool_t::FLAG_FULL_NO_QUOTA | pg_pool_t::FLAG_FULL);
       ret = true;
     } else {
       if (!pool_is_full)
@@ -4879,7 +5050,14 @@ bool OSDMonitor::update_pools_status()
                           << " (reached quota's max_objects: "
                           << pool.quota_max_objects << ")";
       }
-      update_pool_flags(it->first, pool.get_flags() | pg_pool_t::FLAG_FULL);
+      // set both FLAG_FULL_NO_QUOTA and FLAG_FULL
+      // note that below we try to cancel FLAG_BACKFILLFULL/NEARFULL too
+      // since FLAG_FULL should always take precedence
+      set_pool_flags(it->first,
+                     pg_pool_t::FLAG_FULL_NO_QUOTA | pg_pool_t::FLAG_FULL);
+      clear_pool_flags(it->first,
+                       pg_pool_t::FLAG_NEARFULL |
+                       pg_pool_t::FLAG_BACKFILLFULL);
       ret = true;
     }
   }
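The quota test in update_pools_status() above is a simple threshold
check; a pool with no quota configured (zero) never trips it. A
self-contained sketch with invented names (PoolUsage,
pool_exceeds_quota):

#include <cassert>
#include <cstdint>

struct PoolUsage {
  uint64_t bytes = 0, objects = 0;
  uint64_t quota_max_bytes = 0, quota_max_objects = 0;  // 0 means "no quota"
};

// Mirrors the pool_is_full expression above.
bool pool_exceeds_quota(const PoolUsage &u) {
  return (u.quota_max_bytes > 0 && u.bytes >= u.quota_max_bytes) ||
         (u.quota_max_objects > 0 && u.objects >= u.quota_max_objects);
}

int main() {
  PoolUsage u{/*bytes=*/100, /*objects=*/10,
              /*quota_max_bytes=*/0, /*quota_max_objects=*/10};
  assert(pool_exceeds_quota(u));  // object quota reached -> FULL_NO_QUOTA+FULL
}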
diff --git a/src/mon/OSDMonitor.h b/src/mon/OSDMonitor.h
index e4b1eb7f953..5f36ba106e0 100644
--- a/src/mon/OSDMonitor.h
+++ b/src/mon/OSDMonitor.h
@@ -357,7 +357,8 @@ private:
                               ostream *ss);
   int prepare_new_pool(MonOpRequestRef op);
 
-  void update_pool_flags(int64_t pool_id, uint64_t flags);
+  void set_pool_flags(int64_t pool_id, uint64_t flags);
+  void clear_pool_flags(int64_t pool_id, uint64_t flags);
   bool update_pools_status();
 
   bool prepare_set_flag(MonOpRequestRef op, int flag);
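Splitting the old update_pool_flags() into a set/clear pair lets
callers OR-in or mask-out bits against the pending copy of the pool,
so multiple flag updates within one epoch compose instead of
overwriting each other. A toy model of that copy-on-write pattern
(ToyPool, committed, pending and the helpers are all invented names):

#include <cstdint>
#include <map>

struct ToyPool { uint64_t flags = 0; };

std::map<int64_t, ToyPool> committed;  // stands in for the osdmap's pools
std::map<int64_t, ToyPool> pending;    // stands in for pending_inc.new_pools

// Like pending_inc.get_new_pool(): copy the committed pool into the
// pending map on first touch, then hand back the pending copy.
ToyPool *get_new_pool(int64_t id) {
  auto it = pending.find(id);
  if (it == pending.end())
    it = pending.emplace(id, committed[id]).first;  // copy-on-write
  return &it->second;
}

void set_pool_flags(int64_t id, uint64_t f)   { get_new_pool(id)->flags |= f; }
void clear_pool_flags(int64_t id, uint64_t f) { get_new_pool(id)->flags &= ~f; }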
diff --git a/src/osd/OSDMap.cc b/src/osd/OSDMap.cc
index 8314d3acb46..9c3875b4fe9 100644
--- a/src/osd/OSDMap.cc
+++ b/src/osd/OSDMap.cc
@@ -20,6 +20,7 @@
 #include "OSDMap.h"
 #include <algorithm>
 #include "common/config.h"
+#include "common/errno.h"
 #include "common/Formatter.h"
 #include "common/TextTable.h"
 #include "include/ceph_features.h"
@@ -1145,21 +1146,41 @@ int OSDMap::calc_num_osds()
   return num_osd;
 }
 
-void OSDMap::count_full_nearfull_osds(int *full, int *backfill, int *nearfull) const
+void OSDMap::get_full_pools(CephContext *cct,
+                            set<int64_t> *full,
+                            set<int64_t> *backfillfull,
+                            set<int64_t> *nearfull) const
 {
-  *full = 0;
-  *backfill = 0;
-  *nearfull = 0;
+  assert(full);
+  assert(backfillfull);
+  assert(nearfull);
+  full->clear();
+  backfillfull->clear();
+  nearfull->clear();
+
+  vector<int> full_osds;
+  vector<int> backfillfull_osds;
+  vector<int> nearfull_osds;
   for (int i = 0; i < max_osd; ++i) {
     if (exists(i) && is_up(i) && is_in(i)) {
       if (osd_state[i] & CEPH_OSD_FULL)
-        ++(*full);
+        full_osds.push_back(i);
       else if (osd_state[i] & CEPH_OSD_BACKFILLFULL)
-        ++(*backfill);
+        backfillfull_osds.push_back(i);
      else if (osd_state[i] & CEPH_OSD_NEARFULL)
-        ++(*nearfull);
+        nearfull_osds.push_back(i);
     }
   }
+
+  for (auto i: full_osds) {
+    get_pool_ids_by_osd(cct, i, full);
+  }
+  for (auto i: backfillfull_osds) {
+    get_pool_ids_by_osd(cct, i, backfillfull);
+  }
+  for (auto i: nearfull_osds) {
+    get_pool_ids_by_osd(cct, i, nearfull);
+  }
 }
 
 void OSDMap::get_full_osd_counts(set<int> *full, set<int> *backfill,
@@ -3925,6 +3946,31 @@ int OSDMap::get_osds_by_bucket_name(const string &name, set<int> *osds) const
   return crush->get_leaves(name, osds);
 }
 
+// get pools whose crush rules might reference the given osd
+void OSDMap::get_pool_ids_by_osd(CephContext *cct,
+                                 int osd,
+                                 set<int64_t> *pool_ids) const
+{
+  assert(pool_ids);
+  set<int> raw_rules;
+  int r = crush->get_rules_by_osd(osd, &raw_rules);
+  if (r < 0) {
+    lderr(cct) << __func__ << " get_rules_by_osd failed: " << cpp_strerror(r)
+               << dendl;
+    assert(r >= 0);
+  }
+  set<int> rules;
+  for (auto &i: raw_rules) {
+    // exclude any dead rule
+    if (crush_ruleset_in_use(i)) {
+      rules.insert(i);
+    }
+  }
+  for (auto &r: rules) {
+    get_pool_ids_by_rule(r, pool_ids);
+  }
+}
+
 template <typename F>
 class OSDUtilizationDumper : public CrushTreeDumper::Dumper<F> {
 public:
@@ -4480,6 +4526,7 @@ void OSDMap::check_health(health_check_map_t *checks) const
 
   // warn about flags
   uint64_t warn_flags =
+    CEPH_OSDMAP_NEARFULL |
     CEPH_OSDMAP_FULL |
     CEPH_OSDMAP_PAUSERD |
     CEPH_OSDMAP_PAUSEWR |
@@ -4586,23 +4633,49 @@ void OSDMap::check_health(health_check_map_t *checks) const
   // OSD_UPGRADE_FINISHED
   // none of these (yet) since we don't run until luminous upgrade is done.
 
-  // POOL_FULL
+  // POOL_NEARFULL/BACKFILLFULL/FULL
   {
-    list<string> detail;
+    list<string> full_detail, backfillfull_detail, nearfull_detail;
     for (auto it : get_pools()) {
       const pg_pool_t &pool = it.second;
+      const string& pool_name = get_pool_name(it.first);
       if (pool.has_flag(pg_pool_t::FLAG_FULL)) {
-        const string& pool_name = get_pool_name(it.first);
         stringstream ss;
-        ss << "pool '" << pool_name << "' is full";
-        detail.push_back(ss.str());
+        if (pool.has_flag(pg_pool_t::FLAG_FULL_NO_QUOTA)) {
+          // the pool may be running out of space too,
+          // but we want EQUOTA to take precedence
+          ss << "pool '" << pool_name << "' is full (no quota)";
+        } else {
+          ss << "pool '" << pool_name << "' is full (no space)";
+        }
+        full_detail.push_back(ss.str());
+      } else if (pool.has_flag(pg_pool_t::FLAG_BACKFILLFULL)) {
+        stringstream ss;
+        ss << "pool '" << pool_name << "' is backfillfull";
+        backfillfull_detail.push_back(ss.str());
+      } else if (pool.has_flag(pg_pool_t::FLAG_NEARFULL)) {
+        stringstream ss;
+        ss << "pool '" << pool_name << "' is nearfull";
+        nearfull_detail.push_back(ss.str());
       }
     }
-    if (!detail.empty()) {
+    if (!full_detail.empty()) {
       ostringstream ss;
-      ss << detail.size() << " pool(s) full";
+      ss << full_detail.size() << " pool(s) full";
       auto& d = checks->add("POOL_FULL", HEALTH_WARN, ss.str());
-      d.detail.swap(detail);
+      d.detail.swap(full_detail);
+    }
+    if (!backfillfull_detail.empty()) {
+      ostringstream ss;
+      ss << backfillfull_detail.size() << " pool(s) backfillfull";
+      auto& d = checks->add("POOL_BACKFILLFULL", HEALTH_WARN, ss.str());
+      d.detail.swap(backfillfull_detail);
+    }
+    if (!nearfull_detail.empty()) {
+      ostringstream ss;
+      ss << nearfull_detail.size() << " pool(s) nearfull";
+      auto& d = checks->add("POOL_NEARFULL", HEALTH_WARN, ss.str());
+      d.detail.swap(nearfull_detail);
    }
   }
 }
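get_full_pools() above classifies each in+up osd into exactly one
bucket (FULL trumps BACKFILLFULL, which trumps NEARFULL) and then
expands the osd to the pools that can reach it. A toy model, with the
expansion callback standing in for get_pool_ids_by_osd() and all names
invented:

#include <cstdint>
#include <functional>
#include <set>
#include <vector>

enum : uint32_t { OSD_NEARFULL = 1, OSD_BACKFILLFULL = 2, OSD_FULL = 4 };

void classify_and_expand(
    const std::vector<uint32_t> &osd_state,
    const std::function<void(int, std::set<int64_t>*)> &pools_of_osd,
    std::set<int64_t> *full, std::set<int64_t> *backfillfull,
    std::set<int64_t> *nearfull) {
  for (int i = 0; i < (int)osd_state.size(); ++i) {
    if (osd_state[i] & OSD_FULL)
      pools_of_osd(i, full);        // FULL trumps the weaker states
    else if (osd_state[i] & OSD_BACKFILLFULL)
      pools_of_osd(i, backfillfull);
    else if (osd_state[i] & OSD_NEARFULL)
      pools_of_osd(i, nearfull);
  }
}

int main() {
  std::set<int64_t> full, bf, nf;
  std::vector<uint32_t> state = {0, OSD_FULL, OSD_NEARFULL};
  classify_and_expand(state,
      [](int osd, std::set<int64_t> *out) { out->insert(osd * 10); },
      &full, &bf, &nf);
  // osd.1 is full -> pool 10 in 'full'; osd.2 is nearfull -> pool 20 in 'nf'
}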
diff --git a/src/osd/OSDMap.h b/src/osd/OSDMap.h
index a347d7f04e1..65350c22c62 100644
--- a/src/osd/OSDMap.h
+++ b/src/osd/OSDMap.h
@@ -644,7 +644,10 @@ public:
   float get_nearfull_ratio() const {
     return nearfull_ratio;
   }
-  void count_full_nearfull_osds(int *full, int *backfill, int *nearfull) const;
+  void get_full_pools(CephContext *cct,
+                      set<int64_t> *full,
+                      set<int64_t> *backfillfull,
+                      set<int64_t> *nearfull) const;
   void get_full_osd_counts(set<int> *full, set<int> *backfill,
                            set<int> *nearfull) const;
@@ -1165,6 +1168,17 @@ public:
   mempool::osdmap::map<int64_t,pg_pool_t>& get_pools() {
     return pools;
   }
+  void get_pool_ids_by_rule(int rule_id, set<int64_t> *pool_ids) const {
+    assert(pool_ids);
+    for (auto &p: pools) {
+      if ((int)p.second.get_crush_rule() == rule_id) {
+        pool_ids->insert(p.first);
+      }
+    }
+  }
+  void get_pool_ids_by_osd(CephContext *cct,
+                           int osd,
+                           set<int64_t> *pool_ids) const;
   const string& get_pool_name(int64_t p) const {
     auto i = pool_name.find(p);
     assert(i != pool_name.end());
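get_pool_ids_by_rule() above inverts the pool -> crush_rule mapping
with a plain linear scan, which is cheap since pool counts are small.
A standalone model (ToyPool and pool_ids_by_rule are invented names):

#include <cassert>
#include <map>
#include <set>

struct ToyPool { int crush_rule; };

void pool_ids_by_rule(const std::map<int64_t, ToyPool> &pools,
                      int rule_id, std::set<int64_t> *pool_ids) {
  assert(pool_ids);
  for (const auto &p : pools) {
    if (p.second.crush_rule == rule_id)
      pool_ids->insert(p.first);
  }
}

int main() {
  std::map<int64_t, ToyPool> pools = {{1, {0}}, {2, {1}}, {3, {0}}};
  std::set<int64_t> ids;
  pool_ids_by_rule(pools, 0, &ids);
  assert(ids == std::set<int64_t>({1, 3}));  // pools 1 and 3 use rule 0
}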
diff --git a/src/osd/osd_types.h b/src/osd/osd_types.h
index 5b14f7da9a6..0f02862f071 100644
--- a/src/osd/osd_types.h
+++ b/src/osd/osd_types.h
@@ -1156,6 +1156,9 @@ struct pg_pool_t {
     FLAG_WRITE_FADVISE_DONTNEED = 1<<7, // write mode with LIBRADOS_OP_FLAG_FADVISE_DONTNEED
     FLAG_NOSCRUB = 1<<8, // block periodic scrub
     FLAG_NODEEP_SCRUB = 1<<9, // block periodic deep-scrub
+    FLAG_FULL_NO_QUOTA = 1<<10, // pool is currently running out of quota, will set FLAG_FULL too
+    FLAG_NEARFULL = 1<<11, // pool is nearfull
+    FLAG_BACKFILLFULL = 1<<12, // pool is backfillfull
   };
 
   static const char *get_flag_name(int f) {
@@ -1170,6 +1173,9 @@ struct pg_pool_t {
     case FLAG_WRITE_FADVISE_DONTNEED: return "write_fadvise_dontneed";
     case FLAG_NOSCRUB: return "noscrub";
     case FLAG_NODEEP_SCRUB: return "nodeep-scrub";
+    case FLAG_FULL_NO_QUOTA: return "full_no_quota";
+    case FLAG_NEARFULL: return "nearfull";
+    case FLAG_BACKFILLFULL: return "backfillfull";
     default: return "???";
     }
   }
@@ -1208,6 +1214,12 @@ struct pg_pool_t {
       return FLAG_NOSCRUB;
     if (name == "nodeep-scrub")
       return FLAG_NODEEP_SCRUB;
+    if (name == "full_no_quota")
+      return FLAG_FULL_NO_QUOTA;
+    if (name == "nearfull")
+      return FLAG_NEARFULL;
+    if (name == "backfillfull")
+      return FLAG_BACKFILLFULL;
     return 0;
   }
 
-- 
2.39.5