ceph osd crush rm-device-class all || return 1
ceph osd tree | grep -q 'asdf' && return 1
- # test 'class rm' automatically recycles shadow trees
- ceph osd crush set-device-class asdf 0 1 2 || return 1
- ceph osd tree | grep -q 'asdf' || return 1
- ceph osd crush dump | grep -q '~asdf' || return 1
- ceph osd crush tree --show-shadow | grep -q '~asdf' || return 1
- ceph osd crush class ls | grep -q 'asdf' || return 1
- ceph osd crush class rm asdf || return 1
- ceph osd tree | grep -q 'asdf' && return 1
- ceph osd crush dump | grep -q '~asdf' && return 1
- ceph osd crush tree --show-shadow | grep -q '~asdf' && return 1
- ceph osd crush class ls | grep -q 'asdf' && return 1
-
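# For orientation: the '~asdf' greps above rely on the shadow-tree naming
# scheme, in which each bucket holding devices of class <class> is mirrored
# as '<bucket>~<class>'. A minimal illustrative check (bucket name assumed):
# ceph osd crush tree --show-shadow | grep 'default~asdf'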
ceph osd crush set-device-class abc osd.2 || return 1
ceph osd crush move osd.2 root=foo rack=foo-rack host=foo-host || return 1
out=`ceph osd tree | awk '$1 == 2 && $2 == "abc" {print $0}'`
ceph osd crush rule create-replicated foo-rule foo host abc || return 1
- # test class_is_in_use
- ceph osd crush set-device-class hdd osd.0 || return 1
- ceph osd crush set-device-class ssd osd.1 || return 1
- ceph osd crush rule create-replicated foo-hdd1 default host hdd || return 1
- ceph osd crush rule create-replicated foo-hdd2 default host hdd || return 1
- ceph osd crush rule create-replicated foo-ssd default host ssd || return 1
- expect_failure $dir EBUSY ceph osd crush class rm hdd || return 1
- expect_failure $dir EBUSY ceph osd crush class rm ssd || return 1
- ceph osd crush rule rm foo-hdd1 || return 1
- expect_failure $dir EBUSY ceph osd crush class rm hdd || return 1 # still referenced by foo-hdd2
- ceph osd crush rule rm foo-hdd2 || return 1
- ceph osd crush rule rm foo-ssd || return 1
- ceph osd crush class rm hdd || return 1
- ceph osd crush class rm ssd || return 1
- expect_failure $dir EBUSY ceph osd crush class rm abc || return 1 # still referenced by foo-rule
- ceph osd crush rule rm foo-rule || return 1
- ceph osd crush class rm abc || return 1
-
# test that set-device-class cannot implicitly change an existing class
ceph osd crush set-device-class hdd osd.0 || return 1
expect_failure $dir EBUSY ceph osd crush set-device-class nvme osd.0 || return 1
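# A minimal sketch (not part of the test) of the explicit two-step change the
# EBUSY above enforces; assumes osd.0 currently carries class 'hdd':
# ceph osd crush rm-device-class osd.0 || return 1
# ceph osd crush set-device-class nvme osd.0 || return 1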
return item;
}
-
-bool CrushWrapper::class_is_in_use(int class_id, ostream *ss)
-{
- list<unsigned> rules;
- for (unsigned i = 0; i < crush->max_rules; ++i) {
- crush_rule *r = crush->rules[i];
- if (!r)
- continue;
- for (unsigned j = 0; j < r->len; ++j) {
- if (r->steps[j].op == CRUSH_RULE_TAKE) {
- int root = r->steps[j].arg1;
- for (auto &p : class_bucket) {
- auto& q = p.second;
- if (q.count(class_id) && q[class_id] == root) {
- rules.push_back(i);
- }
- }
- }
- }
- }
- if (rules.empty()) {
- return false;
- }
- if (ss) {
- ostringstream os;
- for (auto &p: rules) {
- os << "'" << get_rule_name(p) <<"',";
- }
- string out(os.str());
- out.resize(out.size() - 1); // drop last ','
- *ss << "still referenced by crush_rule(s): " << out;
- }
- return true;
-}
-
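// Hedged illustration, composed from the messages above rather than captured
// from a live cluster: with rule 'foo-rule' taking the shadow root of class
// 'abc', removal is expected to fail roughly as:
//   $ ceph osd crush class rm abc
//   Error EBUSY: class 'abc' still referenced by crush_rule(s): 'foo-rule'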
int CrushWrapper::populate_classes(
const std::map<int32_t, map<int32_t, int32_t>>& old_class_bucket)
{
const std::map<int32_t, map<int32_t, int32_t>>& old_class_bucket,
const std::set<int32_t>& used_ids,
int *clone);
- bool class_is_in_use(int class_id, ostream *ss = nullptr);
int populate_classes(
const std::map<int32_t, map<int32_t, int32_t>>& old_class_bucket);
int rebuild_roots_with_classes();
"name=shadow,type=CephChoices,strings=--show-shadow,req=false", \
"dump crush buckets and items in a tree view",
"osd", "r", "cli,rest")
-COMMAND("osd crush class rm " \
- "name=class,type=CephString,goodchars=[A-Za-z0-9-_]", \
- "remove crush device class <class>", \
- "osd", "rw", "cli,rest")
COMMAND("osd crush class ls", \
"list all crush device classes", \
"osd", "r", "cli,rest")
goto reply;
else
goto update;
- } else if (prefix == "osd crush class rm") {
- string device_class;
- if (!cmd_getval(g_ceph_context, cmdmap, "class", device_class)) {
- err = -EINVAL; // no value!
- goto reply;
- }
- if (osdmap.require_osd_release < CEPH_RELEASE_LUMINOUS) {
- ss << "you must complete the upgrade and 'ceph osd require-osd-release "
- << "luminous' before using crush device classes";
- err = -EPERM;
- goto reply;
- }
-
- CrushWrapper newcrush;
- _get_pending_crush(newcrush);
-
- if (!newcrush.class_exists(device_class)) {
- err = -ENOENT;
- ss << "class '" << device_class << "' does not exist";
- goto reply;
- }
-
- int class_id = newcrush.get_class_id(device_class);
-
- stringstream ts;
- if (newcrush.class_is_in_use(class_id, &ts)) {
- err = -EBUSY;
- ss << "class '" << device_class << "' " << ts.str();
- goto reply;
- }
-
- set<int> osds;
- newcrush.get_devices_by_class(device_class, &osds);
- for (auto& p: osds) {
- err = newcrush.remove_device_class(g_ceph_context, p, &ss);
- if (err < 0) {
- // ss has reason for failure
- goto reply;
- }
- }
-
- if (osds.empty()) {
- // empty class, remove directly
- err = newcrush.remove_class_name(device_class);
- if (err < 0) {
- ss << "class '" << device_class << "' cannot be removed '"
- << cpp_strerror(err) << "'";
- goto reply;
- }
- }
-
- pending_inc.crush.clear();
- newcrush.encode(pending_inc.crush, mon->get_quorum_con_features());
- ss << "removed class " << device_class << " with id " << class_id
- << " from crush map";
- goto update;
-
} else if (prefix == "osd crush weight-set create" ||
prefix == "osd crush weight-set create-compat") {
CrushWrapper newcrush;