From: Stephan Müller
Date: Tue, 9 Oct 2018 06:35:41 +0000 (+0200)
Subject: mgr/dashboard: Unset compression arguments for pools
X-Git-Tag: v14.0.1~96^2~3
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=c98c8055c21d1f2b8576b748716187cfab8e5845;p=ceph.git

mgr/dashboard: Unset compression arguments for pools

The dashboard backend can now unset all previously set compression
arguments when the compression mode is switched to 'unset'.

For 'unset', Ceph itself only deletes the 'compression_mode' argument
and leaves the other compression arguments untouched. The backend
therefore adds reset values for those arguments to the update request
so that all of them are removed.

Fixes: https://tracker.ceph.com/issues/36355

Signed-off-by: Stephan Müller
---

diff --git a/qa/tasks/mgr/dashboard/test_pool.py b/qa/tasks/mgr/dashboard/test_pool.py
index 665c854f99915..b9f28ff34669f 100644
--- a/qa/tasks/mgr/dashboard/test_pool.py
+++ b/qa/tasks/mgr/dashboard/test_pool.py
@@ -13,13 +13,6 @@ log = logging.getLogger(__name__)
 class PoolTest(DashboardTestCase):
     AUTH_ROLES = ['pool-manager']
 
-    @classmethod
-    def tearDownClass(cls):
-        super(PoolTest, cls).tearDownClass()
-        for name in ['dashboard_pool1', 'dashboard_pool2', 'dashboard_pool3', 'dashboard_pool_update1']:
-            cls._ceph_cmd(['osd', 'pool', 'delete', name, name, '--yes-i-really-really-mean-it'])
-        cls._ceph_cmd(['osd', 'erasure-code-profile', 'rm', 'ecprofile'])
-
     pool_schema = JObj(sub_elems={
         'pool_name': str,
         'type': str,
@@ -28,6 +21,75 @@ class PoolTest(DashboardTestCase):
         'flags_names': str,
     }, allow_unknown=True)
 
+    def _pool_create(self, data):
+        try:
+            self._task_post('/api/pool/', data)
+            self.assertStatus(201)
+
+            self._check_pool_properties(data)
+
+            self._task_delete("/api/pool/" + data['pool'])
+            self.assertStatus(204)
+        except Exception:
+            log.exception("test_pool_create: data=%s", data)
+            raise
+
+    def _check_pool_properties(self, data, pool_name=None):
+        if not pool_name:
+            pool_name = data['pool']
+        pool = self._get_pool(pool_name)
+        try:
+            for k, v in data.items():
+                self._check_pool_property(k, v, pool)
+
+        except Exception:
+            log.exception("test_pool_create: pool=%s", pool)
+            raise
+
+        health = self._get('/api/dashboard/health')['health']
+        self.assertEqual(health['status'], 'HEALTH_OK', msg='health={}'.format(health))
+
+    def _get_pool(self, pool_name):
+        pool = self._get("/api/pool/" + pool_name)
+        self.assertStatus(200)
+        self.assertSchemaBody(self.pool_schema)
+        return pool
+
+    def _check_pool_property(self, prop, value, pool):
+        if prop == 'pool_type':
+            self.assertEqual(pool['type'], value)
+        elif prop == 'size':
+            self.assertEqual(pool[prop], int(value), '{}: {} != {}'.format(prop, pool[prop], value))
+        elif prop == 'pg_num':
+            self._check_pg_num(value, pool)
+        elif prop == 'application_metadata':
+            self.assertIsInstance(pool[prop], list)
+            self.assertEqual(pool[prop], value)
+        elif prop == 'pool':
+            self.assertEqual(pool['pool_name'], value)
+        elif prop.startswith('compression'):
+            if value is not None:
+                if prop.endswith('size'):
+                    value = int(value)
+                elif prop.endswith('ratio'):
+                    value = float(value)
+            self.assertEqual(pool['options'].get(prop), value)
+        else:
+            self.assertEqual(pool[prop], value, '{}: {} != {}'.format(prop, pool[prop], value))
+
+    def _check_pg_num(self, value, pool):
+        prop = 'pg_num'
+        pgp_prop = 'pg_placement_num'
+        for p in [prop, pgp_prop]:  # Should have the same values
+            self.assertEqual(pool[p], int(value), '{}: {} != {}'.format(p, pool[p], value))
+
+    @classmethod
+    def tearDownClass(cls):
+        super(PoolTest, cls).tearDownClass()
+        for name in ['dashboard_pool1', 'dashboard_pool2', 'dashboard_pool3', 'dashboard_pool_update1']:
+            cls._ceph_cmd(['osd', 'pool', 'delete', name, name, '--yes-i-really-really-mean-it'])
+        cls._ceph_cmd(['osd', 'erasure-code-profile', 'rm', 'ecprofile'])
+
     @DashboardTestCase.RunAs('test', 'test', [{'pool': ['create', 'update', 'delete']}])
     def test_read_access_permissions(self):
         self._get('/api/pool')
@@ -95,55 +157,6 @@ class PoolTest(DashboardTestCase):
         self.assertIn('stats', pool)
         self.assertNotIn('flags_names', pool)
 
-    def _pool_create(self, data):
-        try:
-            self._task_post('/api/pool/', data)
-            self.assertStatus(201)
-
-            self._check_pool_properties(data)
-
-            self._task_delete("/api/pool/" + data['pool'])
-            self.assertStatus(204)
-        except Exception:
-            log.exception("test_pool_create: data=%s", data)
-            raise
-
-    def _check_pool_properties(self, data, pool_name=None):
-        if not pool_name:
-            pool_name = data['pool']
-        pool = self._get("/api/pool/" + pool_name)
-        self.assertStatus(200)
-        self.assertSchemaBody(self.pool_schema)
-        try:
-            for k, v in data.items():
-                if k == 'pool_type':
-                    self.assertEqual(pool['type'], data['pool_type'])
-                elif k == 'pg_num':
-                    self.assertEqual(pool[k], int(v), '{}: {} != {}'.format(k, pool[k], v))
-                    k = 'pg_placement_num'  # Should have the same value as pg_num
-                    self.assertEqual(pool[k], int(v), '{}: {} != {}'.format(k, pool[k], v))
-                elif k == 'application_metadata':
-                    self.assertIsInstance(pool[k], list)
-                    self.assertEqual(pool[k],
-                                     data['application_metadata'])
-                elif k == 'pool':
-                    self.assertEqual(pool['pool_name'], v)
-                elif k in ['compression_mode', 'compression_algorithm']:
-                    self.assertEqual(pool['options'][k], data[k])
-                elif k == 'compression_max_blob_size':
-                    self.assertEqual(pool['options'][k], int(data[k]))
-                elif k == 'compression_required_ratio':
-                    self.assertEqual(pool['options'][k], float(data[k]))
-                else:
-                    self.assertEqual(pool[k], v, '{}: {} != {}'.format(k, pool[k], v))
-
-        except Exception:
-            log.exception("test_pool_create: pool=%s", pool)
-            raise
-
-        health = self._get('/api/dashboard/health')['health']
-        self.assertEqual(health['status'], 'HEALTH_OK', msg='health={}'.format(health))
-
     def test_pool_create(self):
         self._ceph_cmd(['osd', 'crush', 'rule', 'create-erasure', 'ecrule'])
         self._ceph_cmd(
@@ -172,12 +185,16 @@ class PoolTest(DashboardTestCase):
         self._pool_create(data)
 
     def test_update(self):
-        pool = [
-            {
-                'pool': 'dashboard_pool_update1',
-                'pg_num': '10',
-                'pool_type': 'replicated',
-            },
+        pool = {
+            'pool': 'dashboard_pool_update1',
+            'pg_num': '4',
+            'pool_type': 'replicated',
+            'compression_mode': 'passive',
+            'compression_algorithm': 'snappy',
+            'compression_max_blob_size': '131072',
+            'compression_required_ratio': '0.875',
+        }
+        updates = [
             {
                 'application_metadata': ['rbd', 'sth'],
             },
@@ -192,19 +209,27 @@ class PoolTest(DashboardTestCase):
                 'compression_mode': 'aggressive',
                 'compression_max_blob_size': '10000000',
                 'compression_required_ratio': '0.8',
+            },
+            {
+                'compression_mode': 'unset'
             }
         ]
-        self._task_post('/api/pool/', pool[0])
+        self._task_post('/api/pool/', pool)
         self.assertStatus(201)
-
-        self._check_pool_properties(pool[0])
-
-        for data in pool[1:]:
-            self._task_put('/api/pool/' + pool[0]['pool'], data)
-            self._check_pool_properties(data, pool_name=pool[0]['pool'])
-
-        self._task_delete("/api/pool/" + pool[0]['pool'])
+        self._check_pool_properties(pool)
+
+        for update in updates:
+            self._task_put('/api/pool/' + pool['pool'], update)
+            if update.get('compression_mode') == 'unset':
+                update = {
+                    'compression_mode': None,
+                    'compression_algorithm': None,
+                    'compression_max_blob_size': None,
+                    'compression_required_ratio': None,
+                }
+            self._check_pool_properties(update, pool_name=pool['pool'])
+        self._task_delete("/api/pool/" + pool['pool'])
         self.assertStatus(204)
 
     def test_pool_create_fail(self):
@@ -224,6 +249,7 @@ class PoolTest(DashboardTestCase):
             'compression_algorithms': JList(six.string_types),
             'compression_modes': JList(six.string_types),
             'is_all_bluestore': bool,
+            "bluestore_compression_algorithm": six.string_types,
             'osd_count': int,
             'crush_rules_replicated': JList(JObj({}, allow_unknown=True)),
             'crush_rules_erasure': JList(JObj({}, allow_unknown=True)),
diff --git a/src/pybind/mgr/dashboard/controllers/pool.py b/src/pybind/mgr/dashboard/controllers/pool.py
index ade88a6e5da41..bf27e1b36f27c 100644
--- a/src/pybind/mgr/dashboard/controllers/pool.py
+++ b/src/pybind/mgr/dashboard/controllers/pool.py
@@ -92,6 +92,9 @@ class Pool(RESTController):
         self._set_pool_values(pool, application_metadata, flags, False, kwargs)
 
     def _set_pool_values(self, pool, application_metadata, flags, update_existing, kwargs):
+        if update_existing:
+            current_pool = self._get(pool)
+            self._handle_update_compression_args(current_pool.get('options'), kwargs)
         if flags and 'ec_overwrites' in flags:
             CephService.send_command('mon', 'osd pool set', pool=pool, var='allow_ec_overwrites',
                                      val='true')
@@ -101,7 +104,7 @@ class Pool(RESTController):
                                      force='--yes-i-really-mean-it')
         if update_existing:
             original_app_metadata = set(
-                self._get(pool, 'application_metadata')['application_metadata'])
+                current_pool.get('application_metadata'))
         else:
             original_app_metadata = set()
 
@@ -118,6 +121,16 @@ class Pool(RESTController):
             if key == 'pg_num':
                 set_key('pgp_num', value)
 
+    def _handle_update_compression_args(self, options, kwargs):
+        if kwargs.get('compression_mode') == 'unset' and options is not None:
+            def reset_arg(arg, value):
+                if options.get(arg):
+                    kwargs[arg] = value
+            for arg in ['compression_min_blob_size', 'compression_max_blob_size',
+                        'compression_required_ratio']:
+                reset_arg(arg, '0')
+            reset_arg('compression_algorithm', 'unset')
+
     @Endpoint()
     @ReadPermission
     def _info(self):
@@ -132,15 +145,17 @@ class Pool(RESTController):
                        for o in mgr.get('osd_metadata').values())
 
         def compression_enum(conf_name):
-            return [o['enum_values'] for o in mgr.get('config_options')['options']
+            return [[v for v in o['enum_values'] if len(v) > 0] for o in config_options
                     if o['name'] == conf_name][0]
 
+        config_options = mgr.get('config_options')['options']
         return {
            "pool_names": [p['pool_name'] for p in self._pool_list()],
            "crush_rules_replicated": rules(1),
            "crush_rules_erasure": rules(3),
            "is_all_bluestore": all_bluestore(),
            "osd_count": len(mgr.get('osd_map')['osds']),
+           "bluestore_compression_algorithm": mgr.get('config')['bluestore_compression_algorithm'],
            "compression_algorithms": compression_enum('bluestore_compression_algorithm'),
            "compression_modes": compression_enum('bluestore_compression_mode'),
        }
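
The effect of the new _handle_update_compression_args() hook can be shown in isolation. The sketch below copies the reset logic from the controller change above into a standalone function; the name simulate_unset, the sample option values and the plain-dict handling are illustrative assumptions only, since the real controller reads the current options from the pool via self._get(pool) and mutates the kwargs of the PUT request.

# Standalone sketch of the 'unset' handling introduced in
# Pool._handle_update_compression_args (illustrative only).

def simulate_unset(options, kwargs):
    """Queue reset values for every compression option currently set on
    the pool, so that they are removed together with 'compression_mode'."""
    if kwargs.get('compression_mode') == 'unset' and options is not None:
        def reset_arg(arg, value):
            # Only reset options that are actually set on the pool.
            if options.get(arg):
                kwargs[arg] = value
        for arg in ['compression_min_blob_size', 'compression_max_blob_size',
                    'compression_required_ratio']:
            reset_arg(arg, '0')
        reset_arg('compression_algorithm', 'unset')


if __name__ == '__main__':
    # A pool that currently has compression configured (sample values).
    options = {
        'compression_mode': 'aggressive',
        'compression_algorithm': 'snappy',
        'compression_max_blob_size': 131072,
        'compression_required_ratio': 0.875,
    }
    update = {'compression_mode': 'unset'}
    simulate_unset(options, update)
    # The update now also carries resets for the options that were set:
    # compression_algorithm -> 'unset', compression_max_blob_size -> '0',
    # compression_required_ratio -> '0'.
    print(update)

The reset values mirror what the controller change uses: '0' for the size and ratio options and 'unset' for the algorithm, so the subsequent pool update clears those options instead of leaving them behind.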