From: Samuel Just
Date: Tue, 18 Feb 2014 20:03:44 +0000 (-0800)
Subject: task/: add ec_pool and append to rados.py
X-Git-Tag: v0.94.10~27^2^2~364^2~370
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=e37156525142ee88981e632810cf5ab66c6f905d;p=ceph.git

task/: add ec_pool and append to rados.py

Signed-off-by: Samuel Just
---

diff --git a/teuthology/task/ceph_manager.py b/teuthology/task/ceph_manager.py
index bd8285a26b7f..b52f137e160e 100644
--- a/teuthology/task/ceph_manager.py
+++ b/teuthology/task/ceph_manager.py
@@ -325,6 +325,7 @@ class CephManager:
         self.config = config
         self.controller = controller
         self.next_pool_id = 0
+        self.created_erasure_pool = False
         if (logger):
             self.log = lambda x: logger.info(x)
         else:
@@ -569,21 +570,28 @@ class CephManager:
         self.log(status)
         return status['pgmap']['num_pgs']
 
-    def create_pool_with_unique_name(self, pg_num=1):
+    def create_pool_with_unique_name(self, pg_num=1, ec_pool=False):
         name = ""
         with self.lock:
             name = "unique_pool_%s"%(str(self.next_pool_id),)
             self.next_pool_id += 1
-            self.create_pool(name, pg_num)
+            self.create_pool(name, pg_num, ec_pool=ec_pool)
         return name
 
-    def create_pool(self, pool_name, pg_num=1):
+    def create_pool(self, pool_name, pg_num=1, ec_pool=False):
         with self.lock:
             assert isinstance(pool_name, str)
             assert isinstance(pg_num, int)
             assert pool_name not in self.pools
             self.log("creating pool_name %s"%(pool_name,))
-            self.raw_cluster_cmd('osd', 'pool', 'create', pool_name, str(pg_num))
+            if ec_pool and not self.created_erasure_pool:
+                self.created_erasure_pool = True
+                self.raw_cluster_cmd('osd', 'crush', 'rule', 'create-erasure', 'erasure2', '--property', 'erasure-code-ruleset-failure-domain=osd', '--property', 'erasure-code-m=2', '--property', 'erasure-code-k=1')
+
+            if ec_pool:
+                self.raw_cluster_cmd('osd', 'pool', 'create', pool_name, str(pg_num), str(pg_num), 'erasure', 'crush_ruleset=erasure2', '--property', 'erasure-code-ruleset-failure-domain=osd', '--property', 'erasure-code-m=2', '--property', 'erasure-code-k=2')
+            else:
+                self.raw_cluster_cmd('osd', 'pool', 'create', pool_name, str(pg_num))
         self.pools[pool_name] = pg_num
 
     def remove_pool(self, pool_name):
diff --git a/teuthology/task/rados.py b/teuthology/task/rados.py
index bc0e837dd541..e1df679c6a5a 100644
--- a/teuthology/task/rados.py
+++ b/teuthology/task/rados.py
@@ -28,6 +28,7 @@ def task(ctx, config):
         max_stride_size:
         op_weights:
         runs: - the pool is remade between runs
+        ec_pool: use an ec pool
 
     For example::
 
@@ -49,6 +50,7 @@ def task(ctx, config):
            snap_create: 3
            rollback: 2
            snap_remove: 0
+           ec_pool: true
          runs: 10
     - interactive:
 
@@ -88,7 +90,10 @@ def task(ctx, config):
         'adjust-ulimits',
         'ceph-coverage',
         '{tdir}/archive/coverage'.format(tdir=testdir),
-        'ceph_test_rados',
+        'ceph_test_rados']
+    if config.get('ec_pool', False):
+        args.extend(['--ec-pool'])
+    args.extend([
         '--op', 'read', str(op_weights.get('read', 100)),
         '--op', 'write', str(op_weights.get('write', 100)),
         '--op', 'delete', str(op_weights.get('delete', 10)),
@@ -98,6 +103,7 @@ def task(ctx, config):
         '--op', 'setattr', str(op_weights.get('setattr', 0)),
         '--op', 'rmattr', str(op_weights.get('rmattr', 0)),
         '--op', 'watch', str(op_weights.get('watch', 0)),
+        '--op', 'append', str(op_weights.get('append', 0)),
         '--max-ops', str(config.get('ops', 10000)),
         '--objects', str(config.get('objects', 500)),
         '--max-in-flight', str(config.get('max_in_flight', 16)),
@@ -105,7 +111,7 @@ def task(ctx, config):
         '--min-stride-size', str(config.get('min_stride_size', object_size / 10)),
         '--max-stride-size', str(config.get('max_stride_size', object_size / 5)),
         '--max-seconds', str(config.get('max_seconds', 0))
-        ]
+        ])
     for field in [
         'copy_from', 'is_dirty', 'undirty', 'cache_flush',
         'cache_try_flush', 'cache_evict',
@@ -143,7 +149,7 @@ def task(ctx, config):
             if not pool and existing_pools:
                 pool = existing_pools.pop()
             else:
-                pool = ctx.manager.create_pool_with_unique_name()
+                pool = ctx.manager.create_pool_with_unique_name(ec_pool=config.get('ec_pool', False))
             created_pools.append(pool)
             (remote,) = ctx.cluster.only(role).remotes.iterkeys()