task/: add ec_pool and append to rados.py
author    Samuel Just <sam.just@inktank.com>
          Tue, 18 Feb 2014 20:03:44 +0000 (12:03 -0800)
committer Samuel Just <sam.just@inktank.com>
          Tue, 18 Feb 2014 21:19:52 +0000 (13:19 -0800)
Signed-off-by: Samuel Just <sam.just@inktank.com>
teuthology/task/ceph_manager.py
teuthology/task/rados.py

diff --git a/teuthology/task/ceph_manager.py b/teuthology/task/ceph_manager.py
index bd8285a26b7f133e5e6faf7b404c843a433a05dd..b52f137e160eab70761d248405986e31e3783efc 100644
--- a/teuthology/task/ceph_manager.py
+++ b/teuthology/task/ceph_manager.py
@@ -325,6 +325,7 @@ class CephManager:
         self.config = config
         self.controller = controller
         self.next_pool_id = 0
+        self.created_erasure_pool = False
         if (logger):
             self.log = lambda x: logger.info(x)
         else:
@@ -569,21 +570,28 @@ class CephManager:
         self.log(status)
         return status['pgmap']['num_pgs']
 
-    def create_pool_with_unique_name(self, pg_num=1):
+    def create_pool_with_unique_name(self, pg_num=1, ec_pool=False):
         name = ""
         with self.lock:
             name = "unique_pool_%s"%(str(self.next_pool_id),)
             self.next_pool_id += 1
-            self.create_pool(name, pg_num)
+            self.create_pool(name, pg_num, ec_pool=ec_pool)
         return name
 
-    def create_pool(self, pool_name, pg_num=1):
+    def create_pool(self, pool_name, pg_num=1, ec_pool=False):
         with self.lock:
             assert isinstance(pool_name, str)
             assert isinstance(pg_num, int)
             assert pool_name not in self.pools
             self.log("creating pool_name %s"%(pool_name,))
-            self.raw_cluster_cmd('osd', 'pool', 'create', pool_name, str(pg_num))
+            if ec_pool and not self.created_erasure_pool:
+                self.created_erasure_pool = True
+                self.raw_cluster_cmd('osd', 'crush', 'rule', 'create-erasure', 'erasure2', '--property', 'erasure-code-ruleset-failure-domain=osd', '--property', 'erasure-code-m=2', '--property', 'erasure-code-k=1')
+
+            if ec_pool:
+                self.raw_cluster_cmd('osd', 'pool', 'create', pool_name, str(pg_num), str(pg_num), 'erasure', 'crush_ruleset=erasure2', '--property', 'erasure-code-ruleset-failure-domain=osd', '--property', 'erasure-code-m=2', '--property', 'erasure-code-k=2')
+            else:
+                self.raw_cluster_cmd('osd', 'pool', 'create', pool_name, str(pg_num))
             self.pools[pool_name] = pg_num
 
     def remove_pool(self, pool_name):
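
For context, the new keyword threads through both entry points. A minimal
usage sketch, not part of the commit, assuming a constructed CephManager
instance named `manager`:

    # First EC pool: the branch above also creates the 'erasure2' crush
    # rule, guarded by created_erasure_pool so the rule is made only once.
    manager.create_pool('ecpool', pg_num=16, ec_pool=True)

    # Later EC pools reuse the rule; replicated pools take the old path.
    name = manager.create_pool_with_unique_name(pg_num=16, ec_pool=True)
    manager.create_pool('reppool', pg_num=16)
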
diff --git a/teuthology/task/rados.py b/teuthology/task/rados.py
index bc0e837dd54113bf79f158511a0a3ccdec1d5efb..e1df679c6a5acd188a9777e3345605195a6a2f36 100644
--- a/teuthology/task/rados.py
+++ b/teuthology/task/rados.py
@@ -28,6 +28,7 @@ def task(ctx, config):
           max_stride_size: <maximum write stride size in bytes>
           op_weights: <dictionary mapping operation type to integer weight>
           runs: <number of times to run> - the pool is remade between runs
+          ec_pool: use an ec pool
 
     For example::
 
@@ -49,6 +50,7 @@ def task(ctx, config):
               snap_create: 3
               rollback: 2
               snap_remove: 0
+            ec_pool: true
             runs: 10
         - interactive:
 
@@ -88,7 +90,10 @@ def task(ctx, config):
         'adjust-ulimits',
         'ceph-coverage',
         '{tdir}/archive/coverage'.format(tdir=testdir),
-        'ceph_test_rados',
+        'ceph_test_rados']
+    if config.get('ec_pool', False):
+        args.extend(['--ec-pool'])
+    args.extend([
         '--op', 'read', str(op_weights.get('read', 100)),
         '--op', 'write', str(op_weights.get('write', 100)),
         '--op', 'delete', str(op_weights.get('delete', 10)),
@@ -98,6 +103,7 @@ def task(ctx, config):
         '--op', 'setattr', str(op_weights.get('setattr', 0)),
         '--op', 'rmattr', str(op_weights.get('rmattr', 0)),
         '--op', 'watch', str(op_weights.get('watch', 0)),
+        '--op', 'append', str(op_weights.get('append', 0)),
         '--max-ops', str(config.get('ops', 10000)),
         '--objects', str(config.get('objects', 500)),
         '--max-in-flight', str(config.get('max_in_flight', 16)),
@@ -105,7 +111,7 @@ def task(ctx, config):
         '--min-stride-size', str(config.get('min_stride_size', object_size / 10)),
         '--max-stride-size', str(config.get('max_stride_size', object_size / 5)),
         '--max-seconds', str(config.get('max_seconds', 0))
-        ]
+        ])
     for field in [
         'copy_from', 'is_dirty', 'undirty', 'cache_flush',
         'cache_try_flush', 'cache_evict',
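
For illustration, with `ec_pool: true` and `op_weights: {append: 10}` the
list assembled above comes out roughly as below. This is a sketch, not
output captured from a run; the testdir path is a stand-in, and object_size
is assumed to be 4000000 (so the stride bounds are object_size/10 and
object_size/5):

    args = [
        'adjust-ulimits',
        'ceph-coverage',
        '/tmp/cephtest/archive/coverage',  # '{tdir}' expanded; placeholder path
        'ceph_test_rados',
        '--ec-pool',                       # added because ec_pool is true
        '--op', 'read', '100',
        '--op', 'write', '100',
        '--op', 'delete', '10',
        # ... remaining --op flags (snap_create, rollback, ...) elided ...
        '--op', 'append', '10',            # the new op; weight from op_weights
        '--max-ops', '10000',
        '--objects', '500',
        '--max-in-flight', '16',
        '--min-stride-size', '400000',
        '--max-stride-size', '800000',
        '--max-seconds', '0',
    ]
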
@@ -143,7 +149,7 @@ def task(ctx, config):
                 if not pool and existing_pools:
                     pool = existing_pools.pop()
                 else:
-                    pool = ctx.manager.create_pool_with_unique_name()
+                    pool = ctx.manager.create_pool_with_unique_name(ec_pool=config.get('ec_pool', False))
                     created_pools.append(pool)
 
                 (remote,) = ctx.cluster.only(role).remotes.iterkeys()
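
Between runs the pool is remade, as the docstring notes. A condensed,
illustrative view of the per-run lifecycle (run_one_test is a hypothetical
stand-in for spawning ceph_test_rados; the defaults shown are assumptions):

    for i in range(config.get('runs', 1)):        # assumed default of 1 run
        pool = ctx.manager.create_pool_with_unique_name(
            ec_pool=config.get('ec_pool', False))
        try:
            run_one_test(pool)                    # hypothetical helper
        finally:
            ctx.manager.remove_pool(pool)         # pool remade between runs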