ceph_objectstore_tool: run tests on erasure coded pools
author Loic Dachary <ldachary@redhat.com>
Wed, 19 Nov 2014 18:02:11 +0000 (19:02 +0100)
committer David Zafman <dzafman@redhat.com>
Wed, 4 Mar 2015 00:03:59 +0000 (16:03 -0800)
Create an erasure coded pool and run tests on it. The list of PGs is
adapted to contain the shard id.

Signed-off-by: Loic Dachary <ldachary@redhat.com>
(cherry picked from commit be39bd5d84613d30eecede78172619bb5339dbe0)
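
For an erasure coded pool each position in the acting set is a separate shard, so the
per-OSD PG list built by the test now carries ids of the form <pgid>s<shard> (for
example 2.0s0, 2.0s1), while replicated pools keep the bare pgid; this matches how
ceph_objectstore_tool refers to erasure coded PGs on the OSD. Below is a minimal,
self-contained sketch of that mapping, using a made-up pg_stats entry rather than real
cluster output:

    # Sketch only: hypothetical pg_stats entry, not real cluster output.
    stats = {"pgid": "2.0", "acting": [3, 1, 4]}
    erasure_coded = True

    pgs = {}
    if erasure_coded:
        # One shard per position in the acting set: 2.0s0, 2.0s1, 2.0s2
        for shard, osd in enumerate(stats["acting"]):
            pgs.setdefault(osd, []).append(
                "{pgid}s{shard}".format(pgid=stats["pgid"], shard=shard))
    else:
        # Replicated pools keep the plain pgid for every acting OSD
        for osd in stats["acting"]:
            pgs.setdefault(osd, []).append(stats["pgid"])

    print(pgs)  # {3: ['2.0s0'], 1: ['2.0s1'], 4: ['2.0s2']}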

tasks/ceph_objectstore_tool.py

index a05248bbc409adb42817d2c9310648e5155e9129..a065787e79b42d9841499e6c0cb5dc095202d109 100644
@@ -14,7 +14,7 @@ from teuthology.orchestra import run
 import sys
 import tempfile
 import json
-from util.rados import (rados, create_replicated_pool)
+from util.rados import (rados, create_replicated_pool, create_ec_pool)
 # from util.rados import (rados, create_ec_pool,
 #                               create_replicated_pool,
 #                               create_cache_pool)
@@ -59,7 +59,7 @@ def cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR, BASE_NAME, DAT
 
 
 # def rados(ctx, remote, cmd, wait=True, check_status=False):
-def cod_setup(log, ctx, remote, NUM_OBJECTS, DATADIR, BASE_NAME, DATALINECOUNT, POOL, db):
+def cod_setup(log, ctx, remote, NUM_OBJECTS, DATADIR, BASE_NAME, DATALINECOUNT, POOL, db, ec):
     ERRORS = 0
     log.info("Creating {objs} objects in pool".format(objs=NUM_OBJECTS))
     nullfd = open(os.devnull, "w")
@@ -92,6 +92,10 @@ def cod_setup(log, ctx, remote, NUM_OBJECTS, DATADIR, BASE_NAME, DATALINECOUNT,
                 ERRORS += 1
             db[NAME]["xattr"][mykey] = myval
 
+        # Erasure coded pools don't support omap
+        if ec:
+            continue
+
         # Create omap header in all objects but REPobject1
         if i != 1:
             myhdr = "hdr{i}".format(i=i)
@@ -153,8 +157,6 @@ def task(ctx, config):
     # assert len(clients) > 0,
     #    'ceph_objectstore_tool task needs at least 1 client'
 
-    # EC_POOL = "ec_pool"
-    # EC_NAME = "ECobject"
     #ERRORS = 0
     #DATADIR = os.path.join(TEUTHDIR, "data")
     # Put a test dir below the data dir
@@ -206,6 +208,11 @@ def task(ctx, config):
     create_replicated_pool(cli_remote, REP_POOL, PGNUM)
     ERRORS += test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME)
 
+    EC_POOL = "ec_pool"
+    EC_NAME = "ECobject"
+    create_ec_pool(cli_remote, EC_POOL, 'default', PGNUM)
+    ERRORS += test_objectstore(ctx, config, cli_remote, EC_POOL, EC_NAME, ec = True)
+
     if ERRORS == 0:
         log.info("TEST PASSED")
     else:
@@ -217,7 +224,7 @@ def task(ctx, config):
         log.info('Ending ceph_objectstore_tool')
 
 
-def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME):
+def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec = False):
     manager = ctx.manager
 
     osds = ctx.cluster.only(teuthology.is_type('osd'))
@@ -246,19 +253,22 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME):
     for remote in allremote:
         cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR, REP_NAME, DATALINECOUNT)
 
-    ERRORS += cod_setup(log, ctx, cli_remote, NUM_OBJECTS, DATADIR, REP_NAME, DATALINECOUNT, REP_POOL, db)
+    ERRORS += cod_setup(log, ctx, cli_remote, NUM_OBJECTS, DATADIR, REP_NAME, DATALINECOUNT, REP_POOL, db, ec)
 
     pgs = {}
-    jsontext = manager.raw_cluster_cmd('pg', 'dump_json')
-    pgdump = json.loads(jsontext)
-    PGS = [str(p["pgid"]) for p in pgdump["pg_stats"] if p["pgid"].find(str(REPID) + ".") == 0]
-    for stats in pgdump["pg_stats"]:
-        if stats["pgid"] in PGS:
+    for stats in manager.get_pg_stats():
+        if stats["pgid"].find(str(REPID) + ".") != 0:
+            continue
+        if pool_dump["type"] == ceph_manager.CephManager.REPLICATED_POOL:
             for osd in stats["acting"]:
-                if not pgs.has_key(osd):
-                    pgs[osd] = []
-                pgs[osd].append(stats["pgid"])
-
+                pgs.setdefault(osd, []).append(stats["pgid"])
+        elif pool_dump["type"] == ceph_manager.CephManager.ERASURE_CODED_POOL:
+            shard = 0
+            for osd in stats["acting"]:
+                pgs.setdefault(osd, []).append("{pgid}s{shard}".format(pgid=stats["pgid"], shard=shard))
+                shard += 1
+        else:
+            raise Exception("{pool} has an unexpected type {type}".format(pool=REP_POOL, type=pool_dump["type"]))
 
     log.info(pgs)
     log.info(db)