call("./init-ceph -c {conf} stop osd mon > /dev/null 2>&1".format(conf=CEPH_CONF), shell=True)
+def check_data(DATADIR, TMPFILE, OSDDIR, SPLIT_NAME):
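+ # For each DATADIR file whose object name starts with SPLIT_NAME, diff every
+ # on-disk copy found under OSDDIR against it.
+ # Returns (error count, total number of copies checked).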
+ repcount = 0
+ ERRORS = 0
+ for nsfile in [f for f in os.listdir(DATADIR) if f.split('-')[1].find(SPLIT_NAME) == 0]:
+ nspace = nsfile.split("-")[0]
+ file = nsfile.split("-")[1]
+ path = os.path.join(DATADIR, nsfile)
+ tmpfd = open(TMPFILE, "w")
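+ # Locate every on-disk copy of this object across the OSD data directories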
+ cmd = "find {dir} -name '{file}_*_{nspace}_*'".format(dir=OSDDIR, file=file, nspace=nspace)
+ logging.debug(cmd)
+ ret = call(cmd, shell=True, stdout=tmpfd)
+ if ret:
+ logging.critical("INTERNAL ERROR")
+ return 1
+ tmpfd.close()
+ obj_locs = get_lines(TMPFILE)
+ if len(obj_locs) == 0:
+ logging.error("Can't find imported object {name}".format(name=file))
+ ERRORS += 1
+ for obj_loc in obj_locs:
+ repcount += 1
+ cmd = "diff -q {src} {obj_loc}".format(src=path, obj_loc=obj_loc)
+ logging.debug(cmd)
+ ret = call(cmd, shell=True)
+ if ret != 0:
+ logging.error("{file} data not imported properly into {obj}".format(file=file, obj=obj_loc))
+ ERRORS += 1
+ return ERRORS, repcount
+
+
def main(argv):
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
nullfd = open(os.devnull, "w")
cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file -").format(osd=ONEOSD, pg=ONEPG)
ERRORS += test_failure_tty(cmd, "stdout is a tty and no --file filename specified")
+ # Prep a valid export file for import failure tests
OTHERFILE = "/tmp/foo.{pid}".format(pid=pid)
- foofd = open(OTHERFILE, "w")
- foofd.close()
+ cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=ONEOSD, pg=ONEPG, file=OTHERFILE)
+ logging.debug(cmd)
+ call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
+
+ # On import, can't specify a pgid in a non-existent pool
+ cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=ONEOSD, pg="10.0", file=OTHERFILE)
+ ERRORS += test_failure(cmd, "Can't specify a different pgid pool, must be")
- # On import can't specify a PG
- cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {FOO}").format(osd=ONEOSD, pg=ONEPG, FOO=OTHERFILE)
- ERRORS += test_failure(cmd, "--pgid option invalid with import")
+ # On import, can't specify a pgid whose seed exceeds the pool's current pg_num
+ TMPPG="{pool}.80".format(pool=REPID)
+ cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=ONEOSD, pg=TMPPG, file=OTHERFILE)
+ ERRORS += test_failure(cmd, "Illegal pgid, the seed is larger than current pg_num")
os.unlink(OTHERFILE)
cmd = (CFSD_PREFIX + "--op import --file {FOO}").format(osd=ONEOSD, FOO=OTHERFILE)
if pg == PGS[0]:
cmd = ("cat {file} |".format(file=file) + CFSD_PREFIX + "--op import").format(osd=osd)
elif pg == PGS[1]:
- cmd = (CFSD_PREFIX + "--op import --file - < {file}").format(osd=osd, file=file)
+ cmd = (CFSD_PREFIX + "--op import --file - --pgid {pg} < {file}").format(osd=osd, file=file, pg=pg)
else:
cmd = (CFSD_PREFIX + "--op import --file {file}").format(osd=osd, file=file)
logging.debug(cmd)
if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0:
print "Verify replicated import data"
- for nsfile in [f for f in os.listdir(DATADIR) if f.split('-')[1].find(REP_NAME) == 0]:
- nspace = nsfile.split("-")[0]
- file = nsfile.split("-")[1]
- path = os.path.join(DATADIR, nsfile)
- tmpfd = open(TMPFILE, "w")
- cmd = "find {dir} -name '{file}_*_{nspace}_*'".format(dir=OSDDIR, file=file, nspace=nspace)
- logging.debug(cmd)
- ret = call(cmd, shell=True, stdout=tmpfd)
- if ret:
- logging.critical("INTERNAL ERROR")
- return 1
- tmpfd.close()
- obj_locs = get_lines(TMPFILE)
- if len(obj_locs) == 0:
- logging.error("Can't find imported object {name}".format(name=file))
- ERRORS += 1
- for obj_loc in obj_locs:
- cmd = "diff -q {src} {obj_loc}".format(src=path, obj_loc=obj_loc)
- logging.debug(cmd)
- ret = call(cmd, shell=True)
- if ret != 0:
- logging.error("{file} data not imported properly into {obj}".format(file=file, obj=obj_loc))
- ERRORS += 1
+ data_errors, _ = check_data(DATADIR, TMPFILE, OSDDIR, REP_NAME)
+ ERRORS += data_errors
else:
logging.warning("SKIPPING CHECKING IMPORT DATA DUE TO PREVIOUS FAILURES")
else:
logging.warning("SKIPPING IMPORT-RADOS TESTS DUE TO PREVIOUS FAILURES")
- # Cause REP_POOL to split and test import with object/log filtering
- cmd = "./ceph osd pool set {pool} pg_num 32".format(pool=REP_POOL)
- logging.debug(cmd)
- ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
- time.sleep(15)
- cmd = "./ceph osd pool set {pool} pgp_num 32".format(pool=REP_POOL)
+ # Clear out directories left from the previous test portion
+ call("/bin/rm -rf {dir}".format(dir=TESTDIR), shell=True)
+ call("/bin/rm -rf {dir}".format(dir=DATADIR), shell=True)
+ os.mkdir(TESTDIR)
+ os.mkdir(DATADIR)
+
+ # Cause SPLIT_POOL to split and test import with object/log filtering
+ print "Testing import all objects after a split"
+ SPLIT_POOL = "split_pool"
+ PG_COUNT = 1
+ SPLIT_OBJ_COUNT = 5
+ SPLIT_NSPACE_COUNT = 2
+ SPLIT_NAME = "split"
+ cmd = "./ceph osd pool create {pool} {pg} {pg} replicated".format(pool=SPLIT_POOL, pg=PG_COUNT)
logging.debug(cmd)
- ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
- wait_for_health()
- kill_daemons()
-
- print "Remove pgs for another import"
+ call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
+ SPLITID = get_pool_id(SPLIT_POOL, nullfd)
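+ # Record the pool's replication factor; the copy-count check below expects
+ # SPLIT_OBJ_COUNT * SPLIT_NSPACE_COUNT * pool_size copies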
+ pool_size = int(check_output("./ceph osd pool get {pool} size".format(pool=SPLIT_POOL), shell=True, stderr=nullfd).split(" ")[1])
+ EXP_ERRORS = 0
RM_ERRORS = 0
- for pg in ALLREPPGS:
- for osd in get_osds(pg, OSDDIR):
- cmd = (CFSD_PREFIX + "--op remove --pgid {pg}").format(pg=pg, osd=osd)
+ IMP_ERRORS = 0
+
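+ # Create SPLIT_OBJ_COUNT objects in each of SPLIT_NSPACE_COUNT namespaces;
+ # object 1 gets DATALINECOUNT lines of data, the others a single line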
+ objects = range(1, SPLIT_OBJ_COUNT + 1)
+ nspaces = range(SPLIT_NSPACE_COUNT)
+ for n in nspaces:
+ nspace = get_nspace(n)
+
+ for i in objects:
+ NAME = SPLIT_NAME + "{num}".format(num=i)
+ LNAME = nspace + "-" + NAME
+ DDNAME = os.path.join(DATADIR, LNAME)
+
+ cmd = "rm -f " + DDNAME
logging.debug(cmd)
- ret = call(cmd, shell=True, stdout=nullfd)
+ call(cmd, shell=True)
+
+ if i == 1:
+ dataline = range(DATALINECOUNT)
+ else:
+ dataline = range(1)
+ fd = open(DDNAME, "w")
+ data = "This is the split data for " + LNAME + "\n"
+ for _ in dataline:
+ fd.write(data)
+ fd.close()
+
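+ # Write the object into SPLIT_POOL under its namespace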
+ cmd = "./rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=SPLIT_POOL, name=NAME, ddname=DDNAME, nspace=nspace)
+ logging.debug(cmd)
+ ret = call(cmd, shell=True, stderr=nullfd)
if ret != 0:
- logging.error("Removing failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
- RM_ERRORS += 1
+ logging.critical("Rados put command failed with {ret}".format(ret=ret))
+ return 1
- ERRORS += RM_ERRORS
+ wait_for_health()
+ kill_daemons()
- IMP_ERRORS = 0
- if RM_ERRORS == 0:
- print "Test pg import after PGs have split"
- for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and string.find(f, "osd") == 0]:
- dir = os.path.join(TESTDIR, osd)
- PGS = [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))]
- for pg in PGS:
- if pg not in ALLREPPGS:
- continue
- file = os.path.join(dir, pg)
- cmd = (CFSD_PREFIX + "--op import --file {file}").format(osd=osd, file=file)
+ for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and string.find(f, "osd") == 0]:
+ os.mkdir(os.path.join(TESTDIR, osd))
+
+ pg = "{pool}.0".format(pool=SPLITID)
+ EXPORT_PG = pg
+
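+ # Export the single pre-split PG from every OSD that holds a copy of it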
+ export_osds = get_osds(pg, OSDDIR)
+ for osd in export_osds:
+ mydir = os.path.join(TESTDIR, osd)
+ fname = os.path.join(mydir, pg)
+ cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
+ logging.debug(cmd)
+ ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
+ if ret != 0:
+ logging.error("Exporting failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
+ EXP_ERRORS += 1
+
+ ERRORS += EXP_ERRORS
+
+ if EXP_ERRORS == 0:
+ vstart(new=False)
+ wait_for_health()
+
+ time.sleep(20)
+
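+ # Split the pool from 1 PG to 2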
+ cmd = "./ceph osd pool set {pool} pg_num 2".format(pool=SPLIT_POOL)
+ logging.debug(cmd)
+ ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
+ time.sleep(5)
+ wait_for_health()
+
+ time.sleep(15)
+
+ kill_daemons()
+
+ # Now 2 PGs, poolid.0 and poolid.1
+ for seed in range(2):
+ pg = "{pool}.{seed}".format(pool=SPLITID, seed=seed)
+
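+ # Remove each post-split PG from its OSDs and re-import from the pre-split export files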
+ which = 0
+ for osd in get_osds(pg, OSDDIR):
+ cmd = (CFSD_PREFIX + "--op remove --pgid {pg}").format(pg=pg, osd=osd)
+ logging.debug(cmd)
+ ret = call(cmd, shell=True, stdout=nullfd)
+
+ # The export files were written before the split, named after EXPORT_PG and
+ # stored per exporting OSD.  Step 'which' through those OSD directories so
+ # every export copy gets used for an import.
+ mydir = os.path.join(TESTDIR, export_osds[which])
+ fname = os.path.join(mydir, EXPORT_PG)
+ which += 1
+ cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
logging.debug(cmd)
ret = call(cmd, shell=True, stdout=nullfd)
if ret != 0:
logging.error("Import failed from {file} with {ret}".format(file=file, ret=ret))
IMP_ERRORS += 1
- else:
- logging.warning("SKIPPING IMPORT TESTS DUE TO PREVIOUS FAILURES")
- ERRORS += IMP_ERRORS
+ ERRORS += IMP_ERRORS
- # Start up again to make sure imports didn't corrupt anything
- if IMP_ERRORS == 0:
- vstart(new=False)
- wait_for_health()
+ # Start up again to make sure imports didn't corrupt anything
+ if IMP_ERRORS == 0:
+ print "Verify split import data"
+ data_errors, count = check_data(DATADIR, TMPFILE, OSDDIR, SPLIT_NAME)
+ ERRORS += data_errors
+ if count != (SPLIT_OBJ_COUNT * SPLIT_NSPACE_COUNT * pool_size):
+ logging.error("Incorrect number of replicas seen {count}".format(count=count))
+ ERRORS += 1
+ vstart(new=False)
+ wait_for_health()
call("/bin/rm -rf {dir}".format(dir=TESTDIR), shell=True)
call("/bin/rm -rf {dir}".format(dir=DATADIR), shell=True)
}
if (coll_pgid.pgid != pgid) {
- cerr << "Skipping object '" << ob.hoid << "' which no longer belongs in exported pg" << std::endl;
+ cerr << "Skipping object '" << ob.hoid << "' which belongs in pg " << pgid << std::endl;
*skipped_objects = true;
skip_object(bl);
return 0;
}
int get_pg_metadata(ObjectStore *store, bufferlist &bl, metadata_section &ms,
- const OSDSuperblock& sb, OSDMap& curmap)
+ const OSDSuperblock& sb, OSDMap& curmap, spg_t pgid)
{
bufferlist::iterator ebliter = bl.begin();
ms.decode(ebliter);
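+ // Remember the pgid recorded in the export, then relabel the metadata with the target pgid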
+ spg_t old_pgid = ms.info.pgid;
+ ms.info.pgid = pgid;
#if DIAGNOSTIC
Formatter *formatter = new JSONFormatter(true);
+ cout << "export pgid " << old_pgid << std::endl;
cout << "struct_v " << (int)ms.struct_ver << std::endl;
cout << "map epoch " << ms.map_epoch << std::endl;
cout << std::endl;
#endif
+ if (ms.osdmap.get_epoch() != 0 && ms.map_epoch != ms.osdmap.get_epoch()) {
+ cerr << "FATAL: Invalid OSDMap epoch in export data" << std::endl;
+ return -EFAULT;
+ }
+
if (ms.map_epoch > sb.current_epoch) {
cerr << "ERROR: Export map_epoch " << ms.map_epoch << " > osd epoch " << sb.current_epoch << std::endl;
return -EINVAL;
}
- // If the osdmap was present in the metadata we can check for splits.
// Pool verified to exist for call to get_pg_num().
- if (ms.map_epoch < sb.current_epoch) {
- bool found_map = false;
+ unsigned new_pg_num = curmap.get_pg_num(pgid.pgid.pool());
+
+ if (pgid.pgid.ps() >= new_pg_num) {
+ cerr << "Illegal pgid, the seed is larger than current pg_num" << std::endl;
+ return -EINVAL;
+ }
+
+ // Old exports didn't include an OSDMap; see if we have a copy locally
+ if (ms.osdmap.get_epoch() == 0) {
OSDMap findmap;
bufferlist findmap_bl;
int ret = get_osdmap(store, ms.map_epoch, findmap, findmap_bl);
- if (ret == 0)
- found_map = true;
-
- // Old export didn't include OSDMap
- if (ms.osdmap.get_epoch() == 0) {
- // If we found the map locally and an older export didn't have it,
- // then we'll use the local one.
- if (found_map) {
- ms.osdmap = findmap;
- } else {
- cerr << "WARNING: No OSDMap in old export,"
- " some objects may be ignored due to a split" << std::endl;
- }
+ if (ret == 0) {
+ ms.osdmap = findmap;
+ } else {
+ cerr << "WARNING: No OSDMap in old export,"
+ " some objects may be ignored due to a split" << std::endl;
}
+ }
+
+ // old_pg_num stays 0 in the unusual case that the OSDMap is neither in the
+ // export nor available locally.
+ unsigned old_pg_num = 0;
+ if (ms.osdmap.get_epoch() != 0)
+ old_pg_num = ms.osdmap.get_pg_num(pgid.pgid.pool());
- // If OSDMap is available check for splits
- if (ms.osdmap.get_epoch()) {
- spg_t parent(ms.info.pgid);
- if (parent.is_split(ms.osdmap.get_pg_num(ms.info.pgid.pgid.m_pool),
- curmap.get_pg_num(ms.info.pgid.pgid.m_pool), NULL)) {
- cerr << "WARNING: Split occurred, some objects may be ignored" << std::endl;
+ if (debug) {
+ cerr << "old_pg_num " << old_pg_num << std::endl;
+ cerr << "new_pg_num " << new_pg_num << std::endl;
+ cerr << ms.osdmap << std::endl;
+ cerr << curmap << std::endl;
+ }
+
+ // If we managed to obtain a good OSDMap we can do these checks
+ if (old_pg_num) {
+ if (old_pgid.pgid.ps() >= old_pg_num) {
+ cerr << "FATAL: pgid invalid for original map epoch" << std::endl;
+ return -EFAULT;
+ }
+ if (pgid.pgid.ps() >= old_pg_num) {
+ cout << "NOTICE: Post split pgid specified" << std::endl;
+ } else {
+ spg_t parent(pgid);
+ if (parent.is_split(old_pg_num, new_pg_num, NULL)) {
+ cerr << "WARNING: Split occurred, some objects may be ignored" << std::endl;
}
}
}
}
}
-int do_import(ObjectStore *store, OSDSuperblock& sb, bool force)
+int do_import(ObjectStore *store, OSDSuperblock& sb, bool force, string pgidstr)
{
bufferlist ebl;
pg_info_t info;
pg_begin pgb;
pgb.decode(ebliter);
spg_t pgid = pgb.pgid;
+ spg_t orig_pgid = pgid;
+
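+ // On import, --pgid may only relabel the pg within the same pool as the export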
+ if (pgidstr.length()) {
+ spg_t user_pgid;
+
+ bool ok = user_pgid.parse(pgidstr.c_str());
+ // This succeeded in main() already
+ assert(ok);
+ if (pgid != user_pgid) {
+ if (pgid.pool() != user_pgid.pool()) {
+ cerr << "Can't specify a different pgid pool, must be " << pgid.pool() << std::endl;
+ return -EINVAL;
+ }
+ pgid = user_pgid;
+ }
+ }
if (!pgb.superblock.cluster_fsid.is_zero()
&& pgb.superblock.cluster_fsid != sb.cluster_fsid) {
delete t;
}
- cout << "Importing pgid " << pgid << std::endl;
+ cout << "Importing pgid " << pgid;
+ if (orig_pgid != pgid) {
+ cout << " exported as " << orig_pgid;
+ }
+ cout << std::endl;
bool done = false;
bool found_metadata = false;
if (ret) return ret;
break;
case TYPE_PG_METADATA:
- ret = get_pg_metadata(store, ebl, ms, sb, curmap);
+ ret = get_pg_metadata(store, ebl, ms, sb, curmap, pgid);
if (ret) return ret;
found_metadata = true;
break;
}
}
- if (op == "import" && pgidstr.length()) {
- cerr << "--pgid option invalid with import" << std::endl;
- myexit(1);
- }
-
if (pgidstr.length() && !pgid.parse(pgidstr.c_str())) {
cerr << "Invalid pgid '" << pgidstr << "' specified" << std::endl;
myexit(1);
if (op == "import") {
try {
- ret = do_import(fs, superblock, force);
+ ret = do_import(fs, superblock, force, pgidstr);
}
catch (const buffer::error &e) {
cerr << "do_import threw exception error " << e.what() << std::endl;