timeout = 30
pause = 2
test = sorted(test)
- for i in range(timeout/pause):
+ for i in range(timeout // pause):
subtrees = self.fs.mds_asok(["get", "subtrees"], mds_id=status.get_rank(self.fs.id, rank)['name'])
subtrees = filter(lambda s: s['dir']['path'].startswith('/'), subtrees)
filtered = sorted([(s['dir']['path'], s['auth_first']) for s in subtrees])
want_objects = [
"{0:x}.{1:08x}".format(ino, n)
- for n in range(0, ((size - 1) / stripe_size) + 1)
+ for n in range(0, ((size - 1) // stripe_size) + 1)
]
exist_objects = self.rados(["ls"], pool=self.get_data_pool_name()).split("\n")
:param use_subdir: whether to put test files in a subdir or use root
"""
- cache_size = open_files/2
+ cache_size = open_files // 2
self.set_conf('mds', 'mds cache size', cache_size)
- self.set_conf('mds', 'mds_recall_max_caps', open_files/2)
+ self.set_conf('mds', 'mds_recall_max_caps', open_files // 2)
self.set_conf('mds', 'mds_recall_warning_threshold', open_files)
self.fs.mds_fail_restart()
self.fs.wait_for_daemons()
self.mount_a.create_n_files("testdir/file2", 5, True)
# Wait for the health warnings. Assume the MDS can handle at least 10 requests per second
- self.wait_for_health("MDS_CLIENT_OLDEST_TID", max_requests / 10)
+ self.wait_for_health("MDS_CLIENT_OLDEST_TID", max_requests // 10)
def _test_client_cache_size(self, mount_subdir):
"""
self.assertGreaterEqual(dentry_count, num_dirs)
self.assertGreaterEqual(dentry_pinned_count, num_dirs)
- cache_size = num_dirs / 10
+ cache_size = num_dirs // 10
self.mount_a.set_cache_size(cache_size)
def trimmed():
in_reconnect_for = self.fs.wait_for_state('up:active', timeout=self.mds_reconnect_timeout * 2)
# Check that the period we waited to enter active is within a factor
# of two of the reconnect timeout.
- self.assertGreater(in_reconnect_for, self.mds_reconnect_timeout / 2,
+ self.assertGreater(in_reconnect_for, self.mds_reconnect_timeout // 2,
"Should have been in reconnect phase for {0} but only took {1}".format(
self.mds_reconnect_timeout, in_reconnect_for
))
# Exactly stripe_count objects will exist
self.os * self.sc,
# Fewer than stripe_count objects will exist
- self.os * self.sc / 2,
- self.os * (self.sc - 1) + self.os / 2,
- self.os * (self.sc - 1) + self.os / 2 - 1,
- self.os * (self.sc + 1) + self.os / 2,
- self.os * (self.sc + 1) + self.os / 2 + 1,
+ self.os * self.sc // 2,
+ self.os * (self.sc - 1) + self.os // 2,
+ self.os * (self.sc - 1) + self.os // 2 - 1,
+ self.os * (self.sc + 1) + self.os // 2,
+ self.os * (self.sc + 1) + self.os // 2 + 1,
# More than stripe_count objects will exist
- self.os * self.sc + self.os * self.sc / 2
+ self.os * self.sc + self.os * self.sc // 2
]
def write(self):
# Fill up the cluster. This dd may or may not fail, as it depends on
# how soon the cluster recognises its own fullness
- self.mount_a.write_n_mb("large_file_a", self.fill_mb / 2)
+ self.mount_a.write_n_mb("large_file_a", self.fill_mb // 2)
try:
- self.mount_a.write_n_mb("large_file_b", self.fill_mb / 2)
+ self.mount_a.write_n_mb("large_file_b", self.fill_mb // 2)
except CommandFailedError:
log.info("Writing file B failed (full status happened already)")
assert self.is_full()
# Attempting to write more data should give me ENOSPC
with self.assertRaises(CommandFailedError) as ar:
- self.mount_a.write_n_mb("large_file_b", 50, seek=self.fill_mb / 2)
+ self.mount_a.write_n_mb("large_file_b", 50, seek=self.fill_mb // 2)
self.assertEqual(ar.exception.exitstatus, 1) # dd returns 1 on "No space"
# Wait for the MDS to see the latest OSD map so that it will reliably
Test per-pool fullness, which indicates quota limits exceeded
"""
pool_capacity = 1024 * 1024 * 32 # arbitrary low-ish limit
- fill_mb = pool_capacity / (1024 * 1024)
+ fill_mb = pool_capacity // (1024 * 1024)
# We are only testing quota handling on the data pool, not the metadata
# pool.
max_avail = self.fs.get_pool_df(self._data_pool_name())['max_avail']
full_ratio = float(self.fs.get_config("mon_osd_full_ratio", service_type="mon"))
TestClusterFull.pool_capacity = int(max_avail * full_ratio)
- TestClusterFull.fill_mb = (self.pool_capacity / (1024 * 1024))
+ TestClusterFull.fill_mb = (self.pool_capacity // (1024 * 1024))
def is_full(self):
return self.fs.is_full()
os.mkdir(os.path.join(mount_path, subdir))
for i in range(0, file_multiplier):
for size in range(0, {size_range}*size_unit, size_unit):
- filename = "{{0}}_{{1}}.bin".format(i, size / size_unit)
+ filename = "{{0}}_{{1}}.bin".format(i, size // size_unit)
with open(os.path.join(mount_path, subdir, filename), 'w') as f:
f.write(size * 'x')
""".format(
# insanely fast such that the deletions all pass before we have polled the
# statistics.
if throttle_type == self.OPS_THROTTLE:
- if ops_high_water < mds_max_purge_ops / 2:
+ if ops_high_water < mds_max_purge_ops // 2:
raise RuntimeError("Ops in flight high water is unexpectedly low ({0} / {1})".format(
ops_high_water, mds_max_purge_ops
))
# particularly large file/directory.
self.assertLessEqual(ops_high_water, mds_max_purge_ops+64)
elif throttle_type == self.FILES_THROTTLE:
- if files_high_water < mds_max_purge_files / 2:
+ if files_high_water < mds_max_purge_files // 2:
raise RuntimeError("Files in flight high water is unexpectedly low ({0} / {1})".format(
files_high_water, mds_max_purge_files
))
for p in osd_map['pools']:
existing_pg_count += p['pg_num']
- expected_pg_num = (max_overall - existing_pg_count) / 10
+ expected_pg_num = (max_overall - existing_pg_count) // 10
log.info("max_per_osd {0}".format(max_per_osd))
log.info("osd_count {0}".format(osd_count))
log.info("max_overall {0}".format(max_overall))
pool_capacity = 32 * 1024 * 1024
# number of files required to fill up 99% of the pool
- nr_files = int((pool_capacity * 0.99) / (TestVolumes.DEFAULT_FILE_SIZE * 1024 * 1024))
+ nr_files = int((pool_capacity * 0.99) // (TestVolumes.DEFAULT_FILE_SIZE * 1024 * 1024))
# create subvolume
self._fs_cmd("subvolume", "create", self.volname, subvolume)
self.fs.add_data_pool(new_pool)
self.fs.mon_manager.raw_cluster_cmd("osd", "pool", "set-quota", new_pool,
- "max_bytes", "{0}".format(pool_capacity / 4))
+ "max_bytes", "{0}".format(pool_capacity // 4))
# schedule a clone
self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1, "--pool_layout", new_pool)
manager.kill_mon(m)
log.info('forming a minimal quorum for %s, then adding monitors' % mons)
- qnum = (len(mons) / 2) + 1
+ qnum = (len(mons) // 2) + 1
num = 0
for m in mons:
manager.revive_mon(m)
'--objects', str(config.get('objects', 500)),
'--max-in-flight', str(config.get('max_in_flight', 16)),
'--size', str(object_size),
- '--min-stride-size', str(config.get('min_stride_size', object_size / 10)),
- '--max-stride-size', str(config.get('max_stride_size', object_size / 5)),
+ '--min-stride-size', str(config.get('min_stride_size', object_size // 10)),
+ '--max-stride-size', str(config.get('max_stride_size', object_size // 5)),
'--max-seconds', str(config.get('max_seconds', 0))
])
if config.get('write_append_excl', True):
if 'write' in weights:
- weights['write'] = weights['write'] / 2
+ weights['write'] = weights['write'] // 2
weights['write_excl'] = weights['write']
if 'append' in weights:
- weights['append'] = weights['append'] / 2
+ weights['append'] = weights['append'] // 2
weights['append_excl'] = weights['append']
for op, weight in weights.items():