From 0184cd7e7af330aff49b0cae73d725fdd95ab5af Mon Sep 17 00:00:00 2001 From: Kefu Chai Date: Sun, 5 Apr 2020 00:02:40 +0800 Subject: [PATCH] qa/tasks: use "a // b" instead of "a / b" for expressions where the value is expected to be integer. as in python3, `a / b` returns a float. Signed-off-by: Kefu Chai (cherry picked from commit 2089bf04b9c3b4bb065bf4c0bafa97419f2d87ff) Conflicts: qa/tasks/cephfs/test_full.py qa/tasks/cephfs/test_strays.py: trivial resolutions --- qa/tasks/cephfs/cephfs_test_case.py | 2 +- qa/tasks/cephfs/filesystem.py | 2 +- qa/tasks/cephfs/test_client_limits.py | 8 ++++---- qa/tasks/cephfs/test_client_recovery.py | 2 +- qa/tasks/cephfs/test_data_scan.py | 12 ++++++------ qa/tasks/cephfs/test_full.py | 10 +++++----- qa/tasks/cephfs/test_strays.py | 6 +++--- qa/tasks/cephfs/test_volume_client.py | 2 +- qa/tasks/cephfs/test_volumes.py | 4 ++-- qa/tasks/mon_recovery.py | 2 +- qa/tasks/rados.py | 8 ++++---- 11 files changed, 29 insertions(+), 29 deletions(-) diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py index f5aef8d747d2e..576e3d7533218 100644 --- a/qa/tasks/cephfs/cephfs_test_case.py +++ b/qa/tasks/cephfs/cephfs_test_case.py @@ -292,7 +292,7 @@ class CephFSTestCase(CephTestCase): timeout = 30 pause = 2 test = sorted(test) - for i in range(timeout/pause): + for i in range(timeout // pause): subtrees = self.fs.mds_asok(["get", "subtrees"], mds_id=status.get_rank(self.fs.id, rank)['name']) subtrees = filter(lambda s: s['dir']['path'].startswith('/'), subtrees) filtered = sorted([(s['dir']['path'], s['auth_first']) for s in subtrees]) diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py index d149387a6984b..24051294ca3e7 100644 --- a/qa/tasks/cephfs/filesystem.py +++ b/qa/tasks/cephfs/filesystem.py @@ -1151,7 +1151,7 @@ class Filesystem(MDSCluster): want_objects = [ "{0:x}.{1:08x}".format(ino, n) - for n in range(0, ((size - 1) / stripe_size) + 1) + for n in range(0, ((size - 1) // stripe_size) + 1) ] exist_objects = self.rados(["ls"], pool=self.get_data_pool_name()).split("\n") diff --git a/qa/tasks/cephfs/test_client_limits.py b/qa/tasks/cephfs/test_client_limits.py index 706f4af0addd3..bf4a469ed1e51 100644 --- a/qa/tasks/cephfs/test_client_limits.py +++ b/qa/tasks/cephfs/test_client_limits.py @@ -39,10 +39,10 @@ class TestClientLimits(CephFSTestCase): :param use_subdir: whether to put test files in a subdir or use root """ - cache_size = open_files/2 + cache_size = open_files // 2 self.set_conf('mds', 'mds cache size', cache_size) - self.set_conf('mds', 'mds_recall_max_caps', open_files/2) + self.set_conf('mds', 'mds_recall_max_caps', open_files // 2) self.set_conf('mds', 'mds_recall_warning_threshold', open_files) self.fs.mds_fail_restart() self.fs.wait_for_daemons() @@ -174,7 +174,7 @@ class TestClientLimits(CephFSTestCase): self.mount_a.create_n_files("testdir/file2", 5, True) # Wait for the health warnings. 
Assume mds can handle 10 request per second at least - self.wait_for_health("MDS_CLIENT_OLDEST_TID", max_requests / 10) + self.wait_for_health("MDS_CLIENT_OLDEST_TID", max_requests // 10) def _test_client_cache_size(self, mount_subdir): """ @@ -215,7 +215,7 @@ class TestClientLimits(CephFSTestCase): self.assertGreaterEqual(dentry_count, num_dirs) self.assertGreaterEqual(dentry_pinned_count, num_dirs) - cache_size = num_dirs / 10 + cache_size = num_dirs // 10 self.mount_a.set_cache_size(cache_size) def trimmed(): diff --git a/qa/tasks/cephfs/test_client_recovery.py b/qa/tasks/cephfs/test_client_recovery.py index ab86ae7a89c78..c7806b71292f3 100644 --- a/qa/tasks/cephfs/test_client_recovery.py +++ b/qa/tasks/cephfs/test_client_recovery.py @@ -161,7 +161,7 @@ class TestClientRecovery(CephFSTestCase): in_reconnect_for = self.fs.wait_for_state('up:active', timeout=self.mds_reconnect_timeout * 2) # Check that the period we waited to enter active is within a factor # of two of the reconnect timeout. - self.assertGreater(in_reconnect_for, self.mds_reconnect_timeout / 2, + self.assertGreater(in_reconnect_for, self.mds_reconnect_timeout // 2, "Should have been in reconnect phase for {0} but only took {1}".format( self.mds_reconnect_timeout, in_reconnect_for )) diff --git a/qa/tasks/cephfs/test_data_scan.py b/qa/tasks/cephfs/test_data_scan.py index 512e4754122b0..e18c2da522ce9 100644 --- a/qa/tasks/cephfs/test_data_scan.py +++ b/qa/tasks/cephfs/test_data_scan.py @@ -146,13 +146,13 @@ class StripedStashedLayout(Workload): # Exactly stripe_count objects will exist self.os * self.sc, # Fewer than stripe_count objects will exist - self.os * self.sc / 2, - self.os * (self.sc - 1) + self.os / 2, - self.os * (self.sc - 1) + self.os / 2 - 1, - self.os * (self.sc + 1) + self.os / 2, - self.os * (self.sc + 1) + self.os / 2 + 1, + self.os * self.sc // 2, + self.os * (self.sc - 1) + self.os // 2, + self.os * (self.sc - 1) + self.os // 2 - 1, + self.os * (self.sc + 1) + self.os // 2, + self.os * (self.sc + 1) + self.os // 2 + 1, # More than stripe_count objects will exist - self.os * self.sc + self.os * self.sc / 2 + self.os * self.sc + self.os * self.sc // 2 ] def write(self): diff --git a/qa/tasks/cephfs/test_full.py b/qa/tasks/cephfs/test_full.py index d51e24794e029..d20325ed25092 100644 --- a/qa/tasks/cephfs/test_full.py +++ b/qa/tasks/cephfs/test_full.py @@ -122,9 +122,9 @@ class FullnessTestCase(CephFSTestCase): # Fill up the cluster. 
This dd may or may not fail, as it depends on # how soon the cluster recognises its own fullness - self.mount_a.write_n_mb("large_file_a", self.fill_mb / 2) + self.mount_a.write_n_mb("large_file_a", self.fill_mb // 2) try: - self.mount_a.write_n_mb("large_file_b", self.fill_mb / 2) + self.mount_a.write_n_mb("large_file_b", self.fill_mb // 2) except CommandFailedError: log.info("Writing file B failed (full status happened already)") assert self.is_full() @@ -135,7 +135,7 @@ class FullnessTestCase(CephFSTestCase): # Attempting to write more data should give me ENOSPC with self.assertRaises(CommandFailedError) as ar: - self.mount_a.write_n_mb("large_file_b", 50, seek=self.fill_mb / 2) + self.mount_a.write_n_mb("large_file_b", 50, seek=self.fill_mb // 2) self.assertEqual(ar.exception.exitstatus, 1) # dd returns 1 on "No space" # Wait for the MDS to see the latest OSD map so that it will reliably @@ -358,7 +358,7 @@ class TestQuotaFull(FullnessTestCase): Test per-pool fullness, which indicates quota limits exceeded """ pool_capacity = 1024 * 1024 * 32 # arbitrary low-ish limit - fill_mb = pool_capacity / (1024 * 1024) + fill_mb = pool_capacity // (1024 * 1024) # We are only testing quota handling on the data pool, not the metadata # pool. @@ -389,7 +389,7 @@ class TestClusterFull(FullnessTestCase): max_avail = self.fs.get_pool_df(self._data_pool_name())['max_avail'] full_ratio = float(self.fs.get_config("mon_osd_full_ratio", service_type="mon")) TestClusterFull.pool_capacity = int(max_avail * full_ratio) - TestClusterFull.fill_mb = (self.pool_capacity / (1024 * 1024)) + TestClusterFull.fill_mb = (self.pool_capacity // (1024 * 1024)) def is_full(self): return self.fs.is_full() diff --git a/qa/tasks/cephfs/test_strays.py b/qa/tasks/cephfs/test_strays.py index d7be983c570be..f518afe79583c 100644 --- a/qa/tasks/cephfs/test_strays.py +++ b/qa/tasks/cephfs/test_strays.py @@ -152,7 +152,7 @@ class TestStrays(CephFSTestCase): os.mkdir(os.path.join(mount_path, subdir)) for i in range(0, file_multiplier): for size in range(0, {size_range}*size_unit, size_unit): - filename = "{{0}}_{{1}}.bin".format(i, size / size_unit) + filename = "{{0}}_{{1}}.bin".format(i, size // size_unit) with open(os.path.join(mount_path, subdir, filename), 'w') as f: f.write(size * 'x') """.format( @@ -237,7 +237,7 @@ class TestStrays(CephFSTestCase): # insanely fast such that the deletions all pass before we have polled the # statistics. if throttle_type == self.OPS_THROTTLE: - if ops_high_water < mds_max_purge_ops / 2: + if ops_high_water < mds_max_purge_ops // 2: raise RuntimeError("Ops in flight high water is unexpectedly low ({0} / {1})".format( ops_high_water, mds_max_purge_ops )) @@ -248,7 +248,7 @@ class TestStrays(CephFSTestCase): # particularly large file/directory. 
self.assertLessEqual(ops_high_water, mds_max_purge_ops+64) elif throttle_type == self.FILES_THROTTLE: - if files_high_water < mds_max_purge_files / 2: + if files_high_water < mds_max_purge_files // 2: raise RuntimeError("Files in flight high water is unexpectedly low ({0} / {1})".format( files_high_water, mds_max_purge_files )) diff --git a/qa/tasks/cephfs/test_volume_client.py b/qa/tasks/cephfs/test_volume_client.py index 8687e910be1dd..e31145397521d 100644 --- a/qa/tasks/cephfs/test_volume_client.py +++ b/qa/tasks/cephfs/test_volume_client.py @@ -381,7 +381,7 @@ vc.disconnect() for p in osd_map['pools']: existing_pg_count += p['pg_num'] - expected_pg_num = (max_overall - existing_pg_count) / 10 + expected_pg_num = (max_overall - existing_pg_count) // 10 log.info("max_per_osd {0}".format(max_per_osd)) log.info("osd_count {0}".format(osd_count)) log.info("max_overall {0}".format(max_overall)) diff --git a/qa/tasks/cephfs/test_volumes.py b/qa/tasks/cephfs/test_volumes.py index 2b94583b7b1f5..40af32e09b825 100644 --- a/qa/tasks/cephfs/test_volumes.py +++ b/qa/tasks/cephfs/test_volumes.py @@ -2120,7 +2120,7 @@ class TestVolumes(CephFSTestCase): pool_capacity = 32 * 1024 * 1024 # number of files required to fill up 99% of the pool - nr_files = int((pool_capacity * 0.99) / (TestVolumes.DEFAULT_FILE_SIZE * 1024 * 1024)) + nr_files = int((pool_capacity * 0.99) // (TestVolumes.DEFAULT_FILE_SIZE * 1024 * 1024)) # create subvolume self._fs_cmd("subvolume", "create", self.volname, subvolume) @@ -2139,7 +2139,7 @@ class TestVolumes(CephFSTestCase): self.fs.add_data_pool(new_pool) self.fs.mon_manager.raw_cluster_cmd("osd", "pool", "set-quota", new_pool, - "max_bytes", "{0}".format(pool_capacity / 4)) + "max_bytes", "{0}".format(pool_capacity // 4)) # schedule a clone self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1, "--pool_layout", new_pool) diff --git a/qa/tasks/mon_recovery.py b/qa/tasks/mon_recovery.py index a4146b1ffb7ee..fa7aa1a8da2f4 100644 --- a/qa/tasks/mon_recovery.py +++ b/qa/tasks/mon_recovery.py @@ -55,7 +55,7 @@ def task(ctx, config): manager.kill_mon(m) log.info('forming a minimal quorum for %s, then adding monitors' % mons) - qnum = (len(mons) / 2) + 1 + qnum = (len(mons) // 2) + 1 num = 0 for m in mons: manager.revive_mon(m) diff --git a/qa/tasks/rados.py b/qa/tasks/rados.py index 595c057fd0a59..13fe238ad18e8 100644 --- a/qa/tasks/rados.py +++ b/qa/tasks/rados.py @@ -158,8 +158,8 @@ def task(ctx, config): '--objects', str(config.get('objects', 500)), '--max-in-flight', str(config.get('max_in_flight', 16)), '--size', str(object_size), - '--min-stride-size', str(config.get('min_stride_size', object_size / 10)), - '--max-stride-size', str(config.get('max_stride_size', object_size / 5)), + '--min-stride-size', str(config.get('min_stride_size', object_size // 10)), + '--max-stride-size', str(config.get('max_stride_size', object_size // 5)), '--max-seconds', str(config.get('max_seconds', 0)) ]) @@ -195,11 +195,11 @@ def task(ctx, config): if config.get('write_append_excl', True): if 'write' in weights: - weights['write'] = weights['write'] / 2 + weights['write'] = weights['write'] // 2 weights['write_excl'] = weights['write'] if 'append' in weights: - weights['append'] = weights['append'] / 2 + weights['append'] = weights['append'] // 2 weights['append_excl'] = weights['append'] for op, weight in weights.items(): -- 2.39.5
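
A minimal sketch, separate from the patch itself, illustrating the semantic difference the series corrects. The variable names `timeout` and `pause` mirror the first hunk in `cephfs_test_case.py`, but the snippet is illustrative only, not code from the tree:

```python
# Not part of the patch: shows why "//" is needed when an integer is expected.
# Under Python 2, int / int truncates; under Python 3 it is true division and
# returns a float, which breaks integer-only consumers such as range().

timeout = 30
pause = 2

print(timeout / pause)   # Python 3: 15.0 (float); Python 2: 15 (int)
print(timeout // pause)  # 15 (int) on both Python 2 and Python 3

# Floor division keeps range() working on both interpreters; with "/" on
# Python 3 this loop header would raise
# "TypeError: 'float' object cannot be interpreted as an integer".
for i in range(timeout // pause):
    pass
```

The same reasoning applies to every hunk above: each `/` replaced by `//` feeds a context (loop bound, object count, cache size, quota, stride size, quorum size) that requires an integer, so floor division preserves the Python 2 behaviour under Python 3.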