From 2089bf04b9c3b4bb065bf4c0bafa97419f2d87ff Mon Sep 17 00:00:00 2001
From: Kefu Chai
Date: Sun, 5 Apr 2020 00:02:40 +0800
Subject: [PATCH] qa/tasks: use "a // b" instead of "a / b" for expressions
 where the value is expected to be an integer

In Python 3, `a / b` returns a float.

Signed-off-by: Kefu Chai
---
 qa/tasks/cephfs/cephfs_test_case.py     |  2 +-
 qa/tasks/cephfs/filesystem.py           |  2 +-
 qa/tasks/cephfs/test_client_limits.py   |  4 ++--
 qa/tasks/cephfs/test_client_recovery.py |  2 +-
 qa/tasks/cephfs/test_data_scan.py       | 12 ++++++------
 qa/tasks/cephfs/test_full.py            | 10 +++++-----
 qa/tasks/cephfs/test_strays.py          |  6 +++---
 qa/tasks/mon_recovery.py                |  2 +-
 qa/tasks/rados.py                       |  8 ++++----
 9 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
index 4d649ebda0f..d18a0819810 100644
--- a/qa/tasks/cephfs/cephfs_test_case.py
+++ b/qa/tasks/cephfs/cephfs_test_case.py
@@ -291,7 +291,7 @@ class CephFSTestCase(CephTestCase):
         timeout = 30
         pause = 2
         test = sorted(test)
-        for i in range(timeout/pause):
+        for i in range(timeout // pause):
             subtrees = self.fs.mds_asok(["get", "subtrees"], mds_id=status.get_rank(self.fs.id, rank)['name'])
             subtrees = filter(lambda s: s['dir']['path'].startswith('/'), subtrees)
             filtered = sorted([(s['dir']['path'], s['auth_first']) for s in subtrees])
diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py
index dc00e49dee7..0e0094470ae 100644
--- a/qa/tasks/cephfs/filesystem.py
+++ b/qa/tasks/cephfs/filesystem.py
@@ -1180,7 +1180,7 @@ class Filesystem(MDSCluster):

         want_objects = [
             "{0:x}.{1:08x}".format(ino, n)
-            for n in range(0, ((size - 1) / stripe_size) + 1)
+            for n in range(0, ((size - 1) // stripe_size) + 1)
         ]

         exist_objects = self.rados(["ls"], pool=self.get_data_pool_name()).split("\n")
diff --git a/qa/tasks/cephfs/test_client_limits.py b/qa/tasks/cephfs/test_client_limits.py
index 7b496d751e3..6100fbf6dca 100644
--- a/qa/tasks/cephfs/test_client_limits.py
+++ b/qa/tasks/cephfs/test_client_limits.py
@@ -176,7 +176,7 @@ class TestClientLimits(CephFSTestCase):
         self.mount_a.create_n_files("testdir/file2", 5, True)

         # Wait for the health warnings. Assume mds can handle 10 request per second at least
-        self.wait_for_health("MDS_CLIENT_OLDEST_TID", max_requests / 10)
+        self.wait_for_health("MDS_CLIENT_OLDEST_TID", max_requests // 10)

     def _test_client_cache_size(self, mount_subdir):
         """
@@ -217,7 +217,7 @@ class TestClientLimits(CephFSTestCase):
         self.assertGreaterEqual(dentry_count, num_dirs)
         self.assertGreaterEqual(dentry_pinned_count, num_dirs)

-        cache_size = num_dirs / 10
+        cache_size = num_dirs // 10
         self.mount_a.set_cache_size(cache_size)

         def trimmed():
diff --git a/qa/tasks/cephfs/test_client_recovery.py b/qa/tasks/cephfs/test_client_recovery.py
index 6cec7ae6c86..2a3e9288e38 100644
--- a/qa/tasks/cephfs/test_client_recovery.py
+++ b/qa/tasks/cephfs/test_client_recovery.py
@@ -160,7 +160,7 @@ class TestClientRecovery(CephFSTestCase):
         in_reconnect_for = self.fs.wait_for_state('up:active', timeout=self.mds_reconnect_timeout * 2)
         # Check that the period we waited to enter active is within a factor
         # of two of the reconnect timeout.
-        self.assertGreater(in_reconnect_for, self.mds_reconnect_timeout / 2,
+        self.assertGreater(in_reconnect_for, self.mds_reconnect_timeout // 2,
                            "Should have been in reconnect phase for {0} but only took {1}".format(
                                self.mds_reconnect_timeout, in_reconnect_for
                            ))
diff --git a/qa/tasks/cephfs/test_data_scan.py b/qa/tasks/cephfs/test_data_scan.py
index fe099838e38..481ef0e36b4 100644
--- a/qa/tasks/cephfs/test_data_scan.py
+++ b/qa/tasks/cephfs/test_data_scan.py
@@ -146,13 +146,13 @@ class StripedStashedLayout(Workload):
             # Exactly stripe_count objects will exist
             self.os * self.sc,
             # Fewer than stripe_count objects will exist
-            self.os * self.sc / 2,
-            self.os * (self.sc - 1) + self.os / 2,
-            self.os * (self.sc - 1) + self.os / 2 - 1,
-            self.os * (self.sc + 1) + self.os / 2,
-            self.os * (self.sc + 1) + self.os / 2 + 1,
+            self.os * self.sc // 2,
+            self.os * (self.sc - 1) + self.os // 2,
+            self.os * (self.sc - 1) + self.os // 2 - 1,
+            self.os * (self.sc + 1) + self.os // 2,
+            self.os * (self.sc + 1) + self.os // 2 + 1,
             # More than stripe_count objects will exist
-            self.os * self.sc + self.os * self.sc / 2
+            self.os * self.sc + self.os * self.sc // 2
         ]

     def write(self):
diff --git a/qa/tasks/cephfs/test_full.py b/qa/tasks/cephfs/test_full.py
index 112407de18c..55a1fa61ff6 100644
--- a/qa/tasks/cephfs/test_full.py
+++ b/qa/tasks/cephfs/test_full.py
@@ -132,9 +132,9 @@ class FullnessTestCase(CephFSTestCase):

         # Fill up the cluster. This dd may or may not fail, as it depends on
         # how soon the cluster recognises its own fullness
-        self.mount_a.write_n_mb("large_file_a", self.fill_mb / 2)
+        self.mount_a.write_n_mb("large_file_a", self.fill_mb // 2)
         try:
-            self.mount_a.write_n_mb("large_file_b", self.fill_mb / 2)
+            self.mount_a.write_n_mb("large_file_b", self.fill_mb // 2)
         except CommandFailedError:
             log.info("Writing file B failed (full status happened already)")
             assert self.is_full()
@@ -145,7 +145,7 @@ class FullnessTestCase(CephFSTestCase):

         # Attempting to write more data should give me ENOSPC
         with self.assertRaises(CommandFailedError) as ar:
-            self.mount_a.write_n_mb("large_file_b", 50, seek=self.fill_mb / 2)
+            self.mount_a.write_n_mb("large_file_b", 50, seek=self.fill_mb // 2)
         self.assertEqual(ar.exception.exitstatus, 1)  # dd returns 1 on "No space"

         # Wait for the MDS to see the latest OSD map so that it will reliably
@@ -368,7 +368,7 @@ class TestQuotaFull(FullnessTestCase):
     Test per-pool fullness, which indicates quota limits exceeded
     """
     pool_capacity = 1024 * 1024 * 32  # arbitrary low-ish limit
-    fill_mb = pool_capacity / (1024 * 1024)  # type: ignore
+    fill_mb = pool_capacity // (1024 * 1024)  # type: ignore

     # We are only testing quota handling on the data pool, not the metadata
     # pool.
@@ -399,7 +399,7 @@ class TestClusterFull(FullnessTestCase):
             max_avail = self.fs.get_pool_df(self._data_pool_name())['max_avail']
             full_ratio = float(self.fs.get_config("mon_osd_full_ratio", service_type="mon"))
             TestClusterFull.pool_capacity = int(max_avail * full_ratio)
-            TestClusterFull.fill_mb = (self.pool_capacity / (1024 * 1024))
+            TestClusterFull.fill_mb = (self.pool_capacity // (1024 * 1024))

     def is_full(self):
         return self.fs.is_full()
diff --git a/qa/tasks/cephfs/test_strays.py b/qa/tasks/cephfs/test_strays.py
index e02eca2d68a..51732c195d9 100644
--- a/qa/tasks/cephfs/test_strays.py
+++ b/qa/tasks/cephfs/test_strays.py
@@ -152,7 +152,7 @@ class TestStrays(CephFSTestCase):
             os.mkdir(os.path.join(mount_path, subdir))
             for i in range(0, file_multiplier):
                 for size in range(0, {size_range}*size_unit, size_unit):
-                    filename = "{{0}}_{{1}}.bin".format(i, size / size_unit)
+                    filename = "{{0}}_{{1}}.bin".format(i, size // size_unit)
                     with open(os.path.join(mount_path, subdir, filename), 'w') as f:
                         f.write(size * 'x')
             """.format(
@@ -237,7 +237,7 @@ class TestStrays(CephFSTestCase):
         # insanely fast such that the deletions all pass before we have polled the
         # statistics.
         if throttle_type == self.OPS_THROTTLE:
-            if ops_high_water < mds_max_purge_ops / 2:
+            if ops_high_water < mds_max_purge_ops // 2:
                 raise RuntimeError("Ops in flight high water is unexpectedly low ({0} / {1})".format(
                     ops_high_water, mds_max_purge_ops
                 ))
@@ -248,7 +248,7 @@ class TestStrays(CephFSTestCase):
             # particularly large file/directory.
             self.assertLessEqual(ops_high_water, mds_max_purge_ops+64)
         elif throttle_type == self.FILES_THROTTLE:
-            if files_high_water < mds_max_purge_files / 2:
+            if files_high_water < mds_max_purge_files // 2:
                 raise RuntimeError("Files in flight high water is unexpectedly low ({0} / {1})".format(
                     files_high_water, mds_max_purge_files
                 ))
diff --git a/qa/tasks/mon_recovery.py b/qa/tasks/mon_recovery.py
index a4146b1ffb7..fa7aa1a8da2 100644
--- a/qa/tasks/mon_recovery.py
+++ b/qa/tasks/mon_recovery.py
@@ -55,7 +55,7 @@ def task(ctx, config):
         manager.kill_mon(m)

     log.info('forming a minimal quorum for %s, then adding monitors' % mons)
-    qnum = (len(mons) / 2) + 1
+    qnum = (len(mons) // 2) + 1
     num = 0
     for m in mons:
         manager.revive_mon(m)
diff --git a/qa/tasks/rados.py b/qa/tasks/rados.py
index 153f0cfa28a..edce8dc805b 100644
--- a/qa/tasks/rados.py
+++ b/qa/tasks/rados.py
@@ -164,8 +164,8 @@ def task(ctx, config):
             '--objects', str(config.get('objects', 500)),
             '--max-in-flight', str(config.get('max_in_flight', 16)),
             '--size', str(object_size),
-            '--min-stride-size', str(config.get('min_stride_size', object_size / 10)),
-            '--max-stride-size', str(config.get('max_stride_size', object_size / 5)),
+            '--min-stride-size', str(config.get('min_stride_size', object_size // 10)),
+            '--max-stride-size', str(config.get('max_stride_size', object_size // 5)),
             '--max-seconds', str(config.get('max_seconds', 0))
         ])

@@ -201,11 +201,11 @@ def task(ctx, config):

     if config.get('write_append_excl', True):
         if 'write' in weights:
-            weights['write'] = weights['write'] / 2
+            weights['write'] = weights['write'] // 2
             weights['write_excl'] = weights['write']

         if 'append' in weights:
-            weights['append'] = weights['append'] / 2
+            weights['append'] = weights['append'] // 2
             weights['append_excl'] = weights['append']

     for op, weight in weights.items():
-- 
2.39.5
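
For reference only, and not part of the patch itself: a minimal sketch of the division semantics the commit message refers to, reusing the timeout/pause values from the cephfs_test_case.py hunk above. Under Python 2, dividing two ints with "/" yielded an int; under Python 3 it is true division and always yields a float, which breaks callers such as range() that require an integer.

    timeout, pause = 30, 2
    print(timeout / pause)    # 15.0 -- true division, a float under Python 3
    print(timeout // pause)   # 15   -- floor division keeps an int, as range() expects
    # range(timeout / pause) raises under Python 3:
    # TypeError: 'float' object cannot be interpreted as an integer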