qa/tasks: use "a // b" instead of "a / b"
author     Kefu Chai <kchai@redhat.com>    Sat, 4 Apr 2020 16:02:40 +0000 (00:02 +0800)
committer  Kefu Chai <kchai@redhat.com>    Wed, 3 Jun 2020 11:56:59 +0000 (19:56 +0800)
Use floor division for expressions where the value is expected to be an integer:
in Python 3, `a / b` always returns a float, while `a // b` keeps an int result
when both operands are ints.

Signed-off-by: Kefu Chai <kchai@redhat.com>
(cherry picked from commit 2089bf04b9c3b4bb065bf4c0bafa97419f2d87ff)

Conflicts:
qa/tasks/cephfs/test_full.py
qa/tasks/cephfs/test_strays.py: trivial resolutions

qa/tasks/cephfs/cephfs_test_case.py
qa/tasks/cephfs/filesystem.py
qa/tasks/cephfs/test_client_limits.py
qa/tasks/cephfs/test_client_recovery.py
qa/tasks/cephfs/test_data_scan.py
qa/tasks/cephfs/test_full.py
qa/tasks/cephfs/test_strays.py
qa/tasks/cephfs/test_volume_client.py
qa/tasks/cephfs/test_volumes.py
qa/tasks/mon_recovery.py
qa/tasks/rados.py
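
For reference, a minimal standalone sketch (plain Python 3, not part of the patch
itself) of the behaviour difference that motivates the substitutions below: true
division always yields a float, while floor division keeps integer operands
integral, which matters at call sites such as range() that reject floats. The
numbers reuse the timeout/pause values from the first hunk.

    # Python 3 semantics illustrated; not Ceph code.
    timeout, pause = 30, 2

    print(timeout / pause)    # 15.0 -- true division always returns a float
    print(timeout // pause)   # 15   -- floor division of two ints returns an int

    # range() rejects floats in Python 3, so the pre-patch code would raise
    # "TypeError: 'float' object cannot be interpreted as an integer".
    for i in range(timeout // pause):
        pass

    # Caveat: // floors toward negative infinity, so for negative operands it
    # differs from int(a / b), which truncates toward zero. The operands in
    # this commit are non-negative, so the numeric results are unchanged.
    print(-7 // 2)      # -4
    print(int(-7 / 2))  # -3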

diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
index f5aef8d747d2eb8a5d13fccd29194204a2804770..576e3d75332184a1dcae9abd1c98dc7f683ae1fa 100644
@@ -292,7 +292,7 @@ class CephFSTestCase(CephTestCase):
         timeout = 30
         pause = 2
         test = sorted(test)
-        for i in range(timeout/pause):
+        for i in range(timeout // pause):
             subtrees = self.fs.mds_asok(["get", "subtrees"], mds_id=status.get_rank(self.fs.id, rank)['name'])
             subtrees = filter(lambda s: s['dir']['path'].startswith('/'), subtrees)
             filtered = sorted([(s['dir']['path'], s['auth_first']) for s in subtrees])
diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py
index d149387a6984b282f36bcff427f75a62ad6053c6..24051294ca3e735988e890295aa1d9788707dd72 100644
@@ -1151,7 +1151,7 @@ class Filesystem(MDSCluster):
 
         want_objects = [
             "{0:x}.{1:08x}".format(ino, n)
-            for n in range(0, ((size - 1) / stripe_size) + 1)
+            for n in range(0, ((size - 1) // stripe_size) + 1)
         ]
 
         exist_objects = self.rados(["ls"], pool=self.get_data_pool_name()).split("\n")
diff --git a/qa/tasks/cephfs/test_client_limits.py b/qa/tasks/cephfs/test_client_limits.py
index 706f4af0addd3c5d63549c45772fa6df85b778ca..bf4a469ed1e51812719527702bb3e6f93cca9ded 100644
@@ -39,10 +39,10 @@ class TestClientLimits(CephFSTestCase):
         :param use_subdir: whether to put test files in a subdir or use root
         """
 
-        cache_size = open_files/2
+        cache_size = open_files // 2
 
         self.set_conf('mds', 'mds cache size', cache_size)
-        self.set_conf('mds', 'mds_recall_max_caps', open_files/2)
+        self.set_conf('mds', 'mds_recall_max_caps', open_files // 2)
         self.set_conf('mds', 'mds_recall_warning_threshold', open_files)
         self.fs.mds_fail_restart()
         self.fs.wait_for_daemons()
@@ -174,7 +174,7 @@ class TestClientLimits(CephFSTestCase):
         self.mount_a.create_n_files("testdir/file2", 5, True)
 
         # Wait for the health warnings. Assume mds can handle 10 request per second at least
-        self.wait_for_health("MDS_CLIENT_OLDEST_TID", max_requests / 10)
+        self.wait_for_health("MDS_CLIENT_OLDEST_TID", max_requests // 10)
 
     def _test_client_cache_size(self, mount_subdir):
         """
@@ -215,7 +215,7 @@ class TestClientLimits(CephFSTestCase):
         self.assertGreaterEqual(dentry_count, num_dirs)
         self.assertGreaterEqual(dentry_pinned_count, num_dirs)
 
-        cache_size = num_dirs / 10
+        cache_size = num_dirs // 10
         self.mount_a.set_cache_size(cache_size)
 
         def trimmed():
diff --git a/qa/tasks/cephfs/test_client_recovery.py b/qa/tasks/cephfs/test_client_recovery.py
index ab86ae7a89c7863e36d108c88fdde748a1e5d84c..c7806b71292f305375860d9b8b6630a7a59fdd00 100644
@@ -161,7 +161,7 @@ class TestClientRecovery(CephFSTestCase):
         in_reconnect_for = self.fs.wait_for_state('up:active', timeout=self.mds_reconnect_timeout * 2)
         # Check that the period we waited to enter active is within a factor
         # of two of the reconnect timeout.
-        self.assertGreater(in_reconnect_for, self.mds_reconnect_timeout / 2,
+        self.assertGreater(in_reconnect_for, self.mds_reconnect_timeout // 2,
                            "Should have been in reconnect phase for {0} but only took {1}".format(
                                self.mds_reconnect_timeout, in_reconnect_for
                            ))
diff --git a/qa/tasks/cephfs/test_data_scan.py b/qa/tasks/cephfs/test_data_scan.py
index 512e4754122b0db7373b1f6bd79891e8e3bde6a8..e18c2da522ce9aa463d6ea1024f976102662264c 100644
@@ -146,13 +146,13 @@ class StripedStashedLayout(Workload):
             # Exactly stripe_count objects will exist
             self.os * self.sc,
             # Fewer than stripe_count objects will exist
-            self.os * self.sc / 2,
-            self.os * (self.sc - 1) + self.os / 2,
-            self.os * (self.sc - 1) + self.os / 2 - 1,
-            self.os * (self.sc + 1) + self.os / 2,
-            self.os * (self.sc + 1) + self.os / 2 + 1,
+            self.os * self.sc // 2,
+            self.os * (self.sc - 1) + self.os // 2,
+            self.os * (self.sc - 1) + self.os // 2 - 1,
+            self.os * (self.sc + 1) + self.os // 2,
+            self.os * (self.sc + 1) + self.os // 2 + 1,
             # More than stripe_count objects will exist
-            self.os * self.sc + self.os * self.sc / 2
+            self.os * self.sc + self.os * self.sc // 2
         ]
 
     def write(self):
diff --git a/qa/tasks/cephfs/test_full.py b/qa/tasks/cephfs/test_full.py
index d51e24794e029d3dc2ecb696a35ed3b1cf9864fb..d20325ed25092c23ea57a243092713f61da9ddba 100644
@@ -122,9 +122,9 @@ class FullnessTestCase(CephFSTestCase):
 
         # Fill up the cluster.  This dd may or may not fail, as it depends on
         # how soon the cluster recognises its own fullness
-        self.mount_a.write_n_mb("large_file_a", self.fill_mb / 2)
+        self.mount_a.write_n_mb("large_file_a", self.fill_mb // 2)
         try:
-            self.mount_a.write_n_mb("large_file_b", self.fill_mb / 2)
+            self.mount_a.write_n_mb("large_file_b", self.fill_mb // 2)
         except CommandFailedError:
             log.info("Writing file B failed (full status happened already)")
             assert self.is_full()
@@ -135,7 +135,7 @@ class FullnessTestCase(CephFSTestCase):
 
         # Attempting to write more data should give me ENOSPC
         with self.assertRaises(CommandFailedError) as ar:
-            self.mount_a.write_n_mb("large_file_b", 50, seek=self.fill_mb / 2)
+            self.mount_a.write_n_mb("large_file_b", 50, seek=self.fill_mb // 2)
         self.assertEqual(ar.exception.exitstatus, 1)  # dd returns 1 on "No space"
 
         # Wait for the MDS to see the latest OSD map so that it will reliably
@@ -358,7 +358,7 @@ class TestQuotaFull(FullnessTestCase):
     Test per-pool fullness, which indicates quota limits exceeded
     """
     pool_capacity = 1024 * 1024 * 32   # arbitrary low-ish limit
-    fill_mb = pool_capacity / (1024 * 1024)
+    fill_mb = pool_capacity // (1024 * 1024)
 
     # We are only testing quota handling on the data pool, not the metadata
     # pool.
@@ -389,7 +389,7 @@ class TestClusterFull(FullnessTestCase):
             max_avail = self.fs.get_pool_df(self._data_pool_name())['max_avail']
             full_ratio = float(self.fs.get_config("mon_osd_full_ratio", service_type="mon"))
             TestClusterFull.pool_capacity = int(max_avail * full_ratio)
-            TestClusterFull.fill_mb = (self.pool_capacity / (1024 * 1024))
+            TestClusterFull.fill_mb = (self.pool_capacity // (1024 * 1024))
 
     def is_full(self):
         return self.fs.is_full()
diff --git a/qa/tasks/cephfs/test_strays.py b/qa/tasks/cephfs/test_strays.py
index d7be983c570be0a1f89346fb6c2be48b6784d663..f518afe79583c42f5fc7be168005f94bd56d94ff 100644
@@ -152,7 +152,7 @@ class TestStrays(CephFSTestCase):
             os.mkdir(os.path.join(mount_path, subdir))
             for i in range(0, file_multiplier):
                 for size in range(0, {size_range}*size_unit, size_unit):
-                    filename = "{{0}}_{{1}}.bin".format(i, size / size_unit)
+                    filename = "{{0}}_{{1}}.bin".format(i, size // size_unit)
                     with open(os.path.join(mount_path, subdir, filename), 'w') as f:
                         f.write(size * 'x')
         """.format(
@@ -237,7 +237,7 @@ class TestStrays(CephFSTestCase):
         # insanely fast such that the deletions all pass before we have polled the
         # statistics.
         if throttle_type == self.OPS_THROTTLE:
-            if ops_high_water < mds_max_purge_ops / 2:
+            if ops_high_water < mds_max_purge_ops // 2:
                 raise RuntimeError("Ops in flight high water is unexpectedly low ({0} / {1})".format(
                     ops_high_water, mds_max_purge_ops
                 ))
@@ -248,7 +248,7 @@ class TestStrays(CephFSTestCase):
             # particularly large file/directory.
             self.assertLessEqual(ops_high_water, mds_max_purge_ops+64)
         elif throttle_type == self.FILES_THROTTLE:
-            if files_high_water < mds_max_purge_files / 2:
+            if files_high_water < mds_max_purge_files // 2:
                 raise RuntimeError("Files in flight high water is unexpectedly low ({0} / {1})".format(
                     files_high_water, mds_max_purge_files
                 ))
diff --git a/qa/tasks/cephfs/test_volume_client.py b/qa/tasks/cephfs/test_volume_client.py
index 8687e910be1ddb8377c7674b0ec5f6b9118b86cb..e31145397521d1fbfa157849dc9149ed2de259c3 100644
@@ -381,7 +381,7 @@ vc.disconnect()
         for p in osd_map['pools']:
             existing_pg_count += p['pg_num']
 
-        expected_pg_num = (max_overall - existing_pg_count) / 10
+        expected_pg_num = (max_overall - existing_pg_count) // 10
         log.info("max_per_osd {0}".format(max_per_osd))
         log.info("osd_count {0}".format(osd_count))
         log.info("max_overall {0}".format(max_overall))
diff --git a/qa/tasks/cephfs/test_volumes.py b/qa/tasks/cephfs/test_volumes.py
index 2b94583b7b1f5de9ac79f84f991485c594f0bc79..40af32e09b8258328344fffd3f8acc90dc35657b 100644
@@ -2120,7 +2120,7 @@ class TestVolumes(CephFSTestCase):
 
         pool_capacity = 32 * 1024 * 1024
         # number of files required to fill up 99% of the pool
-        nr_files = int((pool_capacity * 0.99) / (TestVolumes.DEFAULT_FILE_SIZE * 1024 * 1024))
+        nr_files = int((pool_capacity * 0.99) // (TestVolumes.DEFAULT_FILE_SIZE * 1024 * 1024))
 
         # create subvolume
         self._fs_cmd("subvolume", "create", self.volname, subvolume)
@@ -2139,7 +2139,7 @@ class TestVolumes(CephFSTestCase):
         self.fs.add_data_pool(new_pool)
 
         self.fs.mon_manager.raw_cluster_cmd("osd", "pool", "set-quota", new_pool,
-                                            "max_bytes", "{0}".format(pool_capacity / 4))
+                                            "max_bytes", "{0}".format(pool_capacity // 4))
 
         # schedule a clone
         self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1, "--pool_layout", new_pool)
diff --git a/qa/tasks/mon_recovery.py b/qa/tasks/mon_recovery.py
index a4146b1ffb7ee009c0902ed701d299c394652dd1..fa7aa1a8da2f4539ece7469ebc182bc08c78d175 100644
@@ -55,7 +55,7 @@ def task(ctx, config):
             manager.kill_mon(m)
 
         log.info('forming a minimal quorum for %s, then adding monitors' % mons)
-        qnum = (len(mons) / 2) + 1
+        qnum = (len(mons) // 2) + 1
         num = 0
         for m in mons:
             manager.revive_mon(m)
diff --git a/qa/tasks/rados.py b/qa/tasks/rados.py
index 595c057fd0a5935e7cb697d9c2340c8cb6af90fc..13fe238ad18e8cbd940d039047f6da2d7943bfba 100644
@@ -158,8 +158,8 @@ def task(ctx, config):
         '--objects', str(config.get('objects', 500)),
         '--max-in-flight', str(config.get('max_in_flight', 16)),
         '--size', str(object_size),
-        '--min-stride-size', str(config.get('min_stride_size', object_size / 10)),
-        '--max-stride-size', str(config.get('max_stride_size', object_size / 5)),
+        '--min-stride-size', str(config.get('min_stride_size', object_size // 10)),
+        '--max-stride-size', str(config.get('max_stride_size', object_size // 5)),
         '--max-seconds', str(config.get('max_seconds', 0))
         ])
 
@@ -195,11 +195,11 @@ def task(ctx, config):
 
     if config.get('write_append_excl', True):
         if 'write' in weights:
-            weights['write'] = weights['write'] / 2
+            weights['write'] = weights['write'] // 2
             weights['write_excl'] = weights['write']
 
         if 'append' in weights:
-            weights['append'] = weights['append'] / 2
+            weights['append'] = weights['append'] // 2
             weights['append_excl'] = weights['append']
 
     for op, weight in weights.items():