qa/tasks: use "a // b" instead of "a / b"
author     Kefu Chai <kchai@redhat.com>  Sat, 4 Apr 2020 16:02:40 +0000 (00:02 +0800)
committer  Kefu Chai <kchai@redhat.com>  Sun, 14 Jun 2020 08:34:51 +0000 (16:34 +0800)
Switch to floor division for expressions where the value is expected to
be an integer, since in Python 3 `a / b` returns a float.

Signed-off-by: Kefu Chai <kchai@redhat.com>
(cherry picked from commit 2089bf04b9c3b4bb065bf4c0bafa97419f2d87ff)
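
A minimal sketch (not part of the commit) of the Python 3 behavior that
motivates the change; the values are illustrative only:

    # Python 3 semantics: "/" is true division, "//" is floor division.
    timeout, pause = 30, 2
    print(timeout / pause)    # 15.0 -- a float, even when the division is exact
    print(timeout // pause)   # 15   -- an int, as the polling loops below expect

    list(range(timeout // pause))   # works
    # list(range(timeout / pause))  # TypeError in Python 3: 'float' object
    #                               # cannot be interpreted as an integer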

qa/tasks/cephfs/cephfs_test_case.py
qa/tasks/cephfs/filesystem.py
qa/tasks/cephfs/test_client_limits.py
qa/tasks/cephfs/test_client_recovery.py
qa/tasks/cephfs/test_data_scan.py
qa/tasks/cephfs/test_full.py
qa/tasks/cephfs/test_strays.py
qa/tasks/mon_recovery.py
qa/tasks/rados.py

diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
index fb0483a190d2f7587d914ca8f5cafa3b39c65350..4447e77422e2151a4c810d71542203a7fc0ad9d6 100644
@@ -292,7 +292,7 @@ class CephFSTestCase(CephTestCase):
         timeout = 30
         pause = 2
         test = sorted(test)
-        for i in range(timeout/pause):
+        for i in range(timeout // pause):
             subtrees = self.fs.mds_asok(["get", "subtrees"], mds_id=status.get_rank(self.fs.id, rank)['name'])
             subtrees = filter(lambda s: s['dir']['path'].startswith('/'), subtrees)
             filtered = sorted([(s['dir']['path'], s['auth_first']) for s in subtrees])
diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py
index e3b71fe7b31a1eaaeb304e5ba06f069052925e51..348cd55704d945aaca7382d7c7ab4ffcae21feb3 100644
@@ -1180,7 +1180,7 @@ class Filesystem(MDSCluster):
 
         want_objects = [
             "{0:x}.{1:08x}".format(ino, n)
-            for n in range(0, ((size - 1) / stripe_size) + 1)
+            for n in range(0, ((size - 1) // stripe_size) + 1)
         ]
 
         exist_objects = self.rados(["ls"], pool=self.get_data_pool_name()).split("\n")
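
The expression fixed in this hunk is the usual integer ceiling-division
idiom: for size >= 1, ((size - 1) // stripe_size) + 1 equals
ceil(size / stripe_size), i.e. the number of objects needed to cover the
file. A quick check with made-up sizes (hypothetical values, not from the
commit):

    import math

    size, stripe_size = 10 * 1024 * 1024, 4 * 1024 * 1024  # hypothetical file/stripe sizes
    n_objects = ((size - 1) // stripe_size) + 1             # floor-division ceiling idiom
    assert n_objects == math.ceil(size / stripe_size) == 3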
diff --git a/qa/tasks/cephfs/test_client_limits.py b/qa/tasks/cephfs/test_client_limits.py
index 7b496d751e3bca7a7286b6bb0cfd32c839042dde..6100fbf6dcaefa2bf11c7af212c7f613f797e1fd 100644
@@ -176,7 +176,7 @@ class TestClientLimits(CephFSTestCase):
         self.mount_a.create_n_files("testdir/file2", 5, True)
 
         # Wait for the health warnings. Assume mds can handle 10 request per second at least
-        self.wait_for_health("MDS_CLIENT_OLDEST_TID", max_requests / 10)
+        self.wait_for_health("MDS_CLIENT_OLDEST_TID", max_requests // 10)
 
     def _test_client_cache_size(self, mount_subdir):
         """
@@ -217,7 +217,7 @@ class TestClientLimits(CephFSTestCase):
         self.assertGreaterEqual(dentry_count, num_dirs)
         self.assertGreaterEqual(dentry_pinned_count, num_dirs)
 
-        cache_size = num_dirs / 10
+        cache_size = num_dirs // 10
         self.mount_a.set_cache_size(cache_size)
 
         def trimmed():
diff --git a/qa/tasks/cephfs/test_client_recovery.py b/qa/tasks/cephfs/test_client_recovery.py
index e18fe997e4d1b0e48b91f4875eea42a5987caddf..05541583f2f473291e7b91bc2e4367444542e81f 100644
@@ -160,7 +160,7 @@ class TestClientRecovery(CephFSTestCase):
         in_reconnect_for = self.fs.wait_for_state('up:active', timeout=self.mds_reconnect_timeout * 2)
         # Check that the period we waited to enter active is within a factor
         # of two of the reconnect timeout.
-        self.assertGreater(in_reconnect_for, self.mds_reconnect_timeout / 2,
+        self.assertGreater(in_reconnect_for, self.mds_reconnect_timeout // 2,
                            "Should have been in reconnect phase for {0} but only took {1}".format(
                                self.mds_reconnect_timeout, in_reconnect_for
                            ))
diff --git a/qa/tasks/cephfs/test_data_scan.py b/qa/tasks/cephfs/test_data_scan.py
index 512e4754122b0db7373b1f6bd79891e8e3bde6a8..e18c2da522ce9aa463d6ea1024f976102662264c 100644
@@ -146,13 +146,13 @@ class StripedStashedLayout(Workload):
             # Exactly stripe_count objects will exist
             self.os * self.sc,
             # Fewer than stripe_count objects will exist
-            self.os * self.sc / 2,
-            self.os * (self.sc - 1) + self.os / 2,
-            self.os * (self.sc - 1) + self.os / 2 - 1,
-            self.os * (self.sc + 1) + self.os / 2,
-            self.os * (self.sc + 1) + self.os / 2 + 1,
+            self.os * self.sc // 2,
+            self.os * (self.sc - 1) + self.os // 2,
+            self.os * (self.sc - 1) + self.os // 2 - 1,
+            self.os * (self.sc + 1) + self.os // 2,
+            self.os * (self.sc + 1) + self.os // 2 + 1,
             # More than stripe_count objects will exist
-            self.os * self.sc + self.os * self.sc / 2
+            self.os * self.sc + self.os * self.sc // 2
         ]
 
     def write(self):
diff --git a/qa/tasks/cephfs/test_full.py b/qa/tasks/cephfs/test_full.py
index 85b5ac200255f9957988a04549d220d115b41804..eaa36c7c9d62fbc9384c357a850e7587e254cdb3 100644
@@ -125,9 +125,9 @@ class FullnessTestCase(CephFSTestCase):
 
         # Fill up the cluster.  This dd may or may not fail, as it depends on
         # how soon the cluster recognises its own fullness
-        self.mount_a.write_n_mb("large_file_a", self.fill_mb / 2)
+        self.mount_a.write_n_mb("large_file_a", self.fill_mb // 2)
         try:
-            self.mount_a.write_n_mb("large_file_b", self.fill_mb / 2)
+            self.mount_a.write_n_mb("large_file_b", self.fill_mb // 2)
         except CommandFailedError:
             log.info("Writing file B failed (full status happened already)")
             assert self.is_full()
@@ -138,7 +138,7 @@ class FullnessTestCase(CephFSTestCase):
 
         # Attempting to write more data should give me ENOSPC
         with self.assertRaises(CommandFailedError) as ar:
-            self.mount_a.write_n_mb("large_file_b", 50, seek=self.fill_mb / 2)
+            self.mount_a.write_n_mb("large_file_b", 50, seek=self.fill_mb // 2)
         self.assertEqual(ar.exception.exitstatus, 1)  # dd returns 1 on "No space"
 
         # Wait for the MDS to see the latest OSD map so that it will reliably
@@ -361,7 +361,7 @@ class TestQuotaFull(FullnessTestCase):
     Test per-pool fullness, which indicates quota limits exceeded
     """
     pool_capacity = 1024 * 1024 * 32  # arbitrary low-ish limit
-    fill_mb = pool_capacity / (1024 * 1024)  # type: ignore
+    fill_mb = pool_capacity // (1024 * 1024)  # type: ignore
 
     # We are only testing quota handling on the data pool, not the metadata
     # pool.
@@ -392,7 +392,7 @@ class TestClusterFull(FullnessTestCase):
             max_avail = self.fs.get_pool_df(self._data_pool_name())['max_avail']
             full_ratio = float(self.fs.get_config("mon_osd_full_ratio", service_type="mon"))
             TestClusterFull.pool_capacity = int(max_avail * full_ratio)
-            TestClusterFull.fill_mb = (self.pool_capacity / (1024 * 1024))
+            TestClusterFull.fill_mb = (self.pool_capacity // (1024 * 1024))
 
     def is_full(self):
         return self.fs.is_full()
diff --git a/qa/tasks/cephfs/test_strays.py b/qa/tasks/cephfs/test_strays.py
index e02eca2d68a1f61c2da7c975df85ffc7834b2434..51732c195d96b20c5f06798366b3af9ac72bf044 100644
@@ -152,7 +152,7 @@ class TestStrays(CephFSTestCase):
             os.mkdir(os.path.join(mount_path, subdir))
             for i in range(0, file_multiplier):
                 for size in range(0, {size_range}*size_unit, size_unit):
-                    filename = "{{0}}_{{1}}.bin".format(i, size / size_unit)
+                    filename = "{{0}}_{{1}}.bin".format(i, size // size_unit)
                     with open(os.path.join(mount_path, subdir, filename), 'w') as f:
                         f.write(size * 'x')
         """.format(
@@ -237,7 +237,7 @@ class TestStrays(CephFSTestCase):
         # insanely fast such that the deletions all pass before we have polled the
         # statistics.
         if throttle_type == self.OPS_THROTTLE:
-            if ops_high_water < mds_max_purge_ops / 2:
+            if ops_high_water < mds_max_purge_ops // 2:
                 raise RuntimeError("Ops in flight high water is unexpectedly low ({0} / {1})".format(
                     ops_high_water, mds_max_purge_ops
                 ))
@@ -248,7 +248,7 @@ class TestStrays(CephFSTestCase):
             # particularly large file/directory.
             self.assertLessEqual(ops_high_water, mds_max_purge_ops+64)
         elif throttle_type == self.FILES_THROTTLE:
-            if files_high_water < mds_max_purge_files / 2:
+            if files_high_water < mds_max_purge_files // 2:
                 raise RuntimeError("Files in flight high water is unexpectedly low ({0} / {1})".format(
                     files_high_water, mds_max_purge_files
                 ))
diff --git a/qa/tasks/mon_recovery.py b/qa/tasks/mon_recovery.py
index e09e9877b571f2848edfda6e383f4663ee2405de..511973f34d4e8b2633198338e69481b48247d313 100644
@@ -55,7 +55,7 @@ def task(ctx, config):
             manager.kill_mon(m)
 
         log.info('forming a minimal quorum for %s, then adding monitors' % mons)
-        qnum = (len(mons) / 2) + 1
+        qnum = (len(mons) // 2) + 1
         num = 0
         for m in mons:
             manager.revive_mon(m)
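
Unlike the range() cases above, this quorum computation would not crash
under true division; it would silently change the threshold. A sketch with
a hypothetical five-monitor cluster (any comparison of a live-monitor count
against qnum downstream, not shown in the hunk, would shift accordingly):

    mons = ['a', 'b', 'c', 'd', 'e']    # hypothetical monitor names
    assert (len(mons) // 2) + 1 == 3    # intended majority: 3 of 5
    assert (len(mons) / 2) + 1 == 3.5   # Python 3: a ">= qnum" test would
                                        # now effectively demand 4 monitors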
diff --git a/qa/tasks/rados.py b/qa/tasks/rados.py
index 153f0cfa28af302a3cf6bfa9f4d01548246b85b4..edce8dc805bf969b4bd10b36afe16bdb3c8302f2 100644
@@ -164,8 +164,8 @@ def task(ctx, config):
         '--objects', str(config.get('objects', 500)),
         '--max-in-flight', str(config.get('max_in_flight', 16)),
         '--size', str(object_size),
-        '--min-stride-size', str(config.get('min_stride_size', object_size / 10)),
-        '--max-stride-size', str(config.get('max_stride_size', object_size / 5)),
+        '--min-stride-size', str(config.get('min_stride_size', object_size // 10)),
+        '--max-stride-size', str(config.get('max_stride_size', object_size // 5)),
         '--max-seconds', str(config.get('max_seconds', 0))
         ])
 
@@ -201,11 +201,11 @@ def task(ctx, config):
 
     if config.get('write_append_excl', True):
         if 'write' in weights:
-            weights['write'] = weights['write'] / 2
+            weights['write'] = weights['write'] // 2
             weights['write_excl'] = weights['write']
 
         if 'append' in weights:
-            weights['append'] = weights['append'] / 2
+            weights['append'] = weights['append'] // 2
             weights['append_excl'] = weights['append']
 
     for op, weight in weights.items():