From 683153c56e4db9ac5d17dbdcc6b741a27fb37062 Mon Sep 17 00:00:00 2001
From: Patrick Donnelly
Date: Wed, 24 Jun 2020 10:26:16 -0700
Subject: [PATCH] qa: fix flake8 warnings

Signed-off-by: Patrick Donnelly
---
 qa/tasks/cephfs/cephfs_test_case.py |  1 -
 qa/tasks/cephfs/filesystem.py       |  2 +-
 qa/tasks/cephfs/test_exports.py     | 17 ++++++++---------
 qa/tasks/cephfs/test_strays.py      |  2 +-
 qa/tasks/cephfs/test_volumes.py     |  4 ++--
 5 files changed, 12 insertions(+), 14 deletions(-)

diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
index 1c206dc592ec0..0dc2a6a937031 100644
--- a/qa/tasks/cephfs/cephfs_test_case.py
+++ b/qa/tasks/cephfs/cephfs_test_case.py
@@ -1,4 +1,3 @@
-import time
 import json
 import logging
 from tasks.ceph_test_case import CephTestCase
diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py
index 29c168a4953a7..3ec21125490ca 100644
--- a/qa/tasks/cephfs/filesystem.py
+++ b/qa/tasks/cephfs/filesystem.py
@@ -232,7 +232,7 @@ class CephCluster(object):
             log.debug(f"_json_asok output\n{pretty}")
             return j
         else:
-            log.debug(f"_json_asok output empty")
+            log.debug("_json_asok output empty")
             return None
 
 
diff --git a/qa/tasks/cephfs/test_exports.py b/qa/tasks/cephfs/test_exports.py
index 3e005dfbde12c..3cced538de093 100644
--- a/qa/tasks/cephfs/test_exports.py
+++ b/qa/tasks/cephfs/test_exports.py
@@ -1,10 +1,9 @@
 import logging
 import random
 import time
-import unittest
 from tasks.cephfs.fuse_mount import FuseMount
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
-from teuthology.orchestra.run import CommandFailedError, Raw
+from teuthology.orchestra.run import CommandFailedError
 
 log = logging.getLogger(__name__)
 
@@ -147,8 +146,8 @@ class TestExportPin(CephFSTestCase):
         That the export pin does not prevent empty (nothing in cache) subtree merging.
         """
 
-        self.mount_a.setfattr(f"1", "ceph.dir.pin", "0")
-        self.mount_a.setfattr(f"1/2", "ceph.dir.pin", "1")
+        self.mount_a.setfattr("1", "ceph.dir.pin", "0")
+        self.mount_a.setfattr("1/2", "ceph.dir.pin", "1")
         self._wait_subtrees([('/1', 0), ('/1/2', 1)], status=self.status)
         self.mount_a.umount_wait() # release all caps
         def _drop():
@@ -167,7 +166,7 @@ class TestEphemeralPins(CephFSTestCase):
         self.config_set('mds', 'mds_export_ephemeral_distributed', True)
         self.config_set('mds', 'mds_export_ephemeral_random_max', 1.0)
 
-        self.mount_a.run_shell_payload(f"""
+        self.mount_a.run_shell_payload("""
 set -e
 
 # Use up a random number of inode numbers so the ephemeral pinning is not the same every test.
@@ -309,8 +308,8 @@ done
 
         # pin /tree so it does not export during failover
         self._setup_tree(distributed=True, export=0)
-        subtrees = self._wait_distributed_subtrees(100, status=self.status, rank="all")
-        test = [(s['dir']['path'], s['auth_first']) for s in subtrees]
+        self._wait_distributed_subtrees(100, status=self.status, rank="all")
+        #test = [(s['dir']['path'], s['auth_first']) for s in subtrees]
         before = self.fs.ranks_perf(lambda p: p['mds']['exported'])
         log.info(f"export stats: {before}")
         self.fs.rank_fail(rank=1)
@@ -405,7 +404,7 @@ done
         self._setup_tree(count=0, random=1.0)
         self._setup_tree(path="tree/pin", count=count)
         self._wait_random_subtrees(count+1, status=self.status, rank="all")
-        self.mount_a.setfattr(f"tree/pin", "ceph.dir.pin", "1")
+        self.mount_a.setfattr("tree/pin", "ceph.dir.pin", "1")
         self._wait_subtrees([("/tree/pin", 1)], status=self.status, rank=1, path="/tree/pin")
 
     def test_ephemeral_randomness(self):
@@ -428,7 +427,7 @@ done
 
         count = 100
         self._setup_tree(count=count, random=1.0)
-        subtrees = self._wait_random_subtrees(count, status=self.status, rank="all")
+        self._wait_random_subtrees(count, status=self.status, rank="all")
         self.mount_a.umount_wait() # release all caps
         def _drop():
             self.fs.ranks_tell(["cache", "drop"], status=self.status)
diff --git a/qa/tasks/cephfs/test_strays.py b/qa/tasks/cephfs/test_strays.py
index f5c3cc2a1d7a7..4dd70d3ee05d8 100644
--- a/qa/tasks/cephfs/test_strays.py
+++ b/qa/tasks/cephfs/test_strays.py
@@ -614,7 +614,7 @@ ln dir_1/original dir_2/linkto
 
         # Shut down rank 1
         self.fs.set_max_mds(1)
-        status = self.fs.wait_for_daemons(timeout=120)
+        self.fs.wait_for_daemons(timeout=120)
 
         # See that the stray counter on rank 0 has incremented
         self.assertEqual(self.get_mdc_stat("strays_created", rank_0_id), 1)
diff --git a/qa/tasks/cephfs/test_volumes.py b/qa/tasks/cephfs/test_volumes.py
index 97b8c9b2664a2..c8eac670e7751 100644
--- a/qa/tasks/cephfs/test_volumes.py
+++ b/qa/tasks/cephfs/test_volumes.py
@@ -601,7 +601,7 @@ class TestVolumes(CephFSTestCase):
         path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
         path = os.path.dirname(path) # get subvolume path
 
-        subtrees = self._get_subtrees(status=status, rank=1)
+        self._get_subtrees(status=status, rank=1)
         self._wait_subtrees([(path, 1)], status=status)
 
     def test_subvolumegroup_pin_distributed(self):
@@ -621,7 +621,7 @@ class TestVolumes(CephFSTestCase):
 
     def test_subvolume_pin_random(self):
         self.fs.set_max_mds(2)
-        status = self.fs.wait_for_daemons()
+        self.fs.wait_for_daemons()
         self.config_set('mds', 'mds_export_ephemeral_random', True)
 
         subvolume = self._generate_random_subvolume_name()
-- 
2.39.5