From: Aashish Sharma
Date: Tue, 14 Jul 2020 04:58:57 +0000 (+0530)
Subject: mgr/dashboard:minimize console log traces of Dashboard backend API tests
X-Git-Tag: v14.2.17~30^2
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=1b3ad4993e817f9a5619e219422d856435807b48;p=ceph.git

mgr/dashboard:minimize console log traces of Dashboard backend API tests

Changed log levels in some files to reduce console log traces

Fixes: https://tracker.ceph.com/issues/45822
Signed-off-by: Aashish Sharma
(cherry picked from commit f5dd0195cd45b6e00e43ef97e209e565e6452777)
---
diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py
index 09f44a1f5f8e0..c5531f94212d4 100644
--- a/qa/tasks/cephfs/filesystem.py
+++ b/qa/tasks/cephfs/filesystem.py
@@ -484,7 +484,7 @@ class Filesystem(MDSCluster):
         while count > max_mds:
             targets = sorted(self.get_ranks(status=status), key=lambda r: r['rank'], reverse=True)
             target = targets[0]
-            log.info("deactivating rank %d" % target['rank'])
+            log.debug("deactivating rank %d" % target['rank'])
             self.deactivate(target['rank'])
             status = self.wait_for_daemons(skip_max_mds_check=True)
             count = len(list(self.get_ranks(status=status)))
@@ -538,7 +538,7 @@ class Filesystem(MDSCluster):
         else:
             data_pool_name = self.data_pool_name

-        log.info("Creating filesystem '{0}'".format(self.name))
+        log.debug("Creating filesystem '{0}'".format(self.name))

         self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                          self.metadata_pool_name, self.pgs_per_fs_pool.__str__())
@@ -548,7 +548,7 @@ class Filesystem(MDSCluster):
                                              '--allow-dangerous-metadata-overlay')
         else:
             if self.ec_profile and 'disabled' not in self.ec_profile:
-                log.info("EC profile is %s", self.ec_profile)
+                log.debug("EC profile is %s", self.ec_profile)
                 cmd = ['osd', 'erasure-code-profile', 'set', data_pool_name]
                 cmd.extend(self.ec_profile)
                 self.mon_manager.raw_cluster_cmd(*cmd)
@@ -749,7 +749,7 @@ class Filesystem(MDSCluster):
             else:
                 raise

-        log.info("are_daemons_healthy: mds map: {0}".format(mds_map))
+        log.debug("are_daemons_healthy: mds map: {0}".format(mds_map))

         for mds_id, mds_status in mds_map['info'].items():
             if mds_status['state'] not in ["up:active", "up:standby", "up:standby-replay"]:
@@ -758,13 +758,13 @@ class Filesystem(MDSCluster):
             elif mds_status['state'] == 'up:active':
                 active_count += 1

-        log.info("are_daemons_healthy: {0}/{1}".format(
+        log.debug("are_daemons_healthy: {0}/{1}".format(
             active_count, mds_map['max_mds']
         ))

         if not skip_max_mds_check:
             if active_count > mds_map['max_mds']:
-                log.info("are_daemons_healthy: number of actives is greater than max_mds: {0}".format(mds_map))
+                log.debug("are_daemons_healthy: number of actives is greater than max_mds: {0}".format(mds_map))
                 return False
             elif active_count == mds_map['max_mds']:
                 # The MDSMap says these guys are active, but let's check they really are
@@ -788,7 +788,7 @@ class Filesystem(MDSCluster):
             else:
                 return False
         else:
-            log.info("are_daemons_healthy: skipping max_mds check")
+            log.debug("are_daemons_healthy: skipping max_mds check")
         return True

     def get_daemon_names(self, state=None, status=None):
@@ -897,7 +897,7 @@ class Filesystem(MDSCluster):
             elapsed += 1

             if elapsed > timeout:
-                log.info("status = {0}".format(status))
+                log.debug("status = {0}".format(status))
                 raise RuntimeError("Timed out waiting for MDS daemons to become healthy")

             status = self.status()
@@ -979,7 +979,7 @@ class Filesystem(MDSCluster):
         journal_header_dump = self.get_metadata_object('Journaler::Header', journal_header_object)

         version = journal_header_dump['journal_header']['stream_format']
-        log.info("Read journal version {0}".format(version))
+        log.debug("Read journal version {0}".format(version))

         return version

@@ -1028,11 +1028,11 @@ class Filesystem(MDSCluster):
                 try:
                     mds_info = status.get_rank(self.id, rank)
                     current_state = mds_info['state'] if mds_info else None
-                    log.info("Looked up MDS state for mds.{0}: {1}".format(rank, current_state))
+                    log.debug("Looked up MDS state for mds.{0}: {1}".format(rank, current_state))
                 except:
                     mdsmap = self.get_mds_map(status=status)
                     if rank in mdsmap['failed']:
-                        log.info("Waiting for rank {0} to come back.".format(rank))
+                        log.debug("Waiting for rank {0} to come back.".format(rank))
                         current_state = None
                     else:
                         raise
@@ -1040,7 +1040,7 @@ class Filesystem(MDSCluster):
                 # mds_info is None if no daemon with this ID exists in the map
                 mds_info = status.get_mds(mds_id)
                 current_state = mds_info['state'] if mds_info else None
-                log.info("Looked up MDS state for {0}: {1}".format(mds_id, current_state))
+                log.debug("Looked up MDS state for {0}: {1}".format(mds_id, current_state))
             else:
                 # In general, look for a single MDS
                 states = [m['state'] for m in status.get_ranks(self.id)]
@@ -1050,11 +1050,11 @@ class Filesystem(MDSCluster):
                     current_state = reject
                 else:
                     current_state = None
-                log.info("mapped states {0} to {1}".format(states, current_state))
+                log.debug("mapped states {0} to {1}".format(states, current_state))

             elapsed = time.time() - started_at
             if current_state == goal_state:
-                log.info("reached state '{0}' in {1}s".format(current_state, elapsed))
+                log.debug("reached state '{0}' in {1}s".format(current_state, elapsed))
                 return elapsed
             elif reject is not None and current_state == reject:
                 raise RuntimeError("MDS in reject state {0}".format(current_state))
@@ -1187,12 +1187,12 @@ class Filesystem(MDSCluster):
         missing = set(want_objects) - set(exist_objects)

         if missing:
-            log.info("Objects missing (ino {0}, size {1}): {2}".format(
+            log.debug("Objects missing (ino {0}, size {1}): {2}".format(
                 ino, size, missing
             ))
             return False
         else:
-            log.info("All objects for ino {0} size {1} found".format(ino, size))
+            log.debug("All objects for ino {0} size {1} found".format(ino, size))
             return True

     def data_objects_absent(self, ino, size):
@@ -1200,12 +1200,12 @@ class Filesystem(MDSCluster):
         present = set(want_objects) & set(exist_objects)

         if present:
-            log.info("Objects not absent (ino {0}, size {1}): {2}".format(
+            log.debug("Objects not absent (ino {0}, size {1}): {2}".format(
                 ino, size, present
             ))
             return False
         else:
-            log.info("All objects for ino {0} size {1} are absent".format(ino, size))
+            log.debug("All objects for ino {0} size {1} are absent".format(ino, size))
             return True

     def dirfrag_exists(self, ino, frag):
@@ -1324,7 +1324,7 @@ class Filesystem(MDSCluster):
         t1 = datetime.datetime.now()
         r = self.tool_remote.sh(script=base_args + args, stdout=StringIO()).strip()
         duration = datetime.datetime.now() - t1
-        log.info("Ran {0} in time {1}, result:\n{2}".format(
+        log.debug("Ran {0} in time {1}, result:\n{2}".format(
             base_args + args, duration, r
         ))
         return r
diff --git a/qa/tasks/mgr/dashboard/helper.py b/qa/tasks/mgr/dashboard/helper.py
index 01f2d3ff8d8db..5c430a6927980 100644
--- a/qa/tasks/mgr/dashboard/helper.py
+++ b/qa/tasks/mgr/dashboard/helper.py
@@ -176,7 +176,7 @@ class DashboardTestCase(MgrTestCase):
     @classmethod
     def _request(cls, url, method, data=None, params=None, set_cookies=False):
         url = "{}{}".format(cls._base_uri, url)
-        log.info("Request %s to %s", method, url)
+        log.debug("Request %s to %s", method, url)
         headers = {}
         cookies = {}
         if cls._token:
@@ -286,7 +286,7 @@ class DashboardTestCase(MgrTestCase):
             return None

         if cls._resp.status_code != 202:
-            log.info("task finished immediately")
+            log.debug("task finished immediately")
             return res

         cls._assertIn('name', res)
@@ -298,8 +298,7 @@ class DashboardTestCase(MgrTestCase):
         res_task = None
         while retries > 0 and not res_task:
             retries -= 1
-            log.info("task (%s, %s) is still executing", task_name,
-                     task_metadata)
+            log.debug("task (%s, %s) is still executing", task_name, task_metadata)
             time.sleep(1)
             _res = cls._get('/api/task?name={}'.format(task_name))
             cls._assertEq(cls._resp.status_code, 200)
@@ -314,7 +313,7 @@ class DashboardTestCase(MgrTestCase):
             raise Exception("Waiting for task ({}, {}) to finish timed out. {}"
                             .format(task_name, task_metadata, _res))

-        log.info("task (%s, %s) finished", task_name, task_metadata)
+        log.debug("task (%s, %s) finished", task_name, task_metadata)
         if res_task['success']:
             if method == 'POST':
                 cls._resp.status_code = 201
@@ -400,13 +399,13 @@ class DashboardTestCase(MgrTestCase):
     @classmethod
     def _ceph_cmd(cls, cmd):
         res = cls.mgr_cluster.mon_manager.raw_cluster_cmd(*cmd)
-        log.info("command result: %s", res)
+        log.debug("command result: %s", res)
         return res

     @classmethod
     def _ceph_cmd_result(cls, cmd):
         exitstatus = cls.mgr_cluster.mon_manager.raw_cluster_cmd_result(*cmd)
-        log.info("command exit status: %d", exitstatus)
+        log.debug("command exit status: %d", exitstatus)
         return exitstatus

     @classmethod
diff --git a/qa/tasks/mgr/mgr_test_case.py b/qa/tasks/mgr/mgr_test_case.py
index 44d25a7af80fa..66f8748697427 100644
--- a/qa/tasks/mgr/mgr_test_case.py
+++ b/qa/tasks/mgr/mgr_test_case.py
@@ -138,7 +138,7 @@ class MgrTestCase(CephTestCase):
             mgr_map = cls.mgr_cluster.get_mgr_map()
             done = mgr_map['active_gid'] != initial_gid and mgr_map['available']
             if done:
-                log.info("Restarted after module load (new active {0}/{1})".format(
+                log.debug("Restarted after module load (new active {0}/{1})".format(
                     mgr_map['active_name'] , mgr_map['active_gid']))
             return done
         cls.wait_until_true(has_restarted, timeout=30)
@@ -159,7 +159,7 @@ class MgrTestCase(CephTestCase):

         uri = mgr_map['x']['services'][service_name]

-        log.info("Found {0} at {1} (daemon {2}/{3})".format(
+        log.debug("Found {0} at {1} (daemon {2}/{3})".format(
             service_name, uri, mgr_map['x']['active_name'],
             mgr_map['x']['active_gid']))

@@ -183,7 +183,7 @@ class MgrTestCase(CephTestCase):
             cls.mgr_cluster.mgr_fail(mgr_id)

         for mgr_id in cls.mgr_cluster.mgr_ids:
-            log.info("Using port {0} for {1} on mgr.{2}".format(
+            log.debug("Using port {0} for {1} on mgr.{2}".format(
                 assign_port, module_name, mgr_id
             ))
             cls.mgr_cluster.set_module_localized_conf(module_name, mgr_id,
@@ -198,7 +198,7 @@ class MgrTestCase(CephTestCase):
             mgr_map = cls.mgr_cluster.get_mgr_map()
             done = mgr_map['available']
             if done:
-                log.info("Available after assign ports (new active {0}/{1})".format(
+                log.debug("Available after assign ports (new active {0}/{1})".format(
                     mgr_map['active_name'], mgr_map['active_gid']))
             return done
         cls.wait_until_true(is_available, timeout=30)
diff --git a/qa/tasks/vstart_runner.py b/qa/tasks/vstart_runner.py
index f815c9e6f88df..71fb71a534644 100644
--- a/qa/tasks/vstart_runner.py
+++ b/qa/tasks/vstart_runner.py
@@ -53,6 +53,11 @@ from teuthology.orchestra.daemon import DaemonGroup
 from teuthology.config import config as teuth_config
 import six
 import logging
+try:
+    import urllib3
+    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+except:
+    pass

 log = logging.getLogger(__name__)

@@ -193,13 +198,13 @@ class LocalRemoteProcess(object):
         return False

     def kill(self):
-        log.info("kill ")
+        log.debug("kill ")
         if self.subproc.pid and not self.finished:
-            log.info("kill: killing pid {0} ({1})".format(
+            log.debug("kill: killing pid {0} ({1})".format(
                 self.subproc.pid, self.args))
             safe_kill(self.subproc.pid)
         else:
-            log.info("kill: already terminated ({0})".format(self.args))
+            log.debug("kill: already terminated ({0})".format(self.args))

     @property
     def stdin(self):
@@ -266,7 +271,7 @@ class LocalRemote(object):
                 args[0]
             ))

-        log.info("Running {0}".format(args))
+        log.debug("Running {0}".format(args))

         if shell:
             subproc = subprocess.Popen(quote(args),
@@ -354,9 +359,9 @@ class LocalDaemon(object):
         for line in lines:
             if line.find("ceph-{0} -i {1}".format(self.daemon_type, self.daemon_id)) != -1:
-                log.info("Found ps line for daemon: {0}".format(line))
+                log.debug("Found ps line for daemon: {0}".format(line))
                 return int(line.split()[0])
-        log.info("No match for {0} {1}: {2}".format(
+        log.debug("No match for {0} {1}: {2}".format(
             self.daemon_type, self.daemon_id, ps_txt
         ))
         return None
@@ -375,14 +380,14 @@ class LocalDaemon(object):
             return

         pid = self._get_pid()
-        log.info("Killing PID {0} for {1}.{2}".format(pid, self.daemon_type, self.daemon_id))
+        log.debug("Killing PID {0} for {1}.{2}".format(pid, self.daemon_type, self.daemon_id))
         os.kill(pid, signal.SIGTERM)

         waited = 0
         while pid is not None:
             new_pid = self._get_pid()
             if new_pid is not None and new_pid != pid:
-                log.info("Killing new PID {0}".format(new_pid))
+                log.debug("Killing new PID {0}".format(new_pid))
                 pid = new_pid
                 os.kill(pid, signal.SIGTERM)
@@ -410,7 +415,7 @@ class LocalDaemon(object):
         os.kill(self._get_pid(), sig)

         if not silent:
-            log.info("Sent signal {0} to {1}.{2}".format(sig, self.daemon_type, self.daemon_id))
+            log.debug("Sent signal {0} to {1}.{2}".format(sig, self.daemon_type, self.daemon_id))


 def safe_kill(pid):
@@ -453,9 +458,9 @@ class LocalFuseMount(FuseMount):
             # Previous mount existed, reuse the old name
             name = self.fs.name
             self.fs = LocalFilesystem(self.ctx, name=name)
-        log.info('Wait for MDS to reach steady state...')
+        log.debug('Wait for MDS to reach steady state...')
         self.fs.wait_for_daemons()
-        log.info('Ready to start {}...'.format(type(self).__name__))
+        log.debug('Ready to start {}...'.format(type(self).__name__))

     @property
     def _prefix(self):
@@ -511,7 +516,7 @@ class LocalFuseMount(FuseMount):
         # Before starting ceph-fuse process, note the contents of
         # /sys/fs/fuse/connections
         pre_mount_conns = list_connections()
-        log.info("Pre-mount connections: {0}".format(pre_mount_conns))
+        log.debug("Pre-mount connections: {0}".format(pre_mount_conns))

         prefix = [os.path.join(BIN_PREFIX, "ceph-fuse")]
         if os.getuid() != 0:
@@ -531,7 +536,7 @@ class LocalFuseMount(FuseMount):
             self.mountpoint
         ], wait=False)

-        log.info("Mounting client.{0} with pid {1}".format(self.client_id, self.fuse_daemon.subproc.pid))
+        log.debug("Mounting client.{0} with pid {1}".format(self.client_id, self.fuse_daemon.subproc.pid))

         # Wait for the connection reference to appear in /sys
         waited = 0
@@ -549,7 +554,7 @@ class LocalFuseMount(FuseMount):
                 ))

         post_mount_conns = list_connections()
-        log.info("Post-mount connections: {0}".format(post_mount_conns))
+        log.debug("Post-mount connections: {0}".format(post_mount_conns))

         # Record our fuse connection number so that we can use it when
         # forcing an unmount
@@ -583,7 +588,7 @@ class LocalCephManager(CephManager):
         # certain teuthology tests want to run tasks in parallel
         self.lock = threading.RLock()

-        self.log = lambda x: log.info(x)
+        self.log = lambda x: log.debug(x)

         # Don't bother constructing a map of pools: it should be empty
         # at test cluster start, and in any case it would be out of date
@@ -677,7 +682,7 @@ class LocalCephCluster(CephCluster):
                 existing_str += "\n[{0}]\n".format(subsys)
             for key, val in kvs.items():
                 # Comment out existing instance if it exists
-                log.info("Searching for existing instance {0}/{1}".format(
+                log.debug("Searching for existing instance {0}/{1}".format(
                     key, subsys
                 ))
                 existing_section = re.search("^\[{0}\]$([\n]|[^\[])+".format(
@@ -689,7 +694,7 @@ class LocalCephCluster(CephCluster):
                     existing_val = re.search("^\s*[^#]({0}) =".format(key), section_str, re.MULTILINE)
                     if existing_val:
                         start = existing_section.start() + existing_val.start(1)
-                        log.info("Found string to replace at {0}".format(
+                        log.debug("Found string to replace at {0}".format(
                             start
                         ))
                         existing_str = existing_str[0:start] + "#" + existing_str[start:]
@@ -756,7 +761,7 @@ class LocalFilesystem(Filesystem, LocalMDSCluster):

         self.mds_ids = list(self.mds_ids)

-        log.info("Discovered MDS IDs: {0}".format(self.mds_ids))
+        log.debug("Discovered MDS IDs: {0}".format(self.mds_ids))

         self.mon_manager = LocalCephManager()

@@ -813,7 +818,7 @@ class InteractiveFailureResult(unittest.TextTestResult):


 def enumerate_methods(s):
-    log.info("e: {0}".format(s))
+    log.debug("e: {0}".format(s))
     for t in s._tests:
         if isinstance(t, suite.BaseTestSuite):
             for sub in enumerate_methods(t):
@@ -824,15 +829,15 @@ def enumerate_methods(s):

 def load_tests(modules, loader):
     if modules:
-        log.info("Executing modules: {0}".format(modules))
+        log.debug("Executing modules: {0}".format(modules))
         module_suites = []
         for mod_name in modules:
             # Test names like cephfs.test_auto_repair
             module_suites.append(loader.loadTestsFromName(mod_name))
-        log.info("Loaded: {0}".format(list(module_suites)))
+        log.debug("Loaded: {0}".format(list(module_suites)))
         return suite.TestSuite(module_suites)
     else:
-        log.info("Executing all cephfs tests")
+        log.debug("Executing all cephfs tests")
         return loader.discover(
             os.path.join(os.path.dirname(os.path.abspath(__file__)), "cephfs")
         )
@@ -1091,7 +1096,7 @@ def exec_test():
                 if not is_named:
                     victims.append((case, method))

-    log.info("Disabling {0} tests because of is_for_teuthology or needs_trimming".format(len(victims)))
+    log.debug("Disabling {0} tests because of is_for_teuthology or needs_trimming".format(len(victims)))
     for s, method in victims:
         s._tests.remove(method)
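
The quieter console output follows from standard Python logging semantics: records emitted with log.debug() sit below the INFO threshold that a typical console handler is configured with, so they are filtered out, while log.info() records are still printed. A minimal sketch of that behaviour, assuming a console handler left at INFO (the logger name below is made up for illustration and is not one used by these test suites):

    import logging

    # Console handler at INFO: DEBUG records are filtered out,
    # INFO records still reach the console.
    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger("qa.tasks.example")

    log.info("still printed on the console")
    log.debug("dropped unless the effective level is lowered to DEBUG")

With such a configuration only the log.info() line appears, which is why demoting the chattier helper messages to log.debug() trims the console traces of the dashboard API test runs while still allowing any handler configured at DEBUG to collect them.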