git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
mgr/dashboard: minimize console log traces of Dashboard backend API tests 39545/head
author Aashish Sharma <aashishsharma@localhost.localdomain>
Tue, 14 Jul 2020 04:58:57 +0000 (10:28 +0530)
committer Aashish Sharma <aashishsharma@localhost.localdomain>
Thu, 18 Feb 2021 10:05:45 +0000 (15:35 +0530)
Changed log levels from info to debug in several qa/tasks files to reduce the console log traces produced while running the Dashboard backend API tests.

Fixes: https://tracker.ceph.com/issues/45822
Signed-off-by: Aashish Sharma <aasharma@redhat.com>
(cherry picked from commit f5dd0195cd45b6e00e43ef97e209e565e6452777)
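
The change works because Python's logging module filters records by severity: a handler only emits records at or above its configured level, so demoting these messages from info to debug keeps them off an INFO-level console handler while a DEBUG-level file handler can still record them. Below is a minimal sketch of that setup; the logger name, handler levels, and file path are illustrative assumptions, not taken from this commit (vstart_runner.py configures its own logger in init_log()):

    import logging
    import sys

    # Illustrative logger name, not the one the qa code actually uses.
    log = logging.getLogger("vstart_runner")
    log.setLevel(logging.DEBUG)  # the logger itself passes everything through

    # Console handler at INFO: log.debug() calls, like the ones this commit
    # introduces, are filtered out here and never reach the terminal.
    console = logging.StreamHandler(sys.stderr)
    console.setLevel(logging.INFO)
    log.addHandler(console)

    # File handler at DEBUG: the full trace is still captured for later analysis.
    logfile = logging.FileHandler("vstart_runner.log")  # assumed path
    logfile.setLevel(logging.DEBUG)
    log.addHandler(logfile)

    log.debug("suppressed on the console, kept in the log file")
    log.info("emitted in both places")

The urllib3 hunk in qa/tasks/vstart_runner.py addresses a separate source of console noise: HTTPS requests made with certificate verification disabled emit an InsecureRequestWarning per call, and disabling that warning category (guarded by try/except so a missing urllib3 is harmless) keeps those traces out of the test output as well.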

qa/tasks/cephfs/filesystem.py
qa/tasks/mgr/dashboard/helper.py
qa/tasks/mgr/mgr_test_case.py
qa/tasks/vstart_runner.py

diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py
index bf337f84fc9cbef90d3618cb67692b699f14e028..7f01b0ff48000d60c21a2343f7fbb5a896ec32e1 100644
@@ -510,7 +510,7 @@ class Filesystem(MDSCluster):
                 while count > max_mds:
                     targets = sorted(self.get_ranks(status=status), key=lambda r: r['rank'], reverse=True)
                     target = targets[0]
-                    log.info("deactivating rank %d" % target['rank'])
+                    log.debug("deactivating rank %d" % target['rank'])
                     self.deactivate(target['rank'])
                     status = self.wait_for_daemons(skip_max_mds_check=True)
                     count = len(list(self.get_ranks(status=status)))
@@ -571,7 +571,7 @@ class Filesystem(MDSCluster):
         else:
             data_pool_name = self.data_pool_name
 
-        log.info("Creating filesystem '{0}'".format(self.name))
+        log.debug("Creating filesystem '{0}'".format(self.name))
 
         self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                          self.metadata_pool_name, self.pgs_per_fs_pool.__str__())
@@ -581,7 +581,7 @@ class Filesystem(MDSCluster):
                                              '--allow-dangerous-metadata-overlay')
         else:
             if self.ec_profile and 'disabled' not in self.ec_profile:
-                log.info("EC profile is %s", self.ec_profile)
+                log.debug("EC profile is %s", self.ec_profile)
                 cmd = ['osd', 'erasure-code-profile', 'set', data_pool_name]
                 cmd.extend(self.ec_profile)
                 self.mon_manager.raw_cluster_cmd(*cmd)
@@ -782,7 +782,7 @@ class Filesystem(MDSCluster):
             else:
                 raise
 
-        log.info("are_daemons_healthy: mds map: {0}".format(mds_map))
+        log.debug("are_daemons_healthy: mds map: {0}".format(mds_map))
 
         for mds_id, mds_status in mds_map['info'].items():
             if mds_status['state'] not in ["up:active", "up:standby", "up:standby-replay"]:
@@ -791,13 +791,13 @@ class Filesystem(MDSCluster):
             elif mds_status['state'] == 'up:active':
                 active_count += 1
 
-        log.info("are_daemons_healthy: {0}/{1}".format(
+        log.debug("are_daemons_healthy: {0}/{1}".format(
             active_count, mds_map['max_mds']
         ))
 
         if not skip_max_mds_check:
             if active_count > mds_map['max_mds']:
-                log.info("are_daemons_healthy: number of actives is greater than max_mds: {0}".format(mds_map))
+                log.debug("are_daemons_healthy: number of actives is greater than max_mds: {0}".format(mds_map))
                 return False
             elif active_count == mds_map['max_mds']:
                 # The MDSMap says these guys are active, but let's check they really are
@@ -821,7 +821,7 @@ class Filesystem(MDSCluster):
             else:
                 return False
         else:
-            log.info("are_daemons_healthy: skipping max_mds check")
+            log.debug("are_daemons_healthy: skipping max_mds check")
             return True
 
     def get_daemon_names(self, state=None, status=None):
@@ -932,7 +932,7 @@ class Filesystem(MDSCluster):
                 elapsed += 1
 
             if elapsed > timeout:
-                log.info("status = {0}".format(status))
+                log.debug("status = {0}".format(status))
                 raise RuntimeError("Timed out waiting for MDS daemons to become healthy")
 
             status = self.status()
@@ -1014,7 +1014,7 @@ class Filesystem(MDSCluster):
         journal_header_dump = self.get_metadata_object('Journaler::Header', journal_header_object)
 
         version = journal_header_dump['journal_header']['stream_format']
-        log.info("Read journal version {0}".format(version))
+        log.debug("Read journal version {0}".format(version))
 
         return version
 
@@ -1079,11 +1079,11 @@ class Filesystem(MDSCluster):
                 try:
                     mds_info = status.get_rank(self.id, rank)
                     current_state = mds_info['state'] if mds_info else None
-                    log.info("Looked up MDS state for mds.{0}: {1}".format(rank, current_state))
+                    log.debug("Looked up MDS state for mds.{0}: {1}".format(rank, current_state))
                 except:
                     mdsmap = self.get_mds_map(status=status)
                     if rank in mdsmap['failed']:
-                        log.info("Waiting for rank {0} to come back.".format(rank))
+                        log.debug("Waiting for rank {0} to come back.".format(rank))
                         current_state = None
                     else:
                         raise
@@ -1091,7 +1091,7 @@ class Filesystem(MDSCluster):
                 # mds_info is None if no daemon with this ID exists in the map
                 mds_info = status.get_mds(mds_id)
                 current_state = mds_info['state'] if mds_info else None
-                log.info("Looked up MDS state for {0}: {1}".format(mds_id, current_state))
+                log.debug("Looked up MDS state for {0}: {1}".format(mds_id, current_state))
             else:
                 # In general, look for a single MDS
                 states = [m['state'] for m in status.get_ranks(self.id)]
@@ -1101,11 +1101,11 @@ class Filesystem(MDSCluster):
                     current_state = reject
                 else:
                     current_state = None
-                log.info("mapped states {0} to {1}".format(states, current_state))
+                log.debug("mapped states {0} to {1}".format(states, current_state))
 
             elapsed = time.time() - started_at
             if current_state == goal_state:
-                log.info("reached state '{0}' in {1}s".format(current_state, elapsed))
+                log.debug("reached state '{0}' in {1}s".format(current_state, elapsed))
                 return elapsed
             elif reject is not None and current_state == reject:
                 raise RuntimeError("MDS in reject state {0}".format(current_state))
@@ -1237,12 +1237,12 @@ class Filesystem(MDSCluster):
         missing = set(want_objects) - set(exist_objects)
 
         if missing:
-            log.info("Objects missing (ino {0}, size {1}): {2}".format(
+            log.debug("Objects missing (ino {0}, size {1}): {2}".format(
                 ino, size, missing
             ))
             return False
         else:
-            log.info("All objects for ino {0} size {1} found".format(ino, size))
+            log.debug("All objects for ino {0} size {1} found".format(ino, size))
             return True
 
     def data_objects_absent(self, ino, size):
@@ -1250,12 +1250,12 @@ class Filesystem(MDSCluster):
         present = set(want_objects) & set(exist_objects)
 
         if present:
-            log.info("Objects not absent (ino {0}, size {1}): {2}".format(
+            log.debug("Objects not absent (ino {0}, size {1}): {2}".format(
                 ino, size, present
             ))
             return False
         else:
-            log.info("All objects for ino {0} size {1} are absent".format(ino, size))
+            log.debug("All objects for ino {0} size {1} are absent".format(ino, size))
             return True
 
     def dirfrag_exists(self, ino, frag):
@@ -1374,7 +1374,7 @@ class Filesystem(MDSCluster):
         t1 = datetime.datetime.now()
         r = self.tool_remote.sh(script=base_args + args, stdout=StringIO()).strip()
         duration = datetime.datetime.now() - t1
-        log.info("Ran {0} in time {1}, result:\n{2}".format(
+        log.debug("Ran {0} in time {1}, result:\n{2}".format(
             base_args + args, duration, r
         ))
         return r
diff --git a/qa/tasks/mgr/dashboard/helper.py b/qa/tasks/mgr/dashboard/helper.py
index 64cbba9e3f623a69986a276651a91c1c81b574f2..7c491968259ac09d8bdcf58c5d5d46fd2755d647 100644
@@ -200,7 +200,7 @@ class DashboardTestCase(MgrTestCase):
     @classmethod
     def _request(cls, url, method, data=None, params=None, set_cookies=False):
         url = "{}{}".format(cls._base_uri, url)
-        log.info("Request %s to %s", method, url)
+        log.debug("Request %s to %s", method, url)
         headers = {}
         cookies = {}
         if cls._token:
@@ -310,7 +310,7 @@ class DashboardTestCase(MgrTestCase):
             return None
 
         if cls._resp.status_code != 202:
-            log.info("task finished immediately")
+            log.debug("task finished immediately")
             return res
 
         cls._assertIn('name', res)
@@ -322,8 +322,7 @@ class DashboardTestCase(MgrTestCase):
         res_task = None
         while retries > 0 and not res_task:
             retries -= 1
-            log.info("task (%s, %s) is still executing", task_name,
-                     task_metadata)
+            log.debug("task (%s, %s) is still executing", task_name, task_metadata)
             time.sleep(1)
             _res = cls._get('/api/task?name={}'.format(task_name))
             cls._assertEq(cls._resp.status_code, 200)
@@ -338,7 +337,7 @@ class DashboardTestCase(MgrTestCase):
             raise Exception("Waiting for task ({}, {}) to finish timed out. {}"
                             .format(task_name, task_metadata, _res))
 
-        log.info("task (%s, %s) finished", task_name, task_metadata)
+        log.debug("task (%s, %s) finished", task_name, task_metadata)
         if res_task['success']:
             if method == 'POST':
                 cls._resp.status_code = 201
@@ -424,13 +423,13 @@ class DashboardTestCase(MgrTestCase):
     @classmethod
     def _ceph_cmd(cls, cmd):
         res = cls.mgr_cluster.mon_manager.raw_cluster_cmd(*cmd)
-        log.info("command result: %s", res)
+        log.debug("command result: %s", res)
         return res
 
     @classmethod
     def _ceph_cmd_result(cls, cmd):
         exitstatus = cls.mgr_cluster.mon_manager.raw_cluster_cmd_result(*cmd)
-        log.info("command exit status: %d", exitstatus)
+        log.debug("command exit status: %d", exitstatus)
         return exitstatus
 
     def set_config_key(self, key, value):
diff --git a/qa/tasks/mgr/mgr_test_case.py b/qa/tasks/mgr/mgr_test_case.py
index 8687b5f29002026de39709cbf9354b35f871248e..f5392d3ba38a33ac25e7aa68dc77f835036ac25f 100644
@@ -117,7 +117,7 @@ class MgrTestCase(CephTestCase):
         if is_disabled():
             return
 
-        log.info("Unloading Mgr module %s ...", module_name)
+        log.debug("Unloading Mgr module %s ...", module_name)
         cls.mgr_cluster.mon_manager.raw_cluster_cmd('mgr', 'module', 'disable', module_name)
         cls.wait_until_true(is_disabled, timeout=30)
 
@@ -143,7 +143,7 @@ class MgrTestCase(CephTestCase):
                 if module_name in always_on:
                     return
 
-        log.info("Loading Mgr module %s ...", module_name)
+        log.debug("Loading Mgr module %s ...", module_name)
         initial_gid = initial_mgr_map['active_gid']
         cls.mgr_cluster.mon_manager.raw_cluster_cmd(
             "mgr", "module", "enable", module_name, "--force")
@@ -153,7 +153,7 @@ class MgrTestCase(CephTestCase):
             mgr_map = cls.mgr_cluster.get_mgr_map()
             done = mgr_map['active_gid'] != initial_gid and mgr_map['available']
             if done:
-                log.info("Restarted after module load (new active {0}/{1})".format(
+                log.debug("Restarted after module load (new active {0}/{1})".format(
                     mgr_map['active_name'], mgr_map['active_gid']))
             return done
         cls.wait_until_true(has_restarted, timeout=30)
@@ -174,7 +174,7 @@ class MgrTestCase(CephTestCase):
 
         uri = mgr_map['x']['services'][service_name]
 
-        log.info("Found {0} at {1} (daemon {2}/{3})".format(
+        log.debug("Found {0} at {1} (daemon {2}/{3})".format(
             service_name, uri, mgr_map['x']['active_name'],
             mgr_map['x']['active_gid']))
 
@@ -198,7 +198,7 @@ class MgrTestCase(CephTestCase):
             cls.mgr_cluster.mgr_fail(mgr_id)
 
         for mgr_id in cls.mgr_cluster.mgr_ids:
-            log.info("Using port {0} for {1} on mgr.{2}".format(
+            log.debug("Using port {0} for {1} on mgr.{2}".format(
                 assign_port, module_name, mgr_id
             ))
             cls.mgr_cluster.set_module_localized_conf(module_name, mgr_id,
@@ -214,7 +214,7 @@ class MgrTestCase(CephTestCase):
             mgr_map = cls.mgr_cluster.get_mgr_map()
             done = mgr_map['available']
             if done:
-                log.info("Available after assign ports (new active {0}/{1})".format(
+                log.debug("Available after assign ports (new active {0}/{1})".format(
                     mgr_map['active_name'], mgr_map['active_gid']))
             return done
         cls.wait_until_true(is_available, timeout=30)
diff --git a/qa/tasks/vstart_runner.py b/qa/tasks/vstart_runner.py
index 50f5cc48f2f3ecf0dba9518e3bc86780f45574ab..2ba79ea95c058c7e83b9b5d849d2392f74ba3f26 100644
@@ -53,6 +53,11 @@ from teuthology.orchestra.daemon import DaemonGroup
 from teuthology.config import config as teuth_config
 import six
 import logging
+try:
+    import urllib3
+    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+except:
+    pass
 
 def init_log():
     global log
@@ -210,13 +215,13 @@ class LocalRemoteProcess(object):
             return False
 
     def kill(self):
-        log.info("kill ")
+        log.debug("kill ")
         if self.subproc.pid and not self.finished:
-            log.info("kill: killing pid {0} ({1})".format(
+            log.debug("kill: killing pid {0} ({1})".format(
                 self.subproc.pid, self.args))
             safe_kill(self.subproc.pid)
         else:
-            log.info("kill: already terminated ({0})".format(self.args))
+            log.debug("kill: already terminated ({0})".format(self.args))
 
     @property
     def stdin(self):
@@ -360,7 +365,7 @@ class LocalRemote(object):
                     args[0]
                 ))
 
-        log.info("Running {0}".format(args))
+        log.debug("Running {0}".format(args))
 
         if shell:
             subproc = subprocess.Popen(quote(args),
@@ -454,13 +459,13 @@ class LocalDaemon(object):
 
         for line in lines:
             if line.find("ceph-{0} -i {1}".format(self.daemon_type, self.daemon_id)) != -1:
-                log.info("Found ps line for daemon: {0}".format(line))
+                log.debug("Found ps line for daemon: {0}".format(line))
                 return int(line.split()[0])
         if opt_log_ps_output:
-            log.info("No match for {0} {1}: {2}".format(
+            log.debug("No match for {0} {1}: {2}".format(
                 self.daemon_type, self.daemon_id, ps_txt))
         else:
-            log.info("No match for {0} {1}".format(self.daemon_type,
+            log.debug("No match for {0} {1}".format(self.daemon_type,
                      self.daemon_id))
             return None
 
@@ -478,14 +483,14 @@ class LocalDaemon(object):
             return
 
         pid = self._get_pid()
-        log.info("Killing PID {0} for {1}.{2}".format(pid, self.daemon_type, self.daemon_id))
+        log.debug("Killing PID {0} for {1}.{2}".format(pid, self.daemon_type, self.daemon_id))
         os.kill(pid, signal.SIGTERM)
 
         waited = 0
         while pid is not None:
             new_pid = self._get_pid()
             if new_pid is not None and new_pid != pid:
-                log.info("Killing new PID {0}".format(new_pid))
+                log.debug("Killing new PID {0}".format(new_pid))
                 pid = new_pid
                 os.kill(pid, signal.SIGTERM)
 
@@ -515,7 +520,7 @@ class LocalDaemon(object):
 
         os.kill(self._get_pid(), sig)
         if not silent:
-            log.info("Sent signal {0} to {1}.{2}".format(sig, self.daemon_type, self.daemon_id))
+            log.debug("Sent signal {0} to {1}.{2}".format(sig, self.daemon_type, self.daemon_id))
 
 
 def safe_kill(pid):
@@ -617,9 +622,9 @@ class LocalKernelMount(KernelMount):
             # Previous mount existed, reuse the old name
             name = self.fs.name
         self.fs = LocalFilesystem(self.ctx, name=name)
-        log.info('Wait for MDS to reach steady state...')
+        log.debug('Wait for MDS to reach steady state...')
         self.fs.wait_for_daemons()
-        log.info('Ready to start {}...'.format(type(self).__name__))
+        log.debug('Ready to start {}...'.format(type(self).__name__))
 
     @property
     def _prefix(self):
@@ -778,9 +783,9 @@ class LocalFuseMount(FuseMount):
             # Previous mount existed, reuse the old name
             name = self.fs.name
         self.fs = LocalFilesystem(self.ctx, name=name)
-        log.info('Wait for MDS to reach steady state...')
+        log.debug('Wait for MDS to reach steady state...')
         self.fs.wait_for_daemons()
-        log.info('Ready to start {}...'.format(type(self).__name__))
+        log.debug('Ready to start {}...'.format(type(self).__name__))
 
     @property
     def _prefix(self):
@@ -836,7 +841,7 @@ class LocalFuseMount(FuseMount):
         # Before starting ceph-fuse process, note the contents of
         # /sys/fs/fuse/connections
         pre_mount_conns = list_connections()
-        log.info("Pre-mount connections: {0}".format(pre_mount_conns))
+        log.debug("Pre-mount connections: {0}".format(pre_mount_conns))
 
         prefix = [os.path.join(BIN_PREFIX, "ceph-fuse")]
         if os.getuid() != 0:
@@ -858,7 +863,7 @@ class LocalFuseMount(FuseMount):
                                                 self.mountpoint
                                             ], wait=False)
 
-        log.info("Mounting client.{0} with pid {1}".format(self.client_id, self.fuse_daemon.subproc.pid))
+        log.debug("Mounting client.{0} with pid {1}".format(self.client_id, self.fuse_daemon.subproc.pid))
 
         # Wait for the connection reference to appear in /sys
         waited = 0
@@ -876,7 +881,7 @@ class LocalFuseMount(FuseMount):
                 ))
             post_mount_conns = list_connections()
 
-        log.info("Post-mount connections: {0}".format(post_mount_conns))
+        log.debug("Post-mount connections: {0}".format(post_mount_conns))
 
         # Record our fuse connection number so that we can use it when
         # forcing an unmount
@@ -912,7 +917,7 @@ class LocalCephManager(CephManager):
         # certain teuthology tests want to run tasks in parallel
         self.lock = threading.RLock()
 
-        self.log = lambda x: log.info(x)
+        self.log = lambda x: log.debug(x)
 
         # Don't bother constructing a map of pools: it should be empty
         # at test cluster start, and in any case it would be out of date
@@ -1054,7 +1059,7 @@ class LocalCephCluster(CephCluster):
             existing_str += "\n[{0}]\n".format(subsys)
             for key, val in kvs.items():
                 # Comment out existing instance if it exists
-                log.info("Searching for existing instance {0}/{1}".format(
+                log.debug("Searching for existing instance {0}/{1}".format(
                     key, subsys
                 ))
                 existing_section = re.search("^\[{0}\]$([\n]|[^\[])+".format(
@@ -1066,7 +1071,7 @@ class LocalCephCluster(CephCluster):
                     existing_val = re.search("^\s*[^#]({0}) =".format(key), section_str, re.MULTILINE)
                     if existing_val:
                         start = existing_section.start() + existing_val.start(1)
-                        log.info("Found string to replace at {0}".format(
+                        log.debug("Found string to replace at {0}".format(
                             start
                         ))
                         existing_str = existing_str[0:start] + "#" + existing_str[start:]
@@ -1133,7 +1138,7 @@ class LocalFilesystem(Filesystem, LocalMDSCluster):
 
         self.mds_ids = list(self.mds_ids)
 
-        log.info("Discovered MDS IDs: {0}".format(self.mds_ids))
+        log.debug("Discovered MDS IDs: {0}".format(self.mds_ids))
 
         self.mon_manager = LocalCephManager()
 
@@ -1190,7 +1195,7 @@ class InteractiveFailureResult(unittest.TextTestResult):
 
 
 def enumerate_methods(s):
-    log.info("e: {0}".format(s))
+    log.debug("e: {0}".format(s))
     for t in s._tests:
         if isinstance(t, suite.BaseTestSuite):
             for sub in enumerate_methods(t):
@@ -1201,15 +1206,15 @@ def enumerate_methods(s):
 
 def load_tests(modules, loader):
     if modules:
-        log.info("Executing modules: {0}".format(modules))
+        log.debug("Executing modules: {0}".format(modules))
         module_suites = []
         for mod_name in modules:
             # Test names like cephfs.test_auto_repair
             module_suites.append(loader.loadTestsFromName(mod_name))
-        log.info("Loaded: {0}".format(list(module_suites)))
+        log.debug("Loaded: {0}".format(list(module_suites)))
         return suite.TestSuite(module_suites)
     else:
-        log.info("Executing all cephfs tests")
+        log.debug("Executing all cephfs tests")
         return loader.discover(
             os.path.join(os.path.dirname(os.path.abspath(__file__)), "cephfs")
         )
@@ -1289,7 +1294,7 @@ def clear_old_log():
         with open(logpath, 'w') as logfile:
             logfile.write('')
         init_log()
-        log.info('logging in a fresh file now...')
+        log.debug('logging in a fresh file now...')
 
 def exec_test():
     # Parse arguments
@@ -1506,7 +1511,7 @@ def exec_test():
             if not is_named:
                 victims.append((case, method))
 
-    log.info("Disabling {0} tests because of is_for_teuthology or needs_trimming".format(len(victims)))
+    log.debug("Disabling {0} tests because of is_for_teuthology or needs_trimming".format(len(victims)))
     for s, method in victims:
         s._tests.remove(method)