]> git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
mgr/dashboard: minimize console log traces of Dashboard backend API tests 36088/head
authorAashish Sharma <aashishsharma@localhost.localdomain>
Tue, 14 Jul 2020 04:58:57 +0000 (10:28 +0530)
committerAashish Sharma <aashishsharma@localhost.localdomain>
Wed, 23 Dec 2020 06:02:43 +0000 (11:32 +0530)
Changed log levels in some files to reduce console log traces

Fixes: https://tracker.ceph.com/issues/45822
Signed-off-by: Aashish Sharma <aasharma@redhat.com>
qa/tasks/cephfs/filesystem.py
qa/tasks/mgr/dashboard/helper.py
qa/tasks/mgr/mgr_test_case.py
qa/tasks/vstart_runner.py

index e3e2f5b6e6bb38947690d052a4dc70914e68c738..8df09412576a176578155de5db20b6f573fe4174 100644 (file)
@@ -532,7 +532,7 @@ class Filesystem(MDSCluster):
                 while count > max_mds:
                     targets = sorted(self.get_ranks(status=status), key=lambda r: r['rank'], reverse=True)
                     target = targets[0]
-                    log.info("deactivating rank %d" % target['rank'])
+                    log.debug("deactivating rank %d" % target['rank'])
                     self.deactivate(target['rank'])
                     status = self.wait_for_daemons(skip_max_mds_check=True)
                     count = len(list(self.get_ranks(status=status)))
@@ -593,7 +593,7 @@ class Filesystem(MDSCluster):
         else:
             data_pool_name = self.data_pool_name
 
-        log.info("Creating filesystem '{0}'".format(self.name))
+        log.debug("Creating filesystem '{0}'".format(self.name))
 
         self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                          self.metadata_pool_name, self.pgs_per_fs_pool.__str__())
@@ -603,7 +603,7 @@ class Filesystem(MDSCluster):
                                              '--allow-dangerous-metadata-overlay')
         else:
             if self.ec_profile and 'disabled' not in self.ec_profile:
-                log.info("EC profile is %s", self.ec_profile)
+                log.debug("EC profile is %s", self.ec_profile)
                 cmd = ['osd', 'erasure-code-profile', 'set', data_pool_name]
                 cmd.extend(self.ec_profile)
                 self.mon_manager.raw_cluster_cmd(*cmd)
@@ -839,7 +839,7 @@ class Filesystem(MDSCluster):
             else:
                 raise
 
-        log.info("are_daemons_healthy: mds map: {0}".format(mds_map))
+        log.debug("are_daemons_healthy: mds map: {0}".format(mds_map))
 
         for mds_id, mds_status in mds_map['info'].items():
             if mds_status['state'] not in ["up:active", "up:standby", "up:standby-replay"]:
@@ -848,13 +848,13 @@ class Filesystem(MDSCluster):
             elif mds_status['state'] == 'up:active':
                 active_count += 1
 
-        log.info("are_daemons_healthy: {0}/{1}".format(
+        log.debug("are_daemons_healthy: {0}/{1}".format(
             active_count, mds_map['max_mds']
         ))
 
         if not skip_max_mds_check:
             if active_count > mds_map['max_mds']:
-                log.info("are_daemons_healthy: number of actives is greater than max_mds: {0}".format(mds_map))
+                log.debug("are_daemons_healthy: number of actives is greater than max_mds: {0}".format(mds_map))
                 return False
             elif active_count == mds_map['max_mds']:
                 # The MDSMap says these guys are active, but let's check they really are
@@ -878,7 +878,7 @@ class Filesystem(MDSCluster):
             else:
                 return False
         else:
-            log.info("are_daemons_healthy: skipping max_mds check")
+            log.debug("are_daemons_healthy: skipping max_mds check")
             return True
 
     def get_daemon_names(self, state=None, status=None):
@@ -989,7 +989,7 @@ class Filesystem(MDSCluster):
                 elapsed += 1
 
             if elapsed > timeout:
-                log.info("status = {0}".format(status))
+                log.debug("status = {0}".format(status))
                 raise RuntimeError("Timed out waiting for MDS daemons to become healthy")
 
             status = self.status()
@@ -1065,7 +1065,7 @@ class Filesystem(MDSCluster):
         journal_header_dump = self.get_metadata_object('Journaler::Header', journal_header_object)
 
         version = journal_header_dump['journal_header']['stream_format']
-        log.info("Read journal version {0}".format(version))
+        log.debug("Read journal version {0}".format(version))
 
         return version
 
@@ -1130,11 +1130,11 @@ class Filesystem(MDSCluster):
                 try:
                     mds_info = status.get_rank(self.id, rank)
                     current_state = mds_info['state'] if mds_info else None
-                    log.info("Looked up MDS state for mds.{0}: {1}".format(rank, current_state))
+                    log.debug("Looked up MDS state for mds.{0}: {1}".format(rank, current_state))
                 except:
                     mdsmap = self.get_mds_map(status=status)
                     if rank in mdsmap['failed']:
-                        log.info("Waiting for rank {0} to come back.".format(rank))
+                        log.debug("Waiting for rank {0} to come back.".format(rank))
                         current_state = None
                     else:
                         raise
@@ -1142,7 +1142,7 @@ class Filesystem(MDSCluster):
                 # mds_info is None if no daemon with this ID exists in the map
                 mds_info = status.get_mds(mds_id)
                 current_state = mds_info['state'] if mds_info else None
-                log.info("Looked up MDS state for {0}: {1}".format(mds_id, current_state))
+                log.debug("Looked up MDS state for {0}: {1}".format(mds_id, current_state))
             else:
                 # In general, look for a single MDS
                 states = [m['state'] for m in status.get_ranks(self.id)]
@@ -1152,11 +1152,11 @@ class Filesystem(MDSCluster):
                     current_state = reject
                 else:
                     current_state = None
-                log.info("mapped states {0} to {1}".format(states, current_state))
+                log.debug("mapped states {0} to {1}".format(states, current_state))
 
             elapsed = time.time() - started_at
             if current_state == goal_state:
-                log.info("reached state '{0}' in {1}s".format(current_state, elapsed))
+                log.debug("reached state '{0}' in {1}s".format(current_state, elapsed))
                 return elapsed
             elif reject is not None and current_state == reject:
                 raise RuntimeError("MDS in reject state {0}".format(current_state))
@@ -1289,12 +1289,12 @@ class Filesystem(MDSCluster):
         missing = set(want_objects) - set(exist_objects)
 
         if missing:
-            log.info("Objects missing (ino {0}, size {1}): {2}".format(
+            log.debug("Objects missing (ino {0}, size {1}): {2}".format(
                 ino, size, missing
             ))
             return False
         else:
-            log.info("All objects for ino {0} size {1} found".format(ino, size))
+            log.debug("All objects for ino {0} size {1} found".format(ino, size))
             return True
 
     def data_objects_absent(self, ino, size):
@@ -1302,12 +1302,12 @@ class Filesystem(MDSCluster):
         present = set(want_objects) & set(exist_objects)
 
         if present:
-            log.info("Objects not absent (ino {0}, size {1}): {2}".format(
+            log.debug("Objects not absent (ino {0}, size {1}): {2}".format(
                 ino, size, present
             ))
             return False
         else:
-            log.info("All objects for ino {0} size {1} are absent".format(ino, size))
+            log.debug("All objects for ino {0} size {1} are absent".format(ino, size))
             return True
 
     def dirfrag_exists(self, ino, frag):
@@ -1438,7 +1438,7 @@ class Filesystem(MDSCluster):
         t1 = datetime.datetime.now()
         r = self.tool_remote.sh(script=base_args + args, stdout=StringIO()).strip()
         duration = datetime.datetime.now() - t1
-        log.info("Ran {0} in time {1}, result:\n{2}".format(
+        log.debug("Ran {0} in time {1}, result:\n{2}".format(
             base_args + args, duration, r
         ))
         return r
index 78915eb201527bf8abb17d81078f4aec38131978..3e85fc3ba862cb4177f8ba9e971ab70d42464141 100644 (file)
@@ -279,7 +279,7 @@ class DashboardTestCase(MgrTestCase):
     def _request(cls, url, method, data=None, params=None, version=DEFAULT_VERSION,
                  set_cookies=False):
         url = "{}{}".format(cls._base_uri, url)
-        log.info("Request %s to %s", method, url)
+        log.debug("Request %s to %s", method, url)
         headers = {}
         cookies = {}
         if cls._token:
@@ -394,7 +394,7 @@ class DashboardTestCase(MgrTestCase):
             return None
 
         if cls._resp.status_code != 202:
-            log.info("task finished immediately")
+            log.debug("task finished immediately")
             return res
 
         cls._assertIn('name', res)
@@ -406,8 +406,7 @@ class DashboardTestCase(MgrTestCase):
         res_task = None
         while retries > 0 and not res_task:
             retries -= 1
-            log.info("task (%s, %s) is still executing", task_name,
-                     task_metadata)
+            log.debug("task (%s, %s) is still executing", task_name, task_metadata)
             time.sleep(1)
             _res = cls._get('/api/task?name={}'.format(task_name), version=version)
             cls._assertEq(cls._resp.status_code, 200)
@@ -422,7 +421,7 @@ class DashboardTestCase(MgrTestCase):
             raise Exception("Waiting for task ({}, {}) to finish timed out. {}"
                             .format(task_name, task_metadata, _res))
 
-        log.info("task (%s, %s) finished", task_name, task_metadata)
+        log.debug("task (%s, %s) finished", task_name, task_metadata)
         if res_task['success']:
             if method == 'POST':
                 cls._resp.status_code = 201
@@ -511,13 +510,13 @@ class DashboardTestCase(MgrTestCase):
     @classmethod
     def _ceph_cmd(cls, cmd):
         res = cls.mgr_cluster.mon_manager.raw_cluster_cmd(*cmd)
-        log.info("command result: %s", res)
+        log.debug("command result: %s", res)
         return res
 
     @classmethod
     def _ceph_cmd_result(cls, cmd):
         exitstatus = cls.mgr_cluster.mon_manager.raw_cluster_cmd_result(*cmd)
-        log.info("command exit status: %d", exitstatus)
+        log.debug("command exit status: %d", exitstatus)
         return exitstatus
 
     @classmethod
index 8687b5f29002026de39709cbf9354b35f871248e..f5392d3ba38a33ac25e7aa68dc77f835036ac25f 100644 (file)
@@ -117,7 +117,7 @@ class MgrTestCase(CephTestCase):
         if is_disabled():
             return
 
-        log.info("Unloading Mgr module %s ...", module_name)
+        log.debug("Unloading Mgr module %s ...", module_name)
         cls.mgr_cluster.mon_manager.raw_cluster_cmd('mgr', 'module', 'disable', module_name)
         cls.wait_until_true(is_disabled, timeout=30)
 
@@ -143,7 +143,7 @@ class MgrTestCase(CephTestCase):
                 if module_name in always_on:
                     return
 
-        log.info("Loading Mgr module %s ...", module_name)
+        log.debug("Loading Mgr module %s ...", module_name)
         initial_gid = initial_mgr_map['active_gid']
         cls.mgr_cluster.mon_manager.raw_cluster_cmd(
             "mgr", "module", "enable", module_name, "--force")
@@ -153,7 +153,7 @@ class MgrTestCase(CephTestCase):
             mgr_map = cls.mgr_cluster.get_mgr_map()
             done = mgr_map['active_gid'] != initial_gid and mgr_map['available']
             if done:
-                log.info("Restarted after module load (new active {0}/{1})".format(
+                log.debug("Restarted after module load (new active {0}/{1})".format(
                     mgr_map['active_name'], mgr_map['active_gid']))
             return done
         cls.wait_until_true(has_restarted, timeout=30)
@@ -174,7 +174,7 @@ class MgrTestCase(CephTestCase):
 
         uri = mgr_map['x']['services'][service_name]
 
-        log.info("Found {0} at {1} (daemon {2}/{3})".format(
+        log.debug("Found {0} at {1} (daemon {2}/{3})".format(
             service_name, uri, mgr_map['x']['active_name'],
             mgr_map['x']['active_gid']))
 
@@ -198,7 +198,7 @@ class MgrTestCase(CephTestCase):
             cls.mgr_cluster.mgr_fail(mgr_id)
 
         for mgr_id in cls.mgr_cluster.mgr_ids:
-            log.info("Using port {0} for {1} on mgr.{2}".format(
+            log.debug("Using port {0} for {1} on mgr.{2}".format(
                 assign_port, module_name, mgr_id
             ))
             cls.mgr_cluster.set_module_localized_conf(module_name, mgr_id,
@@ -214,7 +214,7 @@ class MgrTestCase(CephTestCase):
             mgr_map = cls.mgr_cluster.get_mgr_map()
             done = mgr_map['available']
             if done:
-                log.info("Available after assign ports (new active {0}/{1})".format(
+                log.debug("Available after assign ports (new active {0}/{1})".format(
                     mgr_map['active_name'], mgr_map['active_gid']))
             return done
         cls.wait_until_true(is_available, timeout=30)
index 2e9035c94dfc166396b8a2da6af0214078c009fe..2cd0e776b0ef0fb2837cb0e7556f139fb6c8980e 100644 (file)
@@ -57,6 +57,11 @@ from teuthology.config import config as teuth_config
 from teuthology.contextutil import safe_while
 from teuthology.contextutil import MaxWhileTries
 from teuthology.orchestra.run import CommandFailedError
+try:
+    import urllib3
+    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+except:
+    pass
 
 def init_log():
     global log
@@ -232,16 +237,16 @@ class LocalRemoteProcess(object):
             return False
 
     def kill(self):
-        log.info("kill ")
+        log.debug("kill ")
         if self.subproc.pid and not self.finished:
-            log.info("kill: killing pid {0} ({1})".format(
+            log.debug("kill: killing pid {0} ({1})".format(
                 self.subproc.pid, self.args))
             if self.fuse_pid != -1:
                 safe_kill(self.fuse_pid)
             else:
                 safe_kill(self.subproc.pid)
         else:
-            log.info("kill: already terminated ({0})".format(self.args))
+            log.debug("kill: already terminated ({0})".format(self.args))
 
     @property
     def stdin(self):
@@ -420,7 +425,7 @@ class LocalRemote(object):
                     args[0]
                 ))
 
-        log.info('> ' +
+        log.debug('> ' +
                  ' '.join([str(a.value) if isinstance(a, Raw) else a for a in args]))
 
         if shell:
@@ -515,13 +520,13 @@ class LocalDaemon(object):
 
         for line in lines:
             if line.find("ceph-{0} -i {1}".format(self.daemon_type, self.daemon_id)) != -1:
-                log.info("Found ps line for daemon: {0}".format(line))
+                log.debug("Found ps line for daemon: {0}".format(line))
                 return int(line.split()[0])
         if opt_log_ps_output:
-            log.info("No match for {0} {1}: {2}".format(
+            log.debug("No match for {0} {1}: {2}".format(
                 self.daemon_type, self.daemon_id, ps_txt))
         else:
-            log.info("No match for {0} {1}".format(self.daemon_type,
+            log.debug("No match for {0} {1}".format(self.daemon_type,
                      self.daemon_id))
             return None
 
@@ -539,14 +544,14 @@ class LocalDaemon(object):
             return
 
         pid = self._get_pid()
-        log.info("Killing PID {0} for {1}.{2}".format(pid, self.daemon_type, self.daemon_id))
+        log.debug("Killing PID {0} for {1}.{2}".format(pid, self.daemon_type, self.daemon_id))
         os.kill(pid, signal.SIGTERM)
 
         waited = 0
         while pid is not None:
             new_pid = self._get_pid()
             if new_pid is not None and new_pid != pid:
-                log.info("Killing new PID {0}".format(new_pid))
+                log.debug("Killing new PID {0}".format(new_pid))
                 pid = new_pid
                 os.kill(pid, signal.SIGTERM)
 
@@ -576,7 +581,7 @@ class LocalDaemon(object):
 
         os.kill(self._get_pid(), sig)
         if not silent:
-            log.info("Sent signal {0} to {1}.{2}".format(sig, self.daemon_type, self.daemon_id))
+            log.debug("Sent signal {0} to {1}.{2}".format(sig, self.daemon_type, self.daemon_id))
 
 
 def safe_kill(pid):
@@ -633,9 +638,9 @@ class LocalKernelMount(KernelMount):
             # Previous mount existed, reuse the old name
             name = self.fs.name
         self.fs = LocalFilesystem(self.ctx, name=name)
-        log.info('Wait for MDS to reach steady state...')
+        log.debug('Wait for MDS to reach steady state...')
         self.fs.wait_for_daemons()
-        log.info('Ready to start {}...'.format(type(self).__name__))
+        log.debug('Ready to start {}...'.format(type(self).__name__))
 
     @property
     def _prefix(self):
@@ -767,9 +772,9 @@ class LocalFuseMount(FuseMount):
             # Previous mount existed, reuse the old name
             name = self.fs.name
         self.fs = LocalFilesystem(self.ctx, name=name)
-        log.info('Wait for MDS to reach steady state...')
+        log.debug('Wait for MDS to reach steady state...')
         self.fs.wait_for_daemons()
-        log.info('Ready to start {}...'.format(type(self).__name__))
+        log.debug('Ready to start {}...'.format(type(self).__name__))
 
     @property
     def _prefix(self):
@@ -838,7 +843,7 @@ class LocalFuseMount(FuseMount):
         # Before starting ceph-fuse process, note the contents of
         # /sys/fs/fuse/connections
         pre_mount_conns = list_connections()
-        log.info("Pre-mount connections: {0}".format(pre_mount_conns))
+        log.debug("Pre-mount connections: {0}".format(pre_mount_conns))
 
         cmdargs = []
         if self.using_namespace:
@@ -864,7 +869,7 @@ class LocalFuseMount(FuseMount):
         self.fuse_daemon = self.client_remote.run(args=cmdargs, wait=False,
             omit_sudo=False, stdout=mountcmd_stdout, stderr=mountcmd_stderr)
         self._set_fuse_daemon_pid(check_status)
-        log.info("Mounting client.{0} with pid "
+        log.debug("Mounting client.{0} with pid "
                  "{1}".format(self.client_id, self.fuse_daemon.subproc.pid))
 
         # Wait for the connection reference to appear in /sys
@@ -890,7 +895,7 @@ class LocalFuseMount(FuseMount):
                 ))
             post_mount_conns = list_connections()
 
-        log.info("Post-mount connections: {0}".format(post_mount_conns))
+        log.debug("Post-mount connections: {0}".format(post_mount_conns))
 
         # Record our fuse connection number so that we can use it when
         # forcing an unmount
@@ -951,7 +956,7 @@ class LocalCephManager(CephManager):
         # certain teuthology tests want to run tasks in parallel
         self.lock = threading.RLock()
 
-        self.log = lambda x: log.info(x)
+        self.log = lambda x: log.debug(x)
 
         # Don't bother constructing a map of pools: it should be empty
         # at test cluster start, and in any case it would be out of date
@@ -1105,7 +1110,7 @@ class LocalCephCluster(CephCluster):
             existing_str += "\n[{0}]\n".format(subsys)
             for key, val in kvs.items():
                 # Comment out existing instance if it exists
-                log.info("Searching for existing instance {0}/{1}".format(
+                log.debug("Searching for existing instance {0}/{1}".format(
                     key, subsys
                 ))
                 existing_section = re.search("^\[{0}\]$([\n]|[^\[])+".format(
@@ -1117,7 +1122,7 @@ class LocalCephCluster(CephCluster):
                     existing_val = re.search("^\s*[^#]({0}) =".format(key), section_str, re.MULTILINE)
                     if existing_val:
                         start = existing_section.start() + existing_val.start(1)
-                        log.info("Found string to replace at {0}".format(
+                        log.debug("Found string to replace at {0}".format(
                             start
                         ))
                         existing_str = existing_str[0:start] + "#" + existing_str[start:]
@@ -1190,7 +1195,7 @@ class LocalFilesystem(Filesystem, LocalMDSCluster):
 
         self.mds_ids = list(self.mds_ids)
 
-        log.info("Discovered MDS IDs: {0}".format(self.mds_ids))
+        log.debug("Discovered MDS IDs: {0}".format(self.mds_ids))
 
         self.mon_manager = LocalCephManager()
 
@@ -1269,7 +1274,7 @@ class LocalContext(object):
 
 
 def enumerate_methods(s):
-    log.info("e: {0}".format(s))
+    log.debug("e: {0}".format(s))
     for t in s._tests:
         if isinstance(t, suite.BaseTestSuite):
             for sub in enumerate_methods(t):
@@ -1280,15 +1285,15 @@ def enumerate_methods(s):
 
 def load_tests(modules, loader):
     if modules:
-        log.info("Executing modules: {0}".format(modules))
+        log.debug("Executing modules: {0}".format(modules))
         module_suites = []
         for mod_name in modules:
             # Test names like cephfs.test_auto_repair
             module_suites.append(loader.loadTestsFromName(mod_name))
-        log.info("Loaded: {0}".format(list(module_suites)))
+        log.debug("Loaded: {0}".format(list(module_suites)))
         return suite.TestSuite(module_suites)
     else:
-        log.info("Executing all cephfs tests")
+        log.debug("Executing all cephfs tests")
         return loader.discover(
             os.path.join(os.path.dirname(os.path.abspath(__file__)), "cephfs")
         )
@@ -1346,7 +1351,7 @@ def clear_old_log():
         with open(logpath, 'w') as logfile:
             logfile.write('')
         init_log()
-        log.info('logging in a fresh file now...')
+        log.debug('logging in a fresh file now...')
 
 
 class LogStream(object):
@@ -1729,7 +1734,7 @@ def exec_test():
             if not is_named:
                 victims.append((case, method))
 
-    log.info("Disabling {0} tests because of is_for_teuthology or needs_trimming".format(len(victims)))
+    log.debug("Disabling {0} tests because of is_for_teuthology or needs_trimming".format(len(victims)))
     for s, method in victims:
         s._tests.remove(method)