]> git-server-git.apps.pok.os.sepia.ceph.com Git - teuthology.git/commitdiff
Added task level log files and job tasks html report file in archive directory wip-sunil-rh
authorsunilkumarn417 <sunnagar@redhat.com>
Wed, 16 Sep 2020 13:21:41 +0000 (18:51 +0530)
committersunilkumarn417 <sunnagar@redhat.com>
Fri, 25 Sep 2020 09:11:18 +0000 (14:41 +0530)
Also change the maximum job run time in worker.py.

Signed-off-by: sunilkumarn417 <sunnagar@redhat.com>
teuthology/run_tasks.py
teuthology/templates/task-results.html [new file with mode: 0644]
teuthology/worker.py

index 99e5a4abb72b003d83ed1c5455b6b46b9fba60ca..8fcd54510b9a656eb2f54f97793770ed79de83ee 100644 (file)
@@ -7,6 +7,7 @@ import types
 
 from copy import deepcopy
 from humanfriendly import format_timespan
+from jinja2 import Environment, FileSystemLoader, select_autoescape
 
 from teuthology.config import config as teuth_config
 from teuthology.exceptions import ConnectionLostError
@@ -15,7 +16,11 @@ from teuthology.misc import get_http_log_path, get_results_url
 from teuthology.sentry import get_client as get_sentry_client
 from teuthology.timer import Timer
 
+root_logger = logging.getLogger()
 log = logging.getLogger(__name__)
+formatter = logging.Formatter(
+        fmt=u'%(asctime)s.%(msecs)03d %(levelname)s:%(name)s:%(message)s',
+        datefmt='%Y-%m-%dT%H:%M:%S')
 
 
 def get_task(name):
@@ -63,6 +68,88 @@ def _import(from_package, module_name, task_name, fail_on_import_error=False):
     return module
 
 
+def add_file_handler(logger, level, log_formatter, filename):
+    """
+    Attach a file handler with the given level and formatter to logger.
+    Args:
+        logger: logger
+        level : logging level
+        log_formatter: logging format
+        filename : destination file
+    Returns:
+        handler
+    """
+    handler = logging.FileHandler(filename)
+    handler.setLevel(level)
+    handler.setFormatter(log_formatter)
+    logger.addHandler(handler)
+    return handler
+
+
+def get_log_level(ctx):
+    """
+    Get log level
+    Args:
+        ctx: context object
+    Returns:
+        log level
+    """
+    return logging.DEBUG if ctx.verbose else logging.INFO
+
+
+def remove_log_handler(handler):
+    """
+    Remove log handler
+    Args:
+        handler: log handler obj
+    """
+    if handler:
+        root_logger.removeHandler(handler)
+
+
+def configure_logging(ctx, archive_path, directory):
+    """
+    Configure logging
+        1) create directory in archive path
+        2) get log level and log formatter
+    Args:
+        ctx: context object
+        archive_path: archive directory under which the log directory is created
+        directory: directory name
+    Returns:
+        tuple of (log_path, log_formatter, log_level)
+    """
+    # create tasks directory for task logs
+    log_path = os.path.join(archive_path, directory)
+    os.makedirs(log_path, exist_ok=True)
+
+    # configure log level and formatter
+    log_level = get_log_level(ctx)
+    return log_path, formatter, log_level
+
+
+def generate_unique_name(task_name, task_list):
+    """
+    Generate a unique log file name; returns (file_name, occurrence_count).
+    """
+    task_count = len([task_name for task in task_list
+                      if task.get('name').startswith(task_name)]) + 1
+    return "{}-{}.log".format(task_name, task_count), task_count
+
+
+def generate_result(all_tasks):
+    """
+    Log a summary of each task's result.
+    """
+    mark = "=" * 25
+    if not all_tasks:
+        log.warning("No tasks found to display results")
+        return
+    log.info("\n{} Tasks Results {}\n".format(mark, mark))
+    for task in all_tasks:
+        log.info(task)
+
+
 def run_one_task(taskname, **kwargs):
     taskname = taskname.replace('-', '_')
     task = get_task(taskname)
@@ -71,7 +158,14 @@ def run_one_task(taskname, **kwargs):
 
 def run_tasks(tasks, ctx):
     archive_path = ctx.config.get('archive_path')
+    tasks_dir = "tasks"
+    log_path = str()
+    log_level = str()
+    log_formatter = str()
+
     if archive_path:
+        log_path, log_formatter, log_level = \
+            configure_logging(ctx, archive_path, tasks_dir)
         timer = Timer(
             path=os.path.join(archive_path, 'timing.yaml'),
             sync=True,
@@ -79,19 +173,55 @@ def run_tasks(tasks, ctx):
     else:
         timer = Timer()
     stack = []
+    all_tasks = []
     try:
         for taskdict in tasks:
             try:
                 ((taskname, config),) = taskdict.items()
             except (ValueError, AttributeError):
                 raise RuntimeError('Invalid task definition: %s' % taskdict)
+
+            # Task log creation
+            handler = str()
+            log_file_name, count = generate_unique_name(taskname, all_tasks)
+            task_log_file = os.path.join(log_path, log_file_name)
+            task_log_link = os.path.join(tasks_dir, log_file_name)
+            all_tasks.append(
+                {
+                    "name": taskname + "[%s]" % count if count > 1 else taskname,
+                    "log_file_name": task_log_link,
+                    "log_link": task_log_link,
+                    "log_file": task_log_file
+                }
+            )
+
+            if archive_path:
+                # attach a file handler so this task's output goes to its own log file
+                handler = add_file_handler(root_logger, log_level,
+                                           log_formatter, task_log_file)
             log.info('Running task %s...', taskname)
+
             timer.mark('%s enter' % taskname)
             manager = run_one_task(taskname, ctx=ctx, config=config)
+
             if hasattr(manager, '__enter__'):
-                stack.append((taskname, manager))
+                stack.append((taskname, manager, log_file_name))
                 manager.__enter__()
+            log.info("No exceptions found : Pass")
+            all_tasks[-1].update({'status': 'PASS'})
+            remove_log_handler(handler)
     except BaseException as e:
+        all_tasks[-1].update({'status': 'FAIL'})
+        task = all_tasks[-1]
+        handler = str()
+
+        # Create failed task log file
+        if archive_path:
+            fail_log_file = os.path.join(log_path,
+                                         "failed-" + task.get('log_file_name'))
+            handler = add_file_handler(root_logger, log_level,
+                                       log_formatter, fail_log_file)
+
         if isinstance(e, ConnectionLostError):
             # Prevent connection issues being flagged as failures
             set_status(ctx.summary, 'dead')
@@ -153,6 +283,10 @@ def run_tasks(tasks, ctx):
                 emsg = 'Possible configuration error in yaml file'
                 log.error(emsg)
                 ctx.summary['failure_info'] = emsg
+        log.error(ctx.summary)
+
+        # Remove failed log file handler
+        remove_log_handler(handler)
     finally:
         try:
             exc_info = sys.exc_info()
@@ -164,9 +298,24 @@ def run_tasks(tasks, ctx):
                     .format(sleep_before_teardown))
                 notify_sleep_before_teardown(ctx, stack, sleep_before_teardown)
                 time.sleep(sleep_before_teardown)
+
+            # Remove tasks log file handlers
+            if archive_path:
+                for handler in root_logger.handlers:
+                    if isinstance(handler, logging.FileHandler):
+                        if log_path in handler.baseFilename:
+                            root_logger.removeHandler(handler)
+
             while stack:
-                taskname, manager = stack.pop()
-                log.debug('Unwinding manager %s', taskname)
+                # Append unwinding logs to same task log file(s)
+                taskname, manager, log_file_name = stack.pop()
+                handler = str()
+                if archive_path:
+                    log_file = os.path.join(log_path, log_file_name)
+                    handler = add_file_handler(root_logger, log_level,
+                                               log_formatter, log_file)
+
+                log.debug('\n\nUnwinding manager %s' % taskname)
                 timer.mark('%s exit' % taskname)
                 try:
                     suppress = manager.__exit__(*exc_info)
@@ -194,6 +343,9 @@ def run_tasks(tasks, ctx):
                     if suppress:
                         exc_info = (None, None, None)
 
+                # Remove task log file handler
+                remove_log_handler(handler)
+
             if exc_info != (None, None, None):
                 log.debug('Exception was not quenched, exiting: %s: %s',
                           exc_info[0].__name__, exc_info[1])
@@ -202,6 +354,41 @@ def run_tasks(tasks, ctx):
             # be careful about cyclic references
             del exc_info
         timer.mark("tasks complete")
+        generate_result(all_tasks)
+        build_tasks_results(archive_path, all_tasks)
+
+
+def build_tasks_results(archive_path, all_tasks):
+    """
+    Build task results html file
+    Args:
+        archive_path: archive directory to write the HTML report into
+        all_tasks: list of per-task result dicts
+    """
+    if not archive_path:
+        log.warning("task results html file cannot be formed")
+        return
+
+    template_dir = os.path.join(os.path.dirname(__file__),
+                                'templates')
+
+    env = Environment(
+        loader=FileSystemLoader(template_dir),
+        autoescape=select_autoescape(['html', 'xml'])
+    )
+
+    template = env.get_template('task-results.html')
+
+    html = template.render(
+        test_results=all_tasks
+    )
+
+    html_fpath = os.path.join(archive_path, "result.html")
+
+    with open(html_fpath, "wt") as html_file:
+        html_file.write(html)
+
 
 def build_email_body(ctx, stack, sleep_time_sec):
     email_template_path = os.path.dirname(__file__) + \
@@ -214,7 +401,7 @@ def build_email_body(ctx, stack, sleep_time_sec):
     archive_path = ctx.config.get('archive_path')
     job_id = ctx.config.get('job_id')
     status = get_status(ctx.summary)
-    stack_path = '/'.join(task for task, _ in stack)
+    stack_path = '/'.join(task for task, _, _ in stack)
     suite_name=ctx.config.get('suite')
     sleep_date=time.time()
     sleep_date_str=time.strftime('%Y-%m-%d %H:%M:%S',
@@ -239,6 +426,7 @@ def build_email_body(ctx, stack, sleep_time_sec):
     )
     return (subject.strip(), body.strip())
 
+
 def notify_sleep_before_teardown(ctx, stack, sleep_time):
     email = ctx.config.get('email', None)
     if not email:
@@ -256,4 +444,3 @@ def notify_sleep_before_teardown(ctx, stack, sleep_time):
     smtp = smtplib.SMTP('localhost')
     smtp.sendmail(msg['From'], [msg['To']], msg.as_string())
     smtp.quit()
-
diff --git a/teuthology/templates/task-results.html b/teuthology/templates/task-results.html
new file mode 100644 (file)
index 0000000..b425831
--- /dev/null
@@ -0,0 +1,41 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <title></title>
+    <style>
+    table.full-table{
+        font-family: arial, sans-serif;
+        border-collapse: collapse;
+        width: 100%;
+    }
+
+    td, th {
+        border: 1px solid #dddddd;
+        text-align: left;
+        padding: 5px;
+    }
+    </style>
+</head>
+<body>
+    <h2 style="color: black;" >Test Job Result</h2>
+    <table class="full-table">
+        <tr>
+            <th style="color: blue">Task Name</th>
+            <th style="color: blue">Task Log</th>
+            <th style="color: blue">Status</th>
+        </tr>
+        {% for test in test_results %}
+            <tr>
+                <td>{{ test.name }}</td>
+                <td>{{ test.log_file }}</td>
+                {% if test.status == 'PASS' %}
+                    <td><a style="color: green;" href="{{ test['log_link'] }}">{{ test.status }}</a></td>
+                {% elif test.status == 'FAIL' %}
+                    <td><a style="color: red;" href="{{ test['log_link'] }}">{{ test.status }}</a></td>
+                {% endif %}
+            </tr>
+        {% endfor %}
+    </table>
+</body>
+</html>
index e7c1eb6ef7a8114c32fded41324f0fec8627de79..748584d3d7e294bb7361235c2b6c8357908b997c 100644 (file)
@@ -296,6 +296,14 @@ def run_job(job_config, teuth_bin_path, archive_dir, verbose):
 def run_with_watchdog(process, job_config):
     job_start_time = datetime.utcnow()
 
+    # change max job time for system tests
+    max_job_time = teuth_config.max_job_time
+    log.info("Job name : {}".format(job_config['name']))
+    if 'system-tests:' in job_config['name']:
+        max_job_time = 1800
+
+    log.info("max job time set to {}".format(max_job_time))
+
     # Only push the information that's relevant to the watchdog, to save db
     # load
     job_info = dict(
@@ -310,9 +318,10 @@ def run_with_watchdog(process, job_config):
         # Kill jobs that have been running longer than the global max
         run_time = datetime.utcnow() - job_start_time
         total_seconds = run_time.days * 60 * 60 * 24 + run_time.seconds
-        if total_seconds > teuth_config.max_job_time:
+        log.info("Total seconds elapsed : {} ".format(total_seconds))
+        if total_seconds > max_job_time:
             log.warning("Job ran longer than {max}s. Killing...".format(
-                max=teuth_config.max_job_time))
+                max=max_job_time))
             kill_job(job_info['name'], job_info['job_id'],
                      teuth_config.archive_base, job_config['owner'])