xref: /aosp_15_r20/external/autotest/site_utils/generate_test_report (revision 9c5db1993ded3edbeafc8092d69fe5de2ee02df7)
1#!/usr/bin/python3
2# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
3# Use of this source code is governed by a BSD-style license that can be
4# found in the LICENSE file.
5
6
7"""Parses and displays the contents of one or more autoserv result directories.
8
9This script parses the contents of one or more autoserv results folders and
10generates test reports.
11"""
12
13from __future__ import absolute_import
14from __future__ import division
15from __future__ import print_function
16
17import datetime
18import glob
19import json
20import logging
21import operator
22import optparse
23import os
24import re
25import six
26from six.moves import range
27import sys
28
29import common
30from autotest_lib.utils import terminal
31
32
33_STDOUT_IS_TTY = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
34
35
def Die(message_format, *args, **kwargs):
    """Log an error message, then terminate the process with exit status 1.

    @param message_format: format string for logging.error; any extra
            positional/keyword arguments are forwarded to it.

    """
    logging.error(message_format, *args, **kwargs)
    sys.exit(1)
44
45
class CrashWaiver:
    """Represents a crash that we want to ignore for now."""

    def __init__(self, signals, deadline, url, person):
        """Record a waived crash.

        @param signals: list of signal strings (e.g. 'sig 11') covered.
        @param deadline: expiry date string in '%Y-%b-%d' form (e.g.
                '2011-Aug-18'); parsed into a datetime.
        @param url: tracking-bug URL for the waived crash.
        @param person: developer who added the waiver.

        """
        self.signals = signals
        self.deadline = datetime.datetime.strptime(deadline, '%Y-%b-%d')
        self.issue_url = url
        self.suppressor = person
53
54# List of crashes which are okay to ignore. This list should almost always be
55# empty. If you add an entry, include the bug URL and your name, something like
56#     'crashy':CrashWaiver(
57#       ['sig 11'], '2011-Aug-18', 'http://crosbug/123456', 'developer'),
58
# Maps crashed-process name -> CrashWaiver.  Empty: no crashes are waived.
_CRASH_ALLOWLIST = {
}
61
62
class ResultCollector(object):
    """Collects status and performance data from an autoserv results dir."""

    def __init__(self, collect_perf=True, collect_attr=False,
                 collect_info=False, escape_error=False,
                 allow_chrome_crashes=False):
        """Initialize ResultsCollector class.

        @param collect_perf: Should perf keyvals be collected?
        @param collect_attr: Should attr keyvals be collected?
        @param collect_info: Should info keyvals be collected?
        @param escape_error: Escape error message text for tools.
        @param allow_chrome_crashes: Treat Chrome crashes as non-fatal.

        """
        self._collect_perf = collect_perf
        self._collect_attr = collect_attr
        self._collect_info = collect_info
        self._escape_error = escape_error
        self._allow_chrome_crashes = allow_chrome_crashes

    def _CollectPerf(self, testdir):
        """Parses keyval file under testdir and return the perf keyval pairs.

        @param testdir: autoserv test result directory path.

        @return dict of perf keyval pairs ({} when perf collection is off).

        """
        if not self._collect_perf:
            return {}
        return self._CollectKeyval(testdir, 'perf')

    def _CollectAttr(self, testdir):
        """Parses keyval file under testdir and return the attr keyval pairs.

        @param testdir: autoserv test result directory path.

        @return dict of attr keyval pairs ({} when attr collection is off).

        """
        if not self._collect_attr:
            return {}
        return self._CollectKeyval(testdir, 'attr')

    def _CollectKeyval(self, testdir, keyword):
        """Parses keyval file under testdir.

        If testdir contains a result folder, process the keyval file and
        return a dictionary of keyval pairs for the given keyword.

        @param testdir: The autoserv test result directory.
        @param keyword: The keyword of keyval, either 'perf' or 'attr'.

        @return If there's no keyval file under testdir, returns an empty
                dictionary.  Otherwise, returns a dictionary of parsed
                keyvals.  Duplicate keys are uniquified by their instance
                number: 'key', 'key{1}', 'key{2}', ...

        """
        keyval = {}
        keyval_file = os.path.join(testdir, 'results', 'keyval')
        if not os.path.isfile(keyval_file):
            return keyval

        instances = {}
        # Compile once outside the line loop.
        line_re = re.compile(r'^(.+){%s}=(.+)$' % keyword)

        # 'with' closes the handle deterministically; the original
        # 'for line in open(...)' leaked it.
        with open(keyval_file) as f:
            for line in f:
                match = line_re.search(line)
                if not match:
                    continue
                key, val = match.group(1), match.group(2)

                # If the same key name was generated multiple times, uniquify
                # all instances other than the first one by adding the
                # instance count to the key name.
                instance = instances.get(key, 0)
                key_inst = '%s{%d}' % (key, instance) if instance else key
                instances[key] = instance + 1
                keyval[key_inst] = val

        return keyval

    def _CollectCrashes(self, status_raw):
        """Parses status_raw file for crashes.

        Saves crash details if crashes are discovered.  If an allowlist is
        present, only records allowed crashes.

        @param status_raw: The contents of the status.log or status file from
                the test.

        @return a list of 'process signal' crash entries to be reported.

        """
        crashes = []
        # Raw string so \w and \d are regex escapes, not string-literal
        # escapes (the original non-raw literal raised DeprecationWarning).
        regex = re.compile(
                r'Received crash notification for ([-\w]+).+ (sig \d+)')
        chrome_regex = re.compile(r'^supplied_[cC]hrome|^chrome$')
        for match in regex.finditer(status_raw):
            waiver = _CRASH_ALLOWLIST.get(match.group(1))
            if (self._allow_chrome_crashes and
                    chrome_regex.match(match.group(1))):
                print('@@@STEP_WARNINGS@@@')
                print('%s crashed with %s' % (match.group(1), match.group(2)))
            elif (waiver is not None and match.group(2) in waiver.signals and
                        waiver.deadline > datetime.datetime.now()):
                print('Ignoring crash in %s for waiver that expires %s' % (
                        match.group(1), waiver.deadline.strftime('%Y-%b-%d')))
            else:
                crashes.append('%s %s' % match.groups())
        return crashes

    def _CollectInfo(self, testdir, custom_info):
        """Parses *_info files under testdir/sysinfo/var/log.

        If the sysinfo/var/log/*info files exist, save information that shows
        hw, ec and bios version info.

        This collection of extra info is disabled by default (this function is
        a no-op).  It is enabled only if the --info command-line option is
        explicitly supplied.  Normal job parsing does not supply this option.

        @param testdir: The autoserv test result directory.
        @param custom_info: Dictionary to collect detailed ec/bios info;
                discovered keys are added to it in place.

        @return a dictionary of info that was discovered.

        """
        if not self._collect_info:
            return {}
        info = custom_info

        sysinfo_dir = os.path.join(testdir, 'sysinfo', 'var', 'log')
        # dict.items() replaces six.iteritems; this script runs under
        # Python 3 (see the shebang) where they are equivalent.
        for info_file, info_keys in {
                'ec_info.txt': ['fw_version'],
                'bios_info.txt': ['fwid', 'hwid']}.items():
            info_file_path = os.path.join(sysinfo_dir, info_file)
            if not os.path.isfile(info_file_path):
                continue
            # Some example raw text that might be matched include:
            #
            # fw_version           | snow_v1.1.332-cf20b3e
            # fwid = Google_Snow.2711.0.2012_08_06_1139 # Active firmware ID
            # hwid = DAISY TEST A-A 9382                # Hardware ID
            info_regex = re.compile(r'^(%s)\s*[|=]\s*(.*)' %
                                    '|'.join(info_keys))
            with open(info_file_path, 'r') as f:
                for line in f:
                    # Strip whitespace, then drop trailing '#' comments.
                    line = line.strip()
                    line = line.split('#')[0]
                    match = info_regex.match(line)
                    if match:
                        info[match.group(1)] = str(match.group(2)).strip()
        return info

    def _CollectEndTimes(self, status_raw, status_re='', is_end=True):
        """Helper to match and collect timestamp and localtime.

        Preferred to locate timestamp and localtime with an
        'END GOOD test_name...' line.  However, aborted tests occasionally
        fail to produce this line and then need to scrape timestamps from the
        'START test_name...' line.

        @param status_raw: multi-line text to search.
        @param status_re: status regex to seek (e.g. GOOD|FAIL)
        @param is_end: if True, search for 'END' otherwise 'START'.

        @return Tuple of timestamp, localtime retrieved from the test status
                log ('', '' when no line matches).

        """
        timestamp = ''
        localtime = ''

        localtime_re = r'\w+\s+\w+\s+[:\w]+'
        match_filter = (
                r'^\s*%s\s+(?:%s).*timestamp=(\d*).*localtime=(%s).*$' % (
                'END' if is_end else 'START', status_re, localtime_re))
        # There may be multiple lines with timestamp/localtime info.  Scan
        # from the last match backwards and keep the largest timestamp so
        # the result reflects the end time.  NOTE: timestamps are compared
        # as strings, preserving the original behavior.
        for timestamp_, localtime_ in reversed(
                re.findall(match_filter, status_raw, re.MULTILINE)):
            if not timestamp or timestamp_ > timestamp:
                timestamp = timestamp_
                localtime = localtime_
        return timestamp, localtime

    def _CheckExperimental(self, testdir):
        """Parses keyval file and return the value of `experimental`.

        @param testdir: The result directory that has the keyval file.

        @return True iff the keyval file contains 'experimental=True';
                False when the file or the key is missing.

        """
        keyval_file = os.path.join(testdir, 'keyval')
        if not os.path.isfile(keyval_file):
            return False

        with open(keyval_file) as f:
            for line in f:
                match = re.match(r'experimental=(.+)', line)
                if match:
                    return match.group(1) == 'True'
        # No 'experimental' entry found (clearer than the original
        # for/else construct, same result).
        return False

    def _get_failure_msg_from_status(self, status_raw):
        """Extract 'TAG: reason' from the first failure line in status_raw.

        @param status_raw: the status log contents.

        @return 'TAG: reason' built from the first
                ABORT/ERROR/FAIL/WARN/TEST_NA line found, or
                'Reason Unknown' when none matches.

        """
        reason_tags = 'ABORT|ERROR|FAIL|WARN|TEST_NA'
        match = re.search(r'^\t+(%s)\t(.+)' % (reason_tags),
                          status_raw, re.MULTILINE)

        error_msg = 'Reason Unknown'
        if match:
            failure_type = match.group(1)
            # Status-line fields are tab-delimited; the human-readable
            # reason is the fifth field after the tag.  Guard against short
            # lines (the original raised IndexError on them).
            fields = match.group(2).split('\t')
            reason = fields[4] if len(fields) > 4 else 'Reason Unknown'
            if self._escape_error:
                reason = re.escape(reason)
            error_msg = ': '.join([failure_type, reason])

        return error_msg

    def _get_full_status(self, status_raw):
        """Collect the full status of a test, and err msg if any.

        This will grab the full status, rather than just pass/fail.
        Additionally, if there is an err msg, it will be scraped as well.

        @param status_raw: the status log, as a string.

        @return The full status, and the err msg, if any.

        """
        # Order matters: the first tag found wins.  Plain substring checks
        # are equivalent to the original re.search on literal tags.
        status = 'Error'
        if 'FAIL' in status_raw:
            status = 'Fail'
        elif 'ERROR' in status_raw:
            status = 'Error'
        elif 'ABORT' in status_raw:
            status = 'Abort'
        elif 'WARN' in status_raw:
            status = 'Warn'
        elif 'TEST_NA' in status_raw:
            status = 'Not Run'
        elif re.search(r'GOOD.+completed successfully', status_raw):
            return 'Pass', None

        return status, self._get_failure_msg_from_status(status_raw)

    def _CollectResult(self, testdir, results, is_experimental=False):
        """Collects results stored under testdir into a dictionary.

        The presence/location of status files (status.log, status and
        job_report.html) varies depending on whether the job is a simple
        client test, simple server test, old-style suite or new-style
        suite.  For example:
        -In some cases a single job_report.html may exist but many times
         multiple instances are produced in a result tree.
        -Most tests will produce a status.log but client tests invoked
         by a server test will only emit a status file.

        The two common criteria that seem to define the presence of a
        valid test result are:
        1. Existence of a 'status.log' or 'status' file. Note that if both a
             'status.log' and 'status' file exist for a test, the 'status'
             file is always a subset of the 'status.log' file contents.
        2. Presence of a 'debug' directory.

        In some cases multiple 'status.log' files will exist where the parent
        'status.log' contains the contents of multiple subdirectory
        'status.log' files.  Parent and subdirectory 'status.log' files are
        always expected to agree on the outcome of a given test.

        The test results discovered from the 'status*' files are included
        in the result dictionary.  The test directory name and a test
        directory timestamp/localtime are saved to be used as sort keys for
        the results.

        The value of 'is_experimental' is included in the result dictionary.

        @param testdir: The autoserv test result directory.
        @param results: A list to which a populated test-result-dictionary
                will be appended if a status file is found.
        @param is_experimental: A boolean value indicating whether the result
                directory is for an experimental test.

        """
        status_file = os.path.join(testdir, 'status.log')
        if not os.path.isfile(status_file):
            status_file = os.path.join(testdir, 'status')
            if not os.path.isfile(status_file):
                return

        # Status is True if GOOD, else False for all others.
        status = False
        error_msg = ''
        # 'with' closes the handle; the original open().read() leaked it.
        with open(status_file, 'r') as f:
            status_raw = f.read()
        failure_tags = 'ABORT|ERROR|FAIL'
        warning_tag = 'WARN|TEST_NA'
        failure = re.search(failure_tags, status_raw)
        warning = re.search(warning_tag, status_raw) and not failure
        good = (re.search(r'GOOD.+completed successfully', status_raw) and
                not (failure or warning))

        # We'd like warnings to allow the tests to pass, but still gather
        # info.
        if good or warning:
            status = True

        if not good:
            error_msg = self._get_failure_msg_from_status(status_raw)

        # Grab the timestamp - can be used for sorting the test runs.
        # Grab the localtime - may be printed to enable line filtering by
        # date.  Designed to match a line like this:
        #   END GOOD testname ... timestamp=1347324321 localtime=Sep 10 ...
        status_re = r'GOOD|%s|%s' % (failure_tags, warning_tag)
        endtimestamp, endlocaltime = self._CollectEndTimes(status_raw,
                                                           status_re)
        starttimestamp, startlocaltime = self._CollectEndTimes(status_raw,
                                                               is_end=False)
        # Hung tests will occasionally skip printing the END line so grab
        # a default timestamp from the START line in those cases.
        if not endtimestamp:
            endtimestamp, endlocaltime = starttimestamp, startlocaltime

        full_status = False
        for r in results:
            # Already logged results for this test (this testdir is nested
            # under a directory we have already reported on).
            if r['testdir'] in testdir:
                full_status, err = None, None
                break

        if full_status is not None:
            full_status, err = self._get_full_status(status_raw)

        results.append({
                'testdir': testdir,
                'crashes': self._CollectCrashes(status_raw),
                'status': status,
                'error_msg': error_msg,
                'localtime': endlocaltime,
                'timestamp': endtimestamp,
                'perf': self._CollectPerf(testdir),
                'attr': self._CollectAttr(testdir),
                'info': self._CollectInfo(testdir, {'localtime': endlocaltime,
                                                    'timestamp': endtimestamp}),
                'experimental': is_experimental,
                'full_status': full_status,
                'full_err': err,
                'startlocaltime': startlocaltime,
                'starttimestamp': starttimestamp
                })

    def RecursivelyCollectResults(self,
                                  resdir,
                                  parent_experimental_tag=False,
                                  results=None):
        """Recursively collect results into a list of dictionaries.

        Only recurses into directories that possess a 'debug' subdirectory
        because anything else is not considered a 'test' directory.

        The value of 'experimental' in keyval file is used to determine
        whether the result is for an experimental test. If it is, all its sub
        directories are considered to be experimental tests too.

        @param resdir: results/test directory to parse results from and
                recurse into.
        @param parent_experimental_tag: A boolean value, used to keep track
                of whether its parent directory is for an experimental test.
        @param results: list to accumulate into; leave unset at the top
                level.  (The original signature used a mutable default
                argument, so successive top-level calls appended into one
                shared list and duplicated earlier results.)

        @return List of dictionaries of results.

        """
        if results is None:
            results = []
        is_experimental = (parent_experimental_tag or
                           self._CheckExperimental(resdir))
        self._CollectResult(resdir, results, is_experimental)
        for testdir in glob.glob(os.path.join(resdir, '*')):
            # Remove false positives that are missing a debug dir.
            if not os.path.exists(os.path.join(testdir, 'debug')):
                continue

            self.RecursivelyCollectResults(testdir, is_experimental, results)
        return results
458
459
class ReportGenerator(object):
    """Collects and displays data from autoserv results directories.

    This class collects status and performance data from one or more autoserv
    result directories and generates test reports.
    """

    # Number of spaces used to indent keyval and error-detail lines.
    _KEYVAL_INDENT = 2
    # Pass/fail strings keyed by output mode: 'hr' (human-readable, used by
    # default) vs 'csv' (old-style strings, used when --csv is set).
    _STATUS_STRINGS = {'hr': {'pass': '[  PASSED  ]', 'fail': '[  FAILED  ]'},
                       'csv': {'pass': 'PASS', 'fail': 'FAIL'}}

    def __init__(self, options, args):
        """Initialize the generator.

        @param options: parsed command-line options object; '.color' is read
                here, other flags (csv, perf, attr, ...) elsewhere.
        @param args: positional arguments: autoserv results directories.

        """
        self._options = options
        self._args = args
        self._color = terminal.Color(options.color)
        self._results = []
476
477    def _CollectAllResults(self):
478        """Parses results into the self._results list.
479
480        Builds a list (self._results) where each entry is a dictionary of
481        result data from one test (which may contain other tests). Each
482        dictionary will contain values such as: test folder, status, localtime,
483        crashes, error_msg, perf keyvals [optional], info [optional].
484
485        """
486        collector = ResultCollector(
487                collect_perf=self._options.perf,
488                collect_attr=self._options.attr,
489                collect_info=self._options.info,
490                escape_error=self._options.escape_error,
491                allow_chrome_crashes=self._options.allow_chrome_crashes)
492
493        for resdir in self._args:
494            if not os.path.isdir(resdir):
495                Die('%r does not exist', resdir)
496            self._results.extend(collector.RecursivelyCollectResults(resdir))
497
498        if not self._results:
499            Die('no test directories found')
500
501    def _GenStatusString(self, status):
502        """Given a bool indicating success or failure, return the right string.
503
504        Also takes --csv into account, returns old-style strings if it is set.
505
506        @param status: True or False, indicating success or failure.
507
508        @return The appropriate string for printing..
509
510        """
511        success = 'pass' if status else 'fail'
512        if self._options.csv:
513            return self._STATUS_STRINGS['csv'][success]
514        return self._STATUS_STRINGS['hr'][success]
515
516    def _Indent(self, msg):
517        """Given a message, indents it appropriately.
518
519        @param msg: string to indent.
520        @return indented version of msg.
521
522        """
523        return ' ' * self._KEYVAL_INDENT + msg
524
525    def _GetTestColumnWidth(self):
526        """Returns the test column width based on the test data.
527
528        The test results are aligned by discovering the longest width test
529        directory name or perf key stored in the list of result dictionaries.
530
531        @return The width for the test column.
532
533        """
534        width = 0
535        for result in self._results:
536            width = max(width, len(result['testdir']))
537            perf = result.get('perf')
538            if perf:
539                perf_key_width = len(max(perf, key=len))
540                width = max(width, perf_key_width + self._KEYVAL_INDENT)
541        return width
542
543    def _PrintDashLine(self, width):
544        """Prints a line of dashes as a separator in output.
545
546        @param width: an integer.
547        """
548        if not self._options.csv:
549            print(''.ljust(width +
550                  len(self._STATUS_STRINGS['hr']['pass']), '-'))
551
552    def _PrintEntries(self, entries):
553        """Prints a list of strings, delimited based on --csv flag.
554
555        @param entries: a list of strings, entities to output.
556
557        """
558        delimiter = ',' if self._options.csv else ' '
559        print(delimiter.join(entries))
560
561    def _PrintErrors(self, test, error_msg):
562        """Prints an indented error message, unless the --csv flag is set.
563
564        @param test: the name of a test with which to prefix the line.
565        @param error_msg: a message to print.  None is allowed, but ignored.
566
567        """
568        if not self._options.csv and error_msg:
569            self._PrintEntries([test, self._Indent(error_msg)])
570
571    def _PrintErrorLogs(self, test, test_string):
572        """Prints the error log for |test| if --debug is set.
573
574        @param test: the name of a test suitable for embedding in a path
575        @param test_string: the name of a test with which to prefix the line.
576
577        """
578        if self._options.print_debug:
579            debug_file_regex = os.path.join(
580                    'results.', test, 'debug',
581                    '%s*.ERROR' % os.path.basename(test))
582            for path in glob.glob(debug_file_regex):
583                try:
584                    with open(path) as fh:
585                        for line in fh:
586                            # Ensure line is not just WS.
587                            if len(line.lstrip()) <=  0:
588                                continue
589                            self._PrintEntries(
590                                    [test_string, self._Indent(line.rstrip())])
591                except IOError:
592                    print('Could not open %s' % path)
593
594    def _PrintResultDictKeyVals(self, test_entry, result_dict):
595        """Formatted print a dict of keyvals like 'perf' or 'info'.
596
597        This function emits each keyval on a single line for uncompressed
598        review.  The 'perf' dictionary contains performance keyvals while the
599        'info' dictionary contains ec info, bios info and some test timestamps.
600
601        @param test_entry: The unique name of the test (dir) - matches other
602                test output.
603        @param result_dict: A dict of keyvals to be presented.
604
605        """
606        if not result_dict:
607            return
608        dict_keys = list(result_dict.keys())
609        dict_keys.sort()
610        width = self._GetTestColumnWidth()
611        for dict_key in dict_keys:
612            if self._options.csv:
613                key_entry = dict_key
614            else:
615                key_entry = dict_key.ljust(width - self._KEYVAL_INDENT)
616                key_entry = key_entry.rjust(width)
617            value_entry = self._color.Color(
618                    self._color.BOLD, result_dict[dict_key])
619            self._PrintEntries([test_entry, key_entry, value_entry])
620
621    def _GetSortedTests(self):
622        """Sort the test result dicts in preparation for results printing.
623
624        By default sorts the results directionaries by their test names.
625        However, when running long suites, it is useful to see if an early test
626        has wedged the system and caused the remaining tests to abort/fail. The
627        datetime-based chronological sorting allows this view.
628
629        Uses the --sort-chron command line option to control.
630
631        """
632        if self._options.sort_chron:
633            # Need to reverse sort the test dirs to ensure the suite folder
634            # shows at the bottom. Because the suite folder shares its datetime
635            # with the last test it shows second-to-last without the reverse
636            # sort first.
637            tests = sorted(self._results, key=operator.itemgetter('testdir'),
638                           reverse=True)
639            tests = sorted(tests, key=operator.itemgetter('timestamp'))
640        else:
641            tests = sorted(self._results, key=operator.itemgetter('testdir'))
642        return tests
643
644    # TODO(zamorzaev): reuse this method in _GetResultsForHTMLReport to avoid
645    # code copying.
646    def _GetDedupedResults(self):
647        """Aggregate results from multiple retries of the same test."""
648        deduped_results = {}
649        for test in self._GetSortedTests():
650            test_details_matched = re.search(r'(.*)results-(\d[0-9]*)-(.*)',
651                                             test['testdir'])
652            if not test_details_matched:
653                continue
654
655            log_dir, test_number, test_name = test_details_matched.groups()
656            if (test_name in deduped_results and
657                deduped_results[test_name].get('status')):
658                # Already have a successfull (re)try.
659                continue
660
661            deduped_results[test_name] = test
662        return list(deduped_results.values())
663
    def _GetResultsForHTMLReport(self):
        """Return cleaned, deduplicated results keyed for the HTML report.

        Aggregates retries of the same test into a single row and maps the
        boolean status onto 'Pass'/'Fail'/'NA' display tags.

        @return dict keyed by stringified serial number ('1', '2', ...); each
                value is a dict with keys 'test', 'status', 'error_msg',
                'crashes', 'attempts' and 's_no'.

        """
        import copy
        # Deep copy: statuses and error messages are rewritten below and the
        # original result dicts must stay untouched.
        tests = copy.deepcopy(self._GetSortedTests())
        pass_tag = "Pass"
        fail_tag = "Fail"
        na_tag = "NA"
        count = 0
        html_results = {}
        for test_status in tests:
            individual_tc_results = {}
            # Split '<log_dir>results-<number>-<test_name>' paths; anything
            # that doesn't match this layout is skipped.
            test_details_matched = re.search(r'(.*)results-(\d[0-9]*)-(.*)',
                                             test_status['testdir'])
            if not test_details_matched:
                continue
            log_dir = test_details_matched.group(1)
            test_number = test_details_matched.group(2)
            test_name = test_details_matched.group(3)
            if '/' in test_name:
                test_name = test_name.split('/')[0]
            if test_status['error_msg'] is None:
                test_status['error_msg'] = ''
            if test_name not in html_results:
                count = count + 1
                # Arranging the results in an order
                individual_tc_results['status'] = test_status['status']
                individual_tc_results['error_msg'] = test_status['error_msg']
                individual_tc_results['s_no'] = count
                individual_tc_results['crashes'] = test_status['crashes']

                # Add <b> and </b> tag for the good format in the report.
                individual_tc_results['attempts'] = \
                    '<b>test_result_number: %s - %s</b> : %s' % (
                        test_number, log_dir, test_status['error_msg'])
                html_results[test_name] = individual_tc_results
            else:

                # If test found already then we are using the previous data
                # instead of creating two different html rows. If existing
                # status is False then needs to be updated
                if html_results[test_name]['status'] is False:
                    html_results[test_name]['status'] = test_status['status']
                    html_results[test_name]['error_msg'] = test_status[
                        'error_msg']
                    html_results[test_name]['crashes'] = \
                        html_results[test_name]['crashes'] + test_status[
                            'crashes']
                    html_results[test_name]['attempts'] = \
                        html_results[test_name]['attempts'] + \
                        '</br><b>test_result_number : %s - %s</b> : %s' % (
                            test_number, log_dir, test_status['error_msg'])

        # Re-formating the dictionary with s_no as key, so that we can have
        # ordered data at the end.
        sorted_html_results = {}
        for key in html_results.keys():
            sorted_html_results[str(html_results[key]['s_no'])] = \
                    html_results[key]
            sorted_html_results[str(html_results[key]['s_no'])]['test'] = key

        # Mapping the test-case status: True->Pass, False->Fail, and
        # True with a non-empty error message -> NA.
        for key in sorted_html_results.keys():
            if sorted_html_results[key]['status']:
                if sorted_html_results[key]['error_msg'] != '':
                    sorted_html_results[key]['status'] = na_tag
                else:
                    sorted_html_results[key]['status'] = pass_tag
            else:
                sorted_html_results[key]['status'] = fail_tag

        return sorted_html_results
736
737    def GenerateReportHTML(self):
738        """Generate clean HTMl report for the results."""
739
740        results = self._GetResultsForHTMLReport()
741        html_table_header = """ <th>S.No</th>
742                                <th>Test</th>
743                                <th>Status</th>
744                                <th>Error Message</th>
745                                <th>Crashes</th>
746                                <th>Attempts</th>
747                            """
748        passed_tests = len([key for key in results.keys() if results[key][
749                'status'].lower() == 'pass'])
750        failed_tests = len([key for key in results.keys() if results[key][
751            'status'].lower() == 'fail'])
752        na_tests = len([key for key in results.keys() if results[key][
753            'status'].lower() == 'na'])
754        total_tests = passed_tests + failed_tests + na_tests
755
756        # Sort the keys
757        ordered_keys = sorted([int(key) for key in results.keys()])
758        html_table_body = ''
759        for key in ordered_keys:
760            key = str(key)
761            if results[key]['status'].lower() == 'pass':
762                color = 'LimeGreen'
763            elif results[key]['status'].lower() == 'na':
764                color = 'yellow'
765            else:
766                color = 'red'
767            html_table_body = html_table_body + """<tr>
768                                                    <td>%s</td>
769                                                    <td>%s</td>
770                                                    <td
771                                                    style="background-color:%s;">
772                                                    %s</td>
773                                                    <td>%s</td>
774                                                    <td>%s</td>
775                                                    <td>%s</td></tr>""" % \
776                                                (key, results[key]['test'],
777                                                 color,
778                                                 results[key]['status'],
779                                                 results[key]['error_msg'],
780                                                 results[key]['crashes'],
781                                                 results[key]['attempts'])
782        html_page = """
783                        <!DOCTYPE html>
784                        <html lang="en">
785                        <head>
786                            <title>Automation Results</title>
787                            <meta charset="utf-8">
788                            <meta name="viewport" content="width=device-width,initial-scale=1">
789                            <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css">
790                            <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
791                            <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js"></script>
792                        </head>
793                        <body>
794                            <div class="container">
795                                <h2>Automation Report</h2>
796                                <table class="table table-bordered" border="1">
797                                    <thead>
798                                        <tr style="background-color:LightSkyBlue;">
799                                        \n%s
800                                        </tr>
801                                    </thead>
802                                    <tbody>
803                                    \n%s
804                                    </tbody>
805                                </table>
806                                <div class="row">
807                                    <div class="col-sm-4">Passed: <b>%d</b></div>
808                                    <div class="col-sm-4">Failed: <b>%d</b></div>
809                                    <div class="col-sm-4">NA: <b>%d</b></div>
810                                </div>
811                                <div class="row">
812                                    <div class="col-sm-4">Total: <b>%d</b></div>
813                                </div>
814                            </div>
815                        </body>
816                        </html>
817
818                """ % (html_table_header, html_table_body, passed_tests,
819                       failed_tests, na_tests, total_tests)
820        with open(os.path.join(self._options.html_report_dir,
821                               "test_report.html"), 'w') as html_file:
822            html_file.write(html_page)
823
824    def _GenerateReportText(self):
825        """Prints a result report to stdout.
826
827        Prints a result table to stdout. Each row of the table contains the
828        test result directory and the test result (PASS, FAIL). If the perf
829        option is enabled, each test entry is followed by perf keyval entries
830        from the test results.
831
832        """
833        tests = self._GetSortedTests()
834        width = self._GetTestColumnWidth()
835
836        crashes = {}
837        tests_pass = 0
838        self._PrintDashLine(width)
839
840        for result in tests:
841            testdir = result['testdir']
842            test_entry = testdir if self._options.csv else testdir.ljust(width)
843
844            status_entry = self._GenStatusString(result['status'])
845            if result['status']:
846                color = self._color.GREEN
847                # Change the color of 'PASSED' if the test run wasn't completely
848                # ok, so it's more obvious it isn't a pure pass.
849                if 'WARN' in result['error_msg']:
850                    color = self._color.YELLOW
851                elif 'TEST_NA' in result['error_msg']:
852                    color = self._color.MAGENTA
853                tests_pass += 1
854            else:
855                color = self._color.RED
856
857            test_entries = [test_entry, self._color.Color(color, status_entry)]
858
859            info = result.get('info', {})
860            info.update(result.get('attr', {}))
861            if self._options.csv and (self._options.info or self._options.attr):
862                if info:
863                    test_entries.extend(['%s=%s' % (k, info[k])
864                                        for k in sorted(info.keys())])
865                if not result['status'] and result['error_msg']:
866                    test_entries.append('reason="%s"' % result['error_msg'])
867
868            self._PrintEntries(test_entries)
869            self._PrintErrors(test_entry, result['error_msg'])
870
871            # Print out error log for failed tests.
872            if not result['status']:
873                self._PrintErrorLogs(testdir, test_entry)
874
875            # Emit the perf keyvals entries. There will be no entries if the
876            # --no-perf option is specified.
877            self._PrintResultDictKeyVals(test_entry, result['perf'])
878
879            # Determine that there was a crash during this test.
880            if result['crashes']:
881                for crash in result['crashes']:
882                    if not crash in crashes:
883                        crashes[crash] = set([])
884                    crashes[crash].add(testdir)
885
886            # Emit extra test metadata info on separate lines if not --csv.
887            if not self._options.csv:
888                self._PrintResultDictKeyVals(test_entry, info)
889
890        self._PrintDashLine(width)
891
892        if not self._options.csv:
893            total_tests = len(tests)
894            percent_pass = 100 * tests_pass / total_tests
895            pass_str = '%d/%d (%d%%)' % (tests_pass, total_tests, percent_pass)
896            print('Total PASS: ' +
897                  self._color.Color(self._color.BOLD, pass_str))
898
899        if self._options.crash_detection:
900            print('')
901            if crashes:
902                print(self._color.Color(self._color.RED,
903                                        'Crashes detected during testing:'))
904                self._PrintDashLine(width)
905
906                for crash_name, crashed_tests in sorted(six.iteritems(crashes)):
907                    print(self._color.Color(self._color.RED, crash_name))
908                    for crashed_test in crashed_tests:
909                        print(self._Indent(crashed_test))
910
911                self._PrintDashLine(width)
912                print(('Total unique crashes: ' +
913                       self._color.Color(self._color.BOLD, str(len(crashes)))))
914
915            # Sometimes the builders exit before these buffers are flushed.
916            sys.stderr.flush()
917            sys.stdout.flush()
918
919    def _test_name_from_dir(self, test_dir):
920        """Return the name from the test_dir.
921
922        Examples:
923        /tmp/test_that_latest/something/else/results-n-testname
924            returns `testname`
925
926        /tmp/TTL/something/results-n-test-name-here
927            returns `test-name-here`
928
929        """
930        test_name = test_dir.split('/')[-1]
931        return '-'.join(test_name.split('-')[2:])
932
933    def _translate_to_dict(self):
934        """Return the full_status, testname, and err to a json dict."""
935        res = {'tests': []}
936        for test_info in self._results:
937            if test_info['full_status'] is None:
938                continue
939            res['tests'].append(
940                {'verdict': test_info['full_status'],
941                 'testname': self._test_name_from_dir(test_info['testdir']),
942                 'errmsg': test_info['full_err'],
943                 'resultspath': test_info['testdir'],
944                 'starttime': test_info['starttimestamp'],
945                 'endtime': test_info['timestamp']
946                 })
947        return res
948
949    def _write_simple_json(self):
950        """Write the translated json results to results.json."""
951        if not self._options.html_report_dir:
952            return
953        json_results = self._translate_to_dict()
954        with open(os.path.join(self._options.html_report_dir,
955                               "results.json"), 'w') as wf:
956                json.dump(json_results, wf)
957
958    def Run(self):
959        """Runs report generation."""
960        self._CollectAllResults()
961        self._write_simple_json()
962        if not self._options.just_status_code:
963            self._GenerateReportText()
964            if self._options.html:
965                print("\nLogging the data into test_report.html file.")
966                try:
967                    self.GenerateReportHTML()
968                except Exception as e:
969                    print("Failed to generate HTML report %s" % str(e))
970        for d in self._GetDedupedResults():
971            if d['experimental'] and self._options.ignore_experimental_tests:
972                continue
973            if not d['status'] or (
974                    self._options.crash_detection and d['crashes']):
975                # When a test fails, but autotest doesn't crash, do not exit(1)
976                if not self._options.is_cft:
977                    sys.exit(1)
978
979
def main():
    """Parse command-line options and run report generation.

    Exits non-zero via Die() when no result directories are supplied, and
    via ReportGenerator.Run() when failures or crashes are detected.
    """
    usage = 'Usage: %prog [options] result-directories...'
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('--color', dest='color', action='store_true',
                      default=_STDOUT_IS_TTY,
                      help='Use color for text reports [default if TTY stdout]'
                      )
    parser.add_option('--no-color', dest='color', action='store_false',
                      help='Don\'t use color for text reports')
    parser.add_option('--no-crash-detection', dest='crash_detection',
                      action='store_false', default=True,
                      help='Don\'t report crashes or error out when detected')
    parser.add_option('--csv', dest='csv', action='store_true',
                      help='Output test result in CSV format.  '
                      'Implies --no-debug --no-crash-detection.')
    parser.add_option('--html', dest='html', action='store_true',
                      help='To generate HTML File.  '
                           'Implies --no-debug --no-crash-detection.')
    parser.add_option('--html-report-dir', dest='html_report_dir',
                      action='store', default=None, help='Path to generate '
                                                          'html report')
    parser.add_option('--info', dest='info', action='store_true',
                      default=False,
                      help='Include info keyvals in the report')
    parser.add_option('--escape-error', dest='escape_error',
                      action='store_true', default=False,
                      help='Escape error message text for tools.')
    parser.add_option('--perf', dest='perf', action='store_true',
                      default=True,
                      help='Include perf keyvals in the report [default]')
    parser.add_option('--attr', dest='attr', action='store_true',
                      default=False,
                      help='Include attr keyvals in the report')
    parser.add_option('--no-perf', dest='perf', action='store_false',
                      help='Don\'t include perf keyvals in the report')
    parser.add_option('--sort-chron', dest='sort_chron', action='store_true',
                      default=False,
                      help='Sort results by datetime instead of by test name.')
    parser.add_option('--no-debug', dest='print_debug', action='store_false',
                      default=True,
                      help='Don\'t print out logs when tests fail.')
    parser.add_option('--allow_chrome_crashes',
                      dest='allow_chrome_crashes',
                      action='store_true', default=False,
                      help='Treat Chrome crashes as non-fatal.')
    parser.add_option('--ignore_experimental_tests',
                      dest='ignore_experimental_tests',
                      action='store_true', default=False,
                      help='If set, experimental test results will not '
                           'influence the exit code.')
    parser.add_option('--just_status_code',
                      dest='just_status_code',
                      action='store_true', default=False,
                      help='Skip generating a report, just return status code.')
    parser.add_option('--cft',
                      dest='is_cft',
                      action='store_true', default=False,
                      help='If set: will not return 1 on test failure')

    (options, args) = parser.parse_args()

    if not args:
        parser.print_help()
        Die('no result directories provided')

    if options.csv and (options.print_debug or options.crash_detection):
        # BUG FIX: this previously called the builtin Warning exception
        # class, which silently constructed and discarded an exception
        # instance without emitting anything. Use logging instead.
        logging.warning('Forcing --no-debug --no-crash-detection')
        options.print_debug = False
        options.crash_detection = False

    report_options = ['color', 'csv', 'info', 'escape_error', 'perf', 'attr',
                      'sort_chron', 'print_debug', 'html', 'html_report_dir']
    if options.just_status_code and any(
        getattr(options, opt) for opt in report_options):
        logging.warning('Passed --just_status_code and incompatible options %s',
                        ' '.join(opt for opt in report_options
                                 if getattr(options, opt)))

    generator = ReportGenerator(options, args)
    generator.Run()
1060
1061
# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
    main()
1064