|  | #!/usr/bin/env python3 | 
|  | # vim: set syntax=python ts=4 : | 
|  | # Copyright (c) 2020 Intel Corporation | 
|  | # SPDX-License-Identifier: Apache-2.0 | 
|  | """Zephyr Sanity Tests | 
|  |  | 
|  | Also check the "User and Developer Guides" at https://docs.zephyrproject.org/ | 
|  |  | 
|  | This script scans for the set of unit test applications in the git | 
|  | repository and attempts to execute them. By default, it tries to | 
|  | build each test case on one platform per architecture, using a precedence | 
|  | list defined in an architecture configuration file, and if possible | 
|  | run the tests in any available emulators or simulators on the system. | 
|  |  | 
Test cases are detected by the presence of a 'testcase.yaml' or a 'sample.yaml'
file in the application's project directory. This file may contain one or more
|  | blocks, each identifying a test scenario. The title of the block is a name for | 
|  | the test case, which only needs to be unique for the test cases specified in | 
|  | that testcase meta-data. The full canonical name for each test case is <path to | 
|  | test case>/<block>. | 
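For example, tests/kernel/fifo/fifo_api/kernel.fifo.poll names the
kernel.fifo.poll scenario defined under tests/kernel/fifo/fifo_api.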
|  |  | 
Each test block in the testcase metadata can define the following key/value
|  | pairs: | 
|  |  | 
|  | tags: <list of tags> (required) | 
|  | A set of string tags for the testcase. Usually pertains to | 
|  | functional domains but can be anything. Command line invocations | 
|  | of this script can filter the set of tests to run based on tag. | 
|  |  | 
|  | skip: <True|False> (default False) | 
|  | skip testcase unconditionally. This can be used for broken tests. | 
|  |  | 
|  | slow: <True|False> (default False) | 
|  | Don't build or run this test case unless --enable-slow was passed | 
|  | in on the command line. Intended for time-consuming test cases | 
|  | that are only run under certain circumstances, like daily | 
|  | builds. | 
|  |  | 
|  | extra_args: <list of extra arguments> | 
|  | Extra cache entries to pass to CMake when building or running the | 
|  | test case. | 
|  |  | 
|  | extra_configs: <list of extra configurations> | 
|  | Extra configuration options to be merged with a master prj.conf | 
|  | when building or running the test case. | 
|  |  | 
|  | build_only: <True|False> (default False) | 
|  | If true, don't try to run the test even if the selected platform | 
|  | supports it. | 
|  |  | 
|  | build_on_all: <True|False> (default False) | 
|  | If true, attempt to build test on all available platforms. | 
|  |  | 
|  | depends_on: <list of features> | 
A board or platform can announce which features it supports; this option
enables the test only on platforms that provide the listed features.
|  |  | 
|  | min_ram: <integer> | 
|  | minimum amount of RAM needed for this test to build and run. This is | 
|  | compared with information provided by the board metadata. | 
|  |  | 
|  | min_flash: <integer> | 
|  | minimum amount of ROM needed for this test to build and run. This is | 
|  | compared with information provided by the board metadata. | 
|  |  | 
|  | timeout: <number of seconds> | 
|  | Length of time to run test in emulator before automatically killing it. | 
Defaults to 60 seconds.
|  |  | 
|  | arch_allow: <list of arches, such as x86, arm, arc> | 
Only run this test case on the listed architectures.
|  |  | 
|  | arch_exclude: <list of arches, such as x86, arm, arc> | 
|  | Set of architectures that this test case should not run on. | 
|  |  | 
|  | platform_allow: <list of platforms> | 
Only run this test case on the listed platforms.
|  |  | 
|  | platform_exclude: <list of platforms> | 
|  | Set of platforms that this test case should not run on. | 
|  |  | 
|  | extra_sections: <list of extra binary sections> | 
|  | When computing sizes, sanitycheck will report errors if it finds | 
|  | extra, unexpected sections in the Zephyr binary unless they are named | 
|  | here. They will not be included in the size calculation. | 
|  |  | 
|  | filter: <expression> | 
|  | Filter whether the testcase should be run by evaluating an expression | 
|  | against an environment containing the following values: | 
|  |  | 
|  | { ARCH : <architecture>, | 
|  | PLATFORM : <platform>, | 
|  | <all CONFIG_* key/value pairs in the test's generated defconfig>, | 
|  | <all DT_* key/value pairs in the test's generated device tree file>, | 
|  | <all CMake key/value pairs in the test's generated CMakeCache.txt file>, | 
|  | *<env>: any environment variable available | 
|  | } | 
|  |  | 
|  | The grammar for the expression language is as follows: | 
|  |  | 
|  | expression ::= expression "and" expression | 
|  | | expression "or" expression | 
|  | | "not" expression | 
|  | | "(" expression ")" | 
|  | | symbol "==" constant | 
|  | | symbol "!=" constant | 
|  | | symbol "<" number | 
|  | | symbol ">" number | 
|  | | symbol ">=" number | 
|  | | symbol "<=" number | 
|  | | symbol "in" list | 
|  | | symbol ":" string | 
|  | | symbol | 
|  |  | 
|  | list ::= "[" list_contents "]" | 
|  |  | 
|  | list_contents ::= constant | 
|  | | list_contents "," constant | 
|  |  | 
|  | constant ::= number | 
|  | | string | 
|  |  | 
|  |  | 
|  | For the case where expression ::= symbol, it evaluates to true | 
|  | if the symbol is defined to a non-empty string. | 
|  |  | 
|  | Operator precedence, starting from lowest to highest: | 
|  |  | 
|  | or (left associative) | 
|  | and (left associative) | 
|  | not (right associative) | 
|  | all comparison operators (non-associative) | 
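
For example, given this precedence, "not A and B" parses as "(not A) and B",
and "A or B and C" parses as "A or (B and C)".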
|  |  | 
|  | arch_allow, arch_exclude, platform_allow, platform_exclude | 
|  | are all syntactic sugar for these expressions. For instance | 
|  |  | 
|  | arch_exclude = x86 arc | 
|  |  | 
|  | Is the same as: | 
|  |  | 
|  | filter = not ARCH in ["x86", "arc"] | 
|  |  | 
|  | The ':' operator compiles the string argument as a regular expression, | 
|  | and then returns a true value only if the symbol's value in the environment | 
|  | matches. For example, if CONFIG_SOC="stm32f107xc" then | 
|  |  | 
|  | filter = CONFIG_SOC : "stm.*" | 
|  |  | 
|  | Would match it. | 
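
Putting several of these keys together, a minimal scenario might look like the
sketch below (the scenario name and values are purely illustrative, and the
usual layout with scenario blocks under a top-level "tests:" key is assumed):

tests:
  kernel.my_feature.basic:
    tags: kernel
    min_ram: 32
    arch_exclude: nios2
    filter: CONFIG_PRINTK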
|  |  | 
The set of test cases that actually run depends on directives in the testcase
files and options passed in on the command line. If there is any confusion,
|  | running with -v or examining the discard report (sanitycheck_discard.csv) | 
|  | can help show why particular test cases were skipped. | 
|  |  | 
|  | Metrics (such as pass/fail state and binary size) for the last code | 
|  | release are stored in scripts/sanity_chk/sanity_last_release.csv. | 
|  | To update this, pass the --all --release options. | 
|  |  | 
To load arguments from a file, prefix the file name with '+', e.g.
+file_name. The file must contain one or more valid arguments, one per
line (separated by line breaks rather than spaces).
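For example (the file name is arbitrary), a file quick.args containing:

--testcase-root
tests/kernel
-v

can be passed as: ./scripts/sanitycheck +quick.args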
|  |  | 
|  | Most everyday users will run with no arguments. | 
|  |  | 
|  | """ | 
|  |  | 
|  | import os | 
|  | import argparse | 
|  | import sys | 
|  | import logging | 
|  | import time | 
|  | import itertools | 
|  | import shutil | 
|  | from collections import OrderedDict | 
|  | import multiprocessing | 
|  | from itertools import islice | 
|  | import csv | 
|  | from colorama import Fore | 
|  | from pathlib import Path | 
|  |  | 
|  |  | 
|  | ZEPHYR_BASE = os.getenv("ZEPHYR_BASE") | 
|  | if not ZEPHYR_BASE: | 
|  | # This file has been zephyr/scripts/sanitycheck for years, | 
|  | # and that is not going to change anytime soon. Let the user | 
|  | # run this script as ./scripts/sanitycheck without making them | 
|  | # set ZEPHYR_BASE. | 
|  | ZEPHYR_BASE = str(Path(__file__).resolve().parents[1]) | 
|  |  | 
|  | # Propagate this decision to child processes. | 
|  | os.environ['ZEPHYR_BASE'] = ZEPHYR_BASE | 
|  |  | 
|  | print(f'ZEPHYR_BASE unset, using "{ZEPHYR_BASE}"') | 
|  |  | 
|  | try: | 
|  | from anytree import RenderTree, Node, find | 
|  | except ImportError: | 
|  | print("Install the anytree module to use the --test-tree option") | 
|  |  | 
|  | try: | 
|  | from tabulate import tabulate | 
|  | except ImportError: | 
|  | print("Install tabulate python module with pip to use --device-testing option.") | 
|  |  | 
|  | sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/sanity_chk")) | 
|  |  | 
|  | from sanitylib import HardwareMap, TestSuite, SizeCalculator, CoverageTool | 
|  |  | 
|  | logger = logging.getLogger('sanitycheck') | 
|  | logger.setLevel(logging.DEBUG) | 
|  |  | 
|  | def size_report(sc): | 
|  | logger.info(sc.filename) | 
|  | logger.info("SECTION NAME             VMA        LMA     SIZE  HEX SZ TYPE") | 
|  | for i in range(len(sc.sections)): | 
|  | v = sc.sections[i] | 
|  |  | 
|  | logger.info("%-17s 0x%08x 0x%08x %8d 0x%05x %-7s" % | 
|  | (v["name"], v["virt_addr"], v["load_addr"], v["size"], v["size"], | 
|  | v["type"])) | 
|  |  | 
|  | logger.info("Totals: %d bytes (ROM), %d bytes (RAM)" % | 
|  | (sc.rom_size, sc.ram_size)) | 
|  | logger.info("") | 
|  |  | 
|  |  | 
|  | def export_tests(filename, tests): | 
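# Test identifiers are dot-separated, e.g. kernel.fifo.poll: the first field
# becomes the CSV "section" (capitalized) and the second the "subsection".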
|  | with open(filename, "wt") as csvfile: | 
|  | fieldnames = ['section', 'subsection', 'title', 'reference'] | 
|  | cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep) | 
|  | for test in tests: | 
|  | data = test.split(".") | 
|  | if len(data) > 1: | 
|  | subsec = " ".join(data[1].split("_")).title() | 
|  | rowdict = { | 
|  | "section": data[0].capitalize(), | 
|  | "subsection": subsec, | 
|  | "title": test, | 
|  | "reference": test | 
|  | } | 
|  | cw.writerow(rowdict) | 
|  | else: | 
logger.error("{} can't be exported".format(test))
|  |  | 
|  |  | 
|  | def parse_arguments(): | 
|  | parser = argparse.ArgumentParser( | 
|  | description=__doc__, | 
|  | formatter_class=argparse.RawDescriptionHelpFormatter) | 
|  | parser.fromfile_prefix_chars = "+" | 
|  |  | 
|  | case_select = parser.add_argument_group("Test case selection", | 
|  | """ | 
|  | Artificially long but functional example: | 
|  | $ ./scripts/sanitycheck -v     \\ | 
|  | --testcase-root tests/ztest/base    \\ | 
|  | --testcase-root tests/kernel   \\ | 
|  | --test      tests/ztest/base/testing.ztest.verbose_0  \\ | 
|  | --test      tests/kernel/fifo/fifo_api/kernel.fifo.poll | 
|  |  | 
|  | "kernel.fifo.poll" is one of the test section names in | 
|  | __/fifo_api/testcase.yaml | 
|  | """) | 
|  |  | 
|  | parser.add_argument("--force-toolchain", action="store_true", | 
|  | help="Do not filter based on toolchain, use the set " | 
"toolchain unconditionally")
|  | parser.add_argument( | 
|  | "-p", "--platform", action="append", | 
|  | help="Platform filter for testing. This option may be used multiple " | 
|  | "times. Testcases will only be built/run on the platforms " | 
|  | "specified. If this option is not used, then platforms marked " | 
|  | "as default in the platform metadata file will be chosen " | 
|  | "to build and test. ") | 
|  |  | 
|  | parser.add_argument("-P", "--exclude-platform", action="append", default=[], | 
|  | help="""Exclude platforms and do not build or run any tests | 
|  | on those platforms. This option can be called multiple times. | 
|  | """ | 
|  | ) | 
|  |  | 
|  | parser.add_argument( | 
|  | "-a", "--arch", action="append", | 
|  | help="Arch filter for testing. Takes precedence over --platform. " | 
|  | "If unspecified, test all arches. Multiple invocations " | 
|  | "are treated as a logical 'or' relationship") | 
|  | parser.add_argument( | 
|  | "-t", "--tag", action="append", | 
|  | help="Specify tags to restrict which tests to run by tag value. " | 
|  | "Default is to not do any tag filtering. Multiple invocations " | 
|  | "are treated as a logical 'or' relationship") | 
|  | parser.add_argument("-e", "--exclude-tag", action="append", | 
|  | help="Specify tags of tests that should not run. " | 
|  | "Default is to run all tests with all tags.") | 
|  | case_select.add_argument( | 
|  | "-f", | 
|  | "--only-failed", | 
|  | action="store_true", | 
|  | help="Run only those tests that failed the previous sanity check " | 
|  | "invocation.") | 
|  |  | 
|  | parser.add_argument( | 
|  | "--retry-failed", type=int, default=0, | 
|  | help="Retry failing tests again, up to the number of times specified.") | 
|  |  | 
|  | parser.add_argument( | 
|  | "--retry-interval", type=int, default=60, | 
|  | help="Retry failing tests after specified period of time.") | 
|  |  | 
|  | test_xor_subtest = case_select.add_mutually_exclusive_group() | 
|  |  | 
|  | test_xor_subtest.add_argument( | 
|  | "-s", "--test", action="append", | 
|  | help="Run only the specified test cases. These are named by " | 
|  | "<path/relative/to/Zephyr/base/section.name.in.testcase.yaml>") | 
|  |  | 
|  | test_xor_subtest.add_argument( | 
|  | "--sub-test", action="append", | 
|  | help="""Recursively find sub-test functions and run the entire | 
|  | test section where they were found, including all sibling test | 
|  | functions. Sub-tests are named by: | 
|  | section.name.in.testcase.yaml.function_name_without_test_prefix | 
|  | Example: kernel.fifo.poll.fifo_loop | 
|  | """) | 
|  |  | 
|  | parser.add_argument( | 
|  | "-l", "--all", action="store_true", | 
|  | help="Build/test on all platforms. Any --platform arguments " | 
|  | "ignored.") | 
|  |  | 
|  | parser.add_argument( | 
|  | "-o", "--report-dir", | 
|  | help="""Output reports containing results of the test run into the | 
|  | specified directory. | 
|  | The output will be both in CSV and JUNIT format | 
|  | (sanitycheck.csv and sanitycheck.xml). | 
|  | """) | 
|  |  | 
|  | parser.add_argument( | 
|  | "--report-name", | 
|  | help="""Create a report with a custom name. | 
|  | """) | 
|  |  | 
|  | parser.add_argument( | 
|  | "--report-suffix", | 
|  | help="""Add a suffix to all generated file names, for example to add a | 
|  | version or a commit ID. | 
|  | """) | 
|  |  | 
|  | parser.add_argument("--report-excluded", | 
|  | action="store_true", | 
|  | help="""List all tests that are never run based on current scope and | 
|  | coverage. If you are looking for accurate results, run this with | 
|  | --all, but this will take a while...""") | 
|  |  | 
|  | parser.add_argument("--compare-report", | 
|  | help="Use this report file for size comparison") | 
|  |  | 
|  | parser.add_argument( | 
|  | "-B", "--subset", | 
|  | help="Only run a subset of the tests, 1/4 for running the first 25%%, " | 
|  | "3/5 means run the 3rd fifth of the total. " | 
|  | "This option is useful when running a large number of tests on " | 
|  | "different hosts to speed up execution time.") | 
|  |  | 
|  | parser.add_argument( | 
|  | "-N", "--ninja", action="store_true", | 
|  | help="Use the Ninja generator with CMake") | 
|  |  | 
|  | parser.add_argument( | 
|  | "-y", "--dry-run", action="store_true", | 
|  | help="""Create the filtered list of test cases, but don't actually | 
|  | run them. Useful if you're just interested in the discard report | 
|  | generated for every run and saved in the specified output | 
|  | directory (sanitycheck_discard.csv). | 
|  | """) | 
|  |  | 
|  | parser.add_argument("--list-tags", action="store_true", | 
|  | help="list all tags in selected tests") | 
|  |  | 
|  | case_select.add_argument("--list-tests", action="store_true", | 
|  | help="""List of all sub-test functions recursively found in | 
|  | all --testcase-root arguments. Note different sub-tests can share | 
|  | the same section name and come from different directories. | 
|  | The output is flattened and reports --sub-test names only, | 
|  | not their directories. For instance net.socket.getaddrinfo_ok | 
|  | and net.socket.fd_set belong to different directories. | 
|  | """) | 
|  |  | 
|  | case_select.add_argument("--test-tree", action="store_true", | 
|  | help="""Output the testsuite in a tree form""") | 
|  |  | 
|  | case_select.add_argument("--list-test-duplicates", action="store_true", | 
|  | help="""List tests with duplicate identifiers. | 
|  | """) | 
|  |  | 
|  | parser.add_argument("--export-tests", action="store", | 
|  | metavar="FILENAME", | 
help="Export test case metadata to a file in CSV format. "
"Test instances can be exported per target by supplying "
"the platform name using the --platform option (tests for only "
"one platform can be exported at a time).")
|  |  | 
|  | parser.add_argument("--timestamps", | 
|  | action="store_true", | 
|  | help="Print all messages with time stamps") | 
|  |  | 
|  | parser.add_argument( | 
|  | "-r", "--release", action="store_true", | 
|  | help="Update the benchmark database with the results of this test " | 
|  | "run. Intended to be run by CI when tagging an official " | 
|  | "release. This database is used as a basis for comparison " | 
|  | "when looking for deltas in metrics such as footprint") | 
|  |  | 
|  | parser.add_argument("-W", "--disable-warnings-as-errors", action="store_true", | 
help="Do not treat warning conditions as errors")
|  |  | 
|  | parser.add_argument( | 
|  | "-v", | 
|  | "--verbose", | 
|  | action="count", | 
|  | default=0, | 
|  | help="Emit debugging information, call multiple times to increase " | 
|  | "verbosity") | 
|  | parser.add_argument( | 
|  | "-i", "--inline-logs", action="store_true", | 
|  | help="Upon test failure, print relevant log data to stdout " | 
|  | "instead of just a path to it") | 
|  | parser.add_argument("--log-file", metavar="FILENAME", action="store", | 
|  | help="log also to file") | 
|  | parser.add_argument( | 
|  | "-m", "--last-metrics", action="store_true", | 
|  | help="Instead of comparing metrics from the last --release, " | 
|  | "compare with the results of the previous sanity check " | 
|  | "invocation") | 
|  | parser.add_argument( | 
|  | "-u", | 
|  | "--no-update", | 
|  | action="store_true", | 
|  | help="do not update the results of the last run of the sanity " | 
|  | "checks") | 
|  |  | 
|  | parser.add_argument( | 
|  | "-G", | 
|  | "--integration", | 
|  | action="store_true", | 
|  | help="Run integration tests") | 
|  |  | 
|  | case_select.add_argument( | 
|  | "-F", | 
|  | "--load-tests", | 
|  | metavar="FILENAME", | 
|  | action="store", | 
|  | help="Load list of tests and platforms to be run from file.") | 
|  |  | 
|  | case_select.add_argument( | 
|  | "-E", | 
|  | "--save-tests", | 
|  | metavar="FILENAME", | 
|  | action="store", | 
|  | help="Append list of tests and platforms to be run to file.") | 
|  |  | 
|  | test_or_build = parser.add_mutually_exclusive_group() | 
|  | test_or_build.add_argument( | 
|  | "-b", "--build-only", action="store_true", | 
|  | help="Only build the code, do not execute any of it in QEMU") | 
|  |  | 
|  | test_or_build.add_argument( | 
|  | "--test-only", action="store_true", | 
|  | help="""Only run device tests with current artifacts, do not build | 
|  | the code""") | 
|  | parser.add_argument( | 
|  | "--cmake-only", action="store_true", | 
|  | help="Only run cmake, do not build or run.") | 
|  |  | 
|  | parser.add_argument( | 
|  | "-M", "--runtime-artifact-cleanup", action="store_true", | 
|  | help="Delete artifacts of passing tests.") | 
|  |  | 
|  | parser.add_argument( | 
|  | "-j", "--jobs", type=int, | 
|  | help="Number of jobs for building, defaults to number of CPU threads, " | 
"overcommitted by a factor of 2 when --build-only is used")
|  |  | 
|  | parser.add_argument( | 
|  | "--show-footprint", action="store_true", | 
|  | help="Show footprint statistics and deltas since last release." | 
|  | ) | 
|  | parser.add_argument( | 
|  | "-H", "--footprint-threshold", type=float, default=5, | 
|  | help="When checking test case footprint sizes, warn the user if " | 
"the new app size has grown by more than the specified percentage "
"since the last release. Default is 5. Use 0 to warn on any "
"increase in app size")
|  | parser.add_argument( | 
|  | "-D", "--all-deltas", action="store_true", | 
|  | help="Show all footprint deltas, positive or negative. Implies " | 
|  | "--footprint-threshold=0") | 
|  | parser.add_argument( | 
|  | "-O", "--outdir", | 
|  | default=os.path.join(os.getcwd(), "sanity-out"), | 
|  | help="Output directory for logs and binaries. " | 
|  | "Default is 'sanity-out' in the current directory. " | 
|  | "This directory will be cleaned unless '--no-clean' is set. " | 
|  | "The '--clobber-output' option controls what cleaning does.") | 
|  | parser.add_argument( | 
|  | "-c", "--clobber-output", action="store_true", | 
help="When cleaning the output directory, simply delete it instead "
"of renaming it (the default policy).")
|  | parser.add_argument( | 
|  | "-n", "--no-clean", action="store_true", | 
|  | help="Re-use the outdir before building. Will result in " | 
|  | "faster compilation since builds will be incremental.") | 
|  | case_select.add_argument( | 
|  | "-T", "--testcase-root", action="append", default=[], | 
|  | help="Base directory to recursively search for test cases. All " | 
|  | "testcase.yaml files under here will be processed. May be " | 
|  | "called multiple times. Defaults to the 'samples/' and " | 
|  | "'tests/' directories at the base of the Zephyr tree.") | 
|  |  | 
|  | board_root_list = ["%s/boards" % ZEPHYR_BASE, | 
|  | "%s/scripts/sanity_chk/boards" % ZEPHYR_BASE] | 
|  |  | 
|  | parser.add_argument( | 
|  | "-A", "--board-root", action="append", default=board_root_list, | 
|  | help="""Directory to search for board configuration files. All .yaml | 
|  | files in the directory will be processed. The directory should have the same | 
structure as in the main Zephyr tree: boards/<arch>/<board_name>/""")
|  |  | 
|  | parser.add_argument( | 
|  | "-z", "--size", action="append", | 
help="Don't run sanity checks. Instead, produce a report to "
|  | "stdout detailing RAM/ROM sizes on the specified filenames. " | 
|  | "All other command line arguments ignored.") | 
|  | parser.add_argument( | 
|  | "-S", "--enable-slow", action="store_true", | 
|  | help="Execute time-consuming test cases that have been marked " | 
|  | "as 'slow' in testcase.yaml. Normally these are only built.") | 
|  |  | 
|  | parser.add_argument( | 
|  | "-K", "--force-platform", action="store_true", | 
|  | help="""Force testing on selected platforms, | 
|  | even if they are excluded in the test configuration""" | 
|  | ) | 
|  |  | 
|  | parser.add_argument( | 
|  | "--disable-unrecognized-section-test", action="store_true", | 
|  | default=False, | 
|  | help="Skip the 'unrecognized section' test.") | 
|  | parser.add_argument("-R", "--enable-asserts", action="store_true", | 
|  | default=True, | 
|  | help="deprecated, left for compatibility") | 
|  | parser.add_argument("--disable-asserts", action="store_false", | 
|  | dest="enable_asserts", | 
|  | help="deprecated, left for compatibility") | 
|  | parser.add_argument("-Q", "--error-on-deprecations", action="store_false", | 
|  | help="Error on deprecation warnings.") | 
|  | parser.add_argument("--enable-size-report", action="store_true", | 
|  | help="Enable expensive computation of RAM/ROM segment sizes.") | 
|  |  | 
|  | parser.add_argument( | 
|  | "-x", "--extra-args", action="append", default=[], | 
|  | help="""Extra CMake cache entries to define when building test cases. | 
|  | May be called multiple times. The key-value entries will be | 
|  | prefixed with -D before being passed to CMake. | 
|  |  | 
|  | E.g | 
|  | "sanitycheck -x=USE_CCACHE=0" | 
|  | will translate to | 
|  | "cmake -DUSE_CCACHE=0" | 
|  |  | 
|  | which will ultimately disable ccache. | 
|  | """ | 
|  | ) | 
|  |  | 
|  | parser.add_argument( | 
|  | "--emulation-only", action="store_true", | 
|  | help="Only build and run emulation platforms") | 
|  |  | 
|  | parser.add_argument( | 
|  | "--device-testing", action="store_true", | 
|  | help="Test on device directly. Specify the serial device to " | 
|  | "use with the --device-serial option.") | 
|  |  | 
|  | parser.add_argument( | 
|  | "-X", "--fixture", action="append", default=[], | 
|  | help="Specify a fixture that a board might support") | 
|  |  | 
|  | serial = parser.add_mutually_exclusive_group() | 
|  | serial.add_argument("--device-serial", | 
|  | help="""Serial device for accessing the board | 
|  | (e.g., /dev/ttyACM0) | 
|  | """) | 
|  |  | 
|  | serial.add_argument("--device-serial-pty", | 
|  | help="""Script for controlling pseudoterminal. | 
|  | Sanitycheck believes that it interacts with a terminal | 
|  | when it actually interacts with the script. | 
|  |  | 
E.g "sanitycheck --device-testing
--device-serial-pty <script>"
|  | """) | 
|  |  | 
|  | parser.add_argument("--generate-hardware-map", | 
|  | help="""Probe serial devices connected to this platform | 
|  | and create a hardware map file to be used with | 
|  | --device-testing | 
|  | """) | 
|  |  | 
|  | parser.add_argument("--persistent-hardware-map", action='store_true', | 
|  | help="""With --generate-hardware-map, tries to use | 
|  | persistent names for serial devices on platforms | 
|  | that support this feature (currently only Linux). | 
|  | """) | 
|  |  | 
|  | parser.add_argument("--hardware-map", | 
|  | help="""Load hardware map from a file. This will be used | 
|  | for testing on hardware that is listed in the file. | 
|  | """) | 
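# A hardware map file is a YAML list of board entries. A minimal sketch of one
# entry (only fields this script reads are shown; the values and the "serial"
# field are illustrative assumptions):
#
#   - connected: true
#     id: "000683000000"
#     platform: frdm_k64f
#     serial: /dev/ttyACM0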
|  |  | 
|  | parser.add_argument("--pre-script", | 
help="""Specify a pre-script. This will be executed
before the device handler opens the serial port and invokes the runner.
|  | """) | 
|  |  | 
|  | parser.add_argument( | 
|  | "--west-flash", nargs='?', const=[], | 
|  | help="""Uses west instead of ninja or make to flash when running with | 
|  | --device-testing. Supports comma-separated argument list. | 
|  |  | 
|  | E.g "sanitycheck --device-testing --device-serial /dev/ttyACM0 | 
|  | --west-flash="--board-id=foobar,--erase" | 
|  | will translate to "west flash -- --board-id=foobar --erase" | 
|  |  | 
|  | NOTE: device-testing must be enabled to use this option. | 
|  | """ | 
|  | ) | 
|  | parser.add_argument( | 
|  | "--west-runner", | 
|  | help="""Uses the specified west runner instead of default when running | 
|  | with --west-flash. | 
|  |  | 
|  | E.g "sanitycheck --device-testing --device-serial /dev/ttyACM0 | 
|  | --west-flash --west-runner=pyocd" | 
|  | will translate to "west flash --runner pyocd" | 
|  |  | 
|  | NOTE: west-flash must be enabled to use this option. | 
|  | """ | 
|  | ) | 
|  |  | 
|  | valgrind_asan_group = parser.add_mutually_exclusive_group() | 
|  |  | 
|  | valgrind_asan_group.add_argument( | 
|  | "--enable-valgrind", action="store_true", | 
|  | help="""Run binary through valgrind and check for several memory access | 
|  | errors. Valgrind needs to be installed on the host. This option only | 
|  | works with host binaries such as those generated for the native_posix | 
configuration and is mutually exclusive with --enable-asan.
|  | """) | 
|  |  | 
|  | valgrind_asan_group.add_argument( | 
|  | "--enable-asan", action="store_true", | 
|  | help="""Enable address sanitizer to check for several memory access | 
|  | errors. Libasan needs to be installed on the host. This option only | 
|  | works with host binaries such as those generated for the native_posix | 
configuration and is mutually exclusive with --enable-valgrind.
|  | """) | 
|  |  | 
|  | parser.add_argument( | 
|  | "--enable-lsan", action="store_true", | 
|  | help="""Enable leak sanitizer to check for heap memory leaks. | 
|  | Libasan needs to be installed on the host. This option only | 
|  | works with host binaries such as those generated for the native_posix | 
|  | configuration and when --enable-asan is given. | 
|  | """) | 
|  |  | 
|  | parser.add_argument( | 
|  | "--enable-ubsan", action="store_true", | 
|  | help="""Enable undefined behavior sanitizer to check for undefined | 
|  | behaviour during program execution. It uses an optional runtime library | 
|  | to provide better error diagnostics. This option only works with host | 
|  | binaries such as those generated for the native_posix configuration. | 
|  | """) | 
|  |  | 
|  | parser.add_argument("--enable-coverage", action="store_true", | 
|  | help="Enable code coverage using gcov.") | 
|  |  | 
|  | parser.add_argument("-C", "--coverage", action="store_true", | 
|  | help="Generate coverage reports. Implies " | 
|  | "--enable-coverage.") | 
|  |  | 
|  | parser.add_argument("--coverage-platform", action="append", default=[], | 
help="Platforms to run coverage reports on. "
"This option may be used multiple times. "
"Defaults to what was selected with --platform.")
|  |  | 
|  | parser.add_argument("--gcov-tool", default=None, | 
|  | help="Path to the gcov tool to use for code coverage " | 
|  | "reports") | 
|  |  | 
|  | parser.add_argument("--coverage-tool", choices=['lcov', 'gcovr'], default='lcov', | 
|  | help="Tool to use to generate coverage report.") | 
|  |  | 
|  | return parser.parse_args() | 
|  |  | 
|  |  | 
|  | def main(): | 
|  | start_time = time.time() | 
|  |  | 
|  | options = parse_arguments() | 
|  | previous_results = None | 
|  | # Cleanup | 
|  | if options.no_clean or options.only_failed or options.test_only: | 
|  | if os.path.exists(options.outdir): | 
|  | print("Keeping artifacts untouched") | 
|  | elif options.last_metrics: | 
|  | ls = os.path.join(options.outdir, "sanitycheck.csv") | 
|  | if os.path.exists(ls): | 
|  | with open(ls, "r") as fp: | 
|  | previous_results = fp.read() | 
|  | else: | 
sys.exit(f"Can't compare metrics with non-existent file {ls}")
|  | elif os.path.exists(options.outdir): | 
|  | if options.clobber_output: | 
|  | print("Deleting output directory {}".format(options.outdir)) | 
|  | shutil.rmtree(options.outdir) | 
|  | else: | 
|  | for i in range(1, 100): | 
|  | new_out = options.outdir + ".{}".format(i) | 
|  | if not os.path.exists(new_out): | 
|  | print("Renaming output directory to {}".format(new_out)) | 
|  | shutil.move(options.outdir, new_out) | 
|  | break | 
|  |  | 
|  | previous_results_file = None | 
|  | os.makedirs(options.outdir, exist_ok=True) | 
|  | if options.last_metrics and previous_results: | 
|  | previous_results_file = os.path.join(options.outdir, "baseline.csv") | 
|  | with open(previous_results_file, "w") as fp: | 
|  | fp.write(previous_results) | 
|  |  | 
|  | # create file handler which logs even debug messages | 
|  | if options.log_file: | 
|  | fh = logging.FileHandler(options.log_file) | 
|  | else: | 
|  | fh = logging.FileHandler(os.path.join(options.outdir, "sanitycheck.log")) | 
|  |  | 
|  | fh.setLevel(logging.DEBUG) | 
|  |  | 
|  | # create console handler with a higher log level | 
|  | ch = logging.StreamHandler() | 
|  |  | 
|  | VERBOSE = options.verbose | 
|  | if VERBOSE > 1: | 
|  | ch.setLevel(logging.DEBUG) | 
|  | else: | 
|  | ch.setLevel(logging.INFO) | 
|  |  | 
|  | # create formatter and add it to the handlers | 
|  | if options.timestamps: | 
|  | formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') | 
|  | else: | 
|  | formatter = logging.Formatter('%(levelname)-7s - %(message)s') | 
|  |  | 
|  | formatter_file = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') | 
|  | ch.setFormatter(formatter) | 
|  | fh.setFormatter(formatter_file) | 
|  |  | 
|  | # add the handlers to logger | 
|  | logger.addHandler(ch) | 
|  | logger.addHandler(fh) | 
|  |  | 
|  | hwm = HardwareMap() | 
|  | if options.generate_hardware_map: | 
|  | hwm.scan_hw(persistent=options.persistent_hardware_map) | 
|  | hwm.write_map(options.generate_hardware_map) | 
|  | return | 
|  |  | 
|  | if not options.device_testing and options.hardware_map: | 
|  | hwm.load_hardware_map(options.hardware_map) | 
|  |  | 
|  | logger.info("Available devices:") | 
|  | table = [] | 
|  | hwm.dump(hwmap=hwm.connected_hardware, connected_only=True) | 
|  | return | 
|  |  | 
|  | if options.west_runner and not options.west_flash: | 
|  | logger.error("west-runner requires west-flash to be enabled") | 
|  | sys.exit(1) | 
|  |  | 
|  | if options.west_flash and not options.device_testing: | 
|  | logger.error("west-flash requires device-testing to be enabled") | 
|  | sys.exit(1) | 
|  |  | 
|  | if options.coverage: | 
|  | options.enable_coverage = True | 
|  |  | 
|  | if not options.coverage_platform: | 
|  | options.coverage_platform = options.platform | 
|  |  | 
|  | if options.size: | 
|  | for fn in options.size: | 
|  | size_report(SizeCalculator(fn, [])) | 
|  | sys.exit(0) | 
|  |  | 
|  | if options.subset: | 
|  | subset, sets = options.subset.split("/") | 
|  | if int(subset) > 0 and int(sets) >= int(subset): | 
|  | logger.info("Running only a subset: %s/%s" % (subset, sets)) | 
|  | else: | 
|  | logger.error("You have provided a wrong subset value: %s." % options.subset) | 
|  | return | 
|  |  | 
|  | if not options.testcase_root: | 
|  | options.testcase_root = [os.path.join(ZEPHYR_BASE, "tests"), | 
|  | os.path.join(ZEPHYR_BASE, "samples")] | 
|  |  | 
|  | if options.show_footprint or options.compare_report or options.release: | 
|  | options.enable_size_report = True | 
|  |  | 
|  | suite = TestSuite(options.board_root, options.testcase_root, options.outdir) | 
|  |  | 
|  | # Set testsuite options from command line. | 
|  | suite.build_only = options.build_only | 
|  | suite.cmake_only = options.cmake_only | 
|  | suite.cleanup = options.runtime_artifact_cleanup | 
|  | suite.test_only = options.test_only | 
|  | suite.enable_slow = options.enable_slow | 
|  | suite.device_testing = options.device_testing | 
|  | suite.fixtures = options.fixture | 
|  | suite.enable_asan = options.enable_asan | 
|  | suite.enable_lsan = options.enable_lsan | 
|  | suite.enable_ubsan = options.enable_ubsan | 
|  | suite.enable_coverage = options.enable_coverage | 
|  | suite.enable_valgrind = options.enable_valgrind | 
|  | suite.coverage_platform = options.coverage_platform | 
|  | suite.inline_logs = options.inline_logs | 
|  | suite.enable_size_report = options.enable_size_report | 
|  | suite.extra_args = options.extra_args | 
|  | suite.west_flash = options.west_flash | 
|  | suite.west_runner = options.west_runner | 
|  | suite.verbose = VERBOSE | 
|  | suite.warnings_as_errors = not options.disable_warnings_as_errors | 
|  | suite.integration = options.integration | 
|  |  | 
|  | if options.ninja: | 
|  | suite.generator_cmd = "ninja" | 
|  | suite.generator = "Ninja" | 
|  | else: | 
|  | suite.generator_cmd = "make" | 
|  | suite.generator = "Unix Makefiles" | 
|  |  | 
|  | # Set number of jobs | 
|  | if options.jobs: | 
|  | suite.jobs = options.jobs | 
|  | elif options.build_only: | 
|  | suite.jobs = multiprocessing.cpu_count() * 2 | 
|  | else: | 
|  | suite.jobs = multiprocessing.cpu_count() | 
|  | logger.info("JOBS: %d" % suite.jobs) | 
|  |  | 
|  | run_individual_tests = [] | 
|  |  | 
|  | if options.test: | 
|  | run_individual_tests = options.test | 
|  |  | 
|  | suite.add_testcases(testcase_filter=run_individual_tests) | 
|  | suite.add_configurations() | 
|  |  | 
|  | if options.device_testing: | 
|  | if options.hardware_map: | 
|  | hwm.load_hardware_map(options.hardware_map) | 
|  | suite.connected_hardware = hwm.connected_hardware | 
|  | if not options.platform: | 
|  | options.platform = [] | 
|  | for platform in hwm.connected_hardware: | 
|  | if platform['connected']: | 
|  | options.platform.append(platform['platform']) | 
|  |  | 
|  | elif options.device_serial or options.device_serial_pty: | 
|  | if options.platform and len(options.platform) == 1: | 
|  | if options.device_serial: | 
|  | hwm.load_device_from_cmdline(options.device_serial, | 
|  | options.platform[0], | 
|  | options.pre_script, | 
|  | False) | 
|  | else: | 
|  | hwm.load_device_from_cmdline(options.device_serial_pty, | 
|  | options.platform[0], | 
|  | options.pre_script, | 
|  | True) | 
|  |  | 
|  | suite.connected_hardware = hwm.connected_hardware | 
|  | else: | 
|  | logger.error("""When --device-testing is used with | 
|  | --device-serial or --device-serial-pty, | 
|  | only one platform is allowed""") | 
|  |  | 
|  | if suite.load_errors: | 
|  | sys.exit(1) | 
|  |  | 
|  | if options.list_tags: | 
|  | tags = set() | 
|  | for _, tc in suite.testcases.items(): | 
|  | tags = tags.union(tc.tags) | 
|  |  | 
|  | for t in tags: | 
|  | print("- {}".format(t)) | 
|  |  | 
|  | return | 
|  |  | 
|  | if not options.platform and (options.list_tests or options.test_tree or options.list_test_duplicates \ | 
|  | or options.sub_test or options.export_tests): | 
|  | cnt = 0 | 
|  | all_tests = suite.get_all_tests() | 
|  |  | 
|  | if options.export_tests: | 
|  | export_tests(options.export_tests, all_tests) | 
|  | return | 
|  |  | 
|  | if options.list_test_duplicates: | 
|  | import collections | 
|  | dupes = [item for item, count in collections.Counter(all_tests).items() if count > 1] | 
|  | if dupes: | 
|  | print("Tests with duplicate identifiers:") | 
|  | for dupe in dupes: | 
|  | print("- {}".format(dupe)) | 
|  | for dc in suite.get_testcase(dupe): | 
|  | print("  - {}".format(dc)) | 
|  | else: | 
|  | print("No duplicates found.") | 
|  | return | 
|  |  | 
|  | if options.sub_test: | 
|  | for st in options.sub_test: | 
|  | subtests = suite.get_testcase(st) | 
|  | for sti in subtests: | 
|  | run_individual_tests.append(sti.name) | 
|  |  | 
|  | if run_individual_tests: | 
|  | logger.info("Running the following tests:") | 
|  | for test in run_individual_tests: | 
|  | print(" - {}".format(test)) | 
|  | else: | 
|  | logger.info("Tests not found") | 
|  | return | 
|  |  | 
|  | elif options.list_tests or options.test_tree: | 
|  | if options.test_tree: | 
|  | testsuite = Node("Testsuite") | 
|  | samples = Node("Samples", parent=testsuite) | 
|  | tests = Node("Tests", parent=testsuite) | 
|  |  | 
|  | for test in sorted(all_tests): | 
|  | cnt = cnt + 1 | 
|  | if options.list_tests: | 
|  | print(" - {}".format(test)) | 
|  |  | 
|  | if options.test_tree: | 
|  | if test.startswith("sample."): | 
|  | sec = test.split(".") | 
|  | area = find(samples, lambda node: node.name == sec[1] and node.parent == samples) | 
|  | if not area: | 
|  | area = Node(sec[1], parent=samples) | 
|  |  | 
|  | t = Node(test, parent=area) | 
|  | else: | 
|  | sec = test.split(".") | 
|  | area = find(tests, lambda node: node.name == sec[0] and node.parent == tests) | 
|  | if not area: | 
|  | area = Node(sec[0], parent=tests) | 
|  |  | 
|  | if area and len(sec) > 2: | 
|  | subarea = find(area, lambda node: node.name == sec[1] and node.parent == area) | 
|  | if not subarea: | 
|  | subarea = Node(sec[1], parent=area) | 
|  |  | 
|  | t = Node(test, parent=subarea) | 
|  |  | 
|  | if options.list_tests: | 
|  | print("{} total.".format(cnt)) | 
|  |  | 
|  | if options.test_tree: | 
|  | for pre, _, node in RenderTree(testsuite): | 
|  | print("%s%s" % (pre, node.name)) | 
|  | return | 
|  |  | 
|  | discards = [] | 
|  |  | 
|  | if options.report_suffix: | 
|  | last_run = os.path.join(options.outdir, "sanitycheck_{}.csv".format(options.report_suffix)) | 
|  | else: | 
|  | last_run = os.path.join(options.outdir, "sanitycheck.csv") | 
|  |  | 
|  | if options.only_failed: | 
|  | suite.load_from_file(last_run, filter_status=['skipped', 'passed']) | 
|  | suite.selected_platforms = set(p.platform.name for p in suite.instances.values()) | 
|  | elif options.load_tests: | 
|  | suite.load_from_file(options.load_tests, filter_status=['skipped']) | 
|  | suite.selected_platforms = set(p.platform.name for p in suite.instances.values()) | 
|  | elif options.test_only: | 
|  | suite.load_from_file(last_run, filter_status=['skipped']) | 
|  | suite.selected_platforms = set(p.platform.name for p in suite.instances.values()) | 
|  | else: | 
|  | discards = suite.apply_filters( | 
|  | build_only=options.build_only, | 
|  | enable_slow=options.enable_slow, | 
|  | platform=options.platform, | 
|  | exclude_platform=options.exclude_platform, | 
|  | arch=options.arch, | 
|  | tag=options.tag, | 
|  | exclude_tag=options.exclude_tag, | 
|  | force_toolchain=options.force_toolchain, | 
|  | all=options.all, | 
|  | emulation_only=options.emulation_only, | 
|  | run_individual_tests=run_individual_tests, | 
|  | device_testing=options.device_testing, | 
|  | force_platform=options.force_platform | 
|  |  | 
|  | ) | 
|  |  | 
|  | if (options.export_tests or options.list_tests) and options.platform: | 
|  | if len(options.platform) > 1: | 
logger.error("When exporting or listing tests, only one platform "
"should be specified.")
|  | return | 
|  |  | 
|  | for p in options.platform: | 
|  | inst = suite.get_platform_instances(p) | 
|  | if options.export_tests: | 
|  | tests = [x.testcase.cases for x in inst.values()] | 
|  | merged = list(itertools.chain(*tests)) | 
|  | export_tests(options.export_tests, merged) | 
|  | return | 
|  |  | 
|  | count = 0 | 
|  | for i in inst.values(): | 
|  | for c in i.testcase.cases: | 
|  | print(f"- {c}") | 
|  | count += 1 | 
|  |  | 
|  | print(f"Tests found: {count}") | 
|  | return | 
|  |  | 
|  | if VERBOSE > 1 and discards: | 
|  | # if we are using command line platform filter, no need to list every | 
|  | # other platform as excluded, we know that already. | 
|  | # Show only the discards that apply to the selected platforms on the | 
|  | # command line | 
|  |  | 
|  | for i, reason in discards.items(): | 
|  | if options.platform and i.platform.name not in options.platform: | 
|  | continue | 
|  | logger.debug( | 
|  | "{:<25} {:<50} {}SKIPPED{}: {}".format( | 
|  | i.platform.name, | 
|  | i.testcase.name, | 
|  | Fore.YELLOW, | 
|  | Fore.RESET, | 
|  | reason)) | 
|  |  | 
|  | if options.report_excluded: | 
|  | all_tests = suite.get_all_tests() | 
|  | to_be_run = set() | 
|  | for i, p in suite.instances.items(): | 
|  | to_be_run.update(p.testcase.cases) | 
|  |  | 
|  | if all_tests - to_be_run: | 
|  | print("Tests that never build or run:") | 
|  | for not_run in all_tests - to_be_run: | 
|  | print("- {}".format(not_run)) | 
|  |  | 
|  | return | 
|  |  | 
|  | if options.subset: | 
|  | suite.instances = OrderedDict(sorted(suite.instances.items(), | 
|  | key=lambda x: x[0][x[0].find("/") + 1:])) | 
|  |  | 
|  | subset, sets = options.subset.split("/") | 
|  | subset = int(subset) | 
|  | sets = int(sets) | 
|  | total = len(suite.instances) | 
|  | per_set = int(total / sets) | 
|  | num_extra_sets = total - (per_set * sets) | 
|  |  | 
|  | # Try and be more fair for rounding error with integer division | 
|  | # so the last subset doesn't get overloaded, we add 1 extra to | 
|  | # subsets 1..num_extra_sets. | 
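# Example: 10 instances over 3 sets gives per_set = 3 and num_extra_sets = 1,
# so subset 1 covers indices 0-3 and subsets 2 and 3 cover 3 instances each.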
|  | if subset <= num_extra_sets: | 
|  | start = (subset - 1) * (per_set + 1) | 
|  | end = start + per_set + 1 | 
|  | else: | 
|  | base = num_extra_sets * (per_set + 1) | 
|  | start = ((subset - num_extra_sets - 1) * per_set) + base | 
|  | end = start + per_set | 
|  |  | 
|  | sliced_instances = islice(suite.instances.items(), start, end) | 
|  | suite.instances = OrderedDict(sliced_instances) | 
|  |  | 
|  | if options.save_tests: | 
|  | suite.csv_report(options.save_tests) | 
|  | return | 
|  |  | 
|  | logger.info("%d test configurations selected, %d configurations discarded due to filters." % | 
|  | (len(suite.instances), len(discards))) | 
|  |  | 
|  | if options.device_testing: | 
|  | print("\nDevice testing on:") | 
|  | hwm.dump(suite.connected_hardware, suite.selected_platforms) | 
|  | print("") | 
|  |  | 
|  | if options.dry_run: | 
|  | duration = time.time() - start_time | 
|  | logger.info("Completed in %d seconds" % (duration)) | 
|  | return | 
|  |  | 
|  | retries = options.retry_failed + 1 | 
|  | completed = 0 | 
|  |  | 
|  | suite.update_counting() | 
|  | suite.start_time = start_time | 
|  |  | 
|  | while True: | 
|  | completed += 1 | 
|  |  | 
|  | if completed > 1: | 
logger.info("Iteration %d:" % (completed))
|  | time.sleep(options.retry_interval)  # waiting for the system to settle down | 
|  | suite.total_done = suite.total_tests - suite.total_failed | 
|  | suite.total_failed = suite.total_errors | 
|  |  | 
|  | suite.execute() | 
|  | print("") | 
|  |  | 
|  | retries = retries - 1 | 
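# Stop when the retry budget is exhausted or when the failure count has
# dropped to the error count, i.e. the remaining failures are all errors.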
|  | if retries == 0 or suite.total_failed == suite.total_errors: | 
|  | break | 
|  |  | 
|  |  | 
|  | # figure out which report to use for size comparison | 
|  | if options.compare_report: | 
|  | report_to_use = options.compare_report | 
|  | elif options.last_metrics: | 
|  | report_to_use = previous_results_file | 
|  | else: | 
|  | report_to_use = suite.RELEASE_DATA | 
|  |  | 
|  | suite.footprint_reports(report_to_use, | 
|  | options.show_footprint, | 
|  | options.all_deltas, | 
|  | options.footprint_threshold, | 
|  | options.last_metrics) | 
|  |  | 
|  | suite.duration = time.time() - start_time | 
|  | suite.update_counting() | 
|  | suite.summary(options.disable_unrecognized_section_test) | 
|  |  | 
|  | if options.coverage: | 
|  | if not options.gcov_tool: | 
|  | use_system_gcov = False | 
|  |  | 
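# Host-run platform types ("native", "unit") are built with the host
# toolchain, so use the system gcov for them; otherwise use the gcov from
# the Zephyr SDK when ZEPHYR_SDK_INSTALL_DIR is set.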
|  | for plat in options.coverage_platform: | 
|  | ts_plat = suite.get_platform(plat) | 
|  | if ts_plat and (ts_plat.type in {"native", "unit"}): | 
|  | use_system_gcov = True | 
|  |  | 
|  | if use_system_gcov or "ZEPHYR_SDK_INSTALL_DIR" not in os.environ: | 
|  | options.gcov_tool = "gcov" | 
|  | else: | 
|  | options.gcov_tool = os.path.join(os.environ["ZEPHYR_SDK_INSTALL_DIR"], | 
|  | "x86_64-zephyr-elf/bin/x86_64-zephyr-elf-gcov") | 
|  |  | 
|  | logger.info("Generating coverage files...") | 
|  | coverage_tool = CoverageTool.factory(options.coverage_tool) | 
|  | coverage_tool.gcov_tool = options.gcov_tool | 
|  | coverage_tool.base_dir = ZEPHYR_BASE | 
|  | coverage_tool.add_ignore_file('generated') | 
|  | coverage_tool.add_ignore_directory('tests') | 
|  | coverage_tool.add_ignore_directory('samples') | 
|  | coverage_tool.generate(options.outdir) | 
|  |  | 
|  | if options.device_testing: | 
|  | print("\nHardware distribution summary:\n") | 
|  | table = [] | 
|  | header = ['Board', 'ID', 'Counter'] | 
|  | for p in hwm.connected_hardware: | 
|  | if p['connected'] and p['platform'] in suite.selected_platforms: | 
|  | row = [p['platform'], p.get('id', None), p['counter']] | 
|  | table.append(row) | 
|  | print(tabulate(table, headers=header, tablefmt="github")) | 
|  |  | 
|  | suite.save_reports(options.report_name, | 
|  | options.report_suffix, | 
|  | options.report_dir, | 
|  | options.no_update, | 
|  | options.release, | 
|  | options.only_failed) | 
|  |  | 
if suite.total_failed or (suite.warnings and suite.warnings_as_errors):
|  | sys.exit(1) | 
|  |  | 
|  |  | 
|  | if __name__ == "__main__": | 
|  | try: | 
|  | main() | 
|  | finally: | 
|  | if os.isatty(1): # stdout is interactive | 
|  | os.system("stty sane") |