|  | #!/usr/bin/env python3 | 
|  | """Zephyr Sanity Tests | 
|  |  | 
|  | This script scans for the set of unit test applications in the git | 
|  | repository and attempts to execute them. By default, it tries to | 
|  | build each test case on one platform per architecture, using a precedence | 
list defined in an architecture configuration file, and if possible
|  | run the tests in the QEMU emulator. | 
|  |  | 
|  | Test cases are detected by the presence of a 'testcase.ini' file in | 
|  | the application's project directory. This file may contain one or | 
|  | more blocks, each identifying a test scenario. The title of the block | 
|  | is a name for the test case, which only needs to be unique for the | 
|  | test cases specified in that testcase.ini file. The full canonical | 
|  | name for each test case is <path to test case under samples/>/<block>. | 
|  |  | 
|  | Each testcase.ini block can define the following key/value pairs: | 
|  |  | 
|  | tags = <list of tags> (required) | 
|  | A set of string tags for the testcase. Usually pertains to | 
|  | functional domains but can be anything. Command line invocations | 
|  | of this script can filter the set of tests to run based on tag. | 
|  |  | 
|  | skip = <True|False> (default False) | 
Skip the test case unconditionally. This can be used for broken tests.
|  |  | 
|  | slow = <True|False> (default False) | 
|  | Don't run this test case unless --enable-slow was passed in on the | 
|  | command line. Intended for time-consuming test cases that are only | 
|  | run under certain circumstances, like daily builds. These test cases | 
|  | are still compiled. | 
|  |  | 
|  | extra_args = <list of extra arguments> | 
|  | Extra arguments to pass to Make when building or running the | 
|  | test case. | 
|  |  | 
|  | build_only = <True|False> (default False) | 
|  | If true, don't try to run the test under QEMU even if the | 
|  | selected platform supports it. | 
|  |  | 
|  | timeout = <number of seconds> | 
Length of time to run the test in QEMU before automatically killing it.
Defaults to 60 seconds.
|  |  | 
|  | arch_whitelist = <list of arches, such as x86, arm, arc> | 
Set of architectures that this test case should only be run on.
|  |  | 
|  | arch_exclude = <list of arches, such as x86, arm, arc> | 
|  | Set of architectures that this test case should not run on. | 
|  |  | 
|  | platform_whitelist = <list of platforms> | 
Set of platforms that this test case should only be run on.
|  |  | 
|  | platform_exclude = <list of platforms> | 
|  | Set of platforms that this test case should not run on. | 
|  |  | 
|  | extra_sections = <list of extra binary sections> | 
|  | When computing sizes, sanitycheck will report errors if it finds | 
|  | extra, unexpected sections in the Zephyr binary unless they are named | 
|  | here. They will not be included in the size calculation. | 
|  |  | 
|  | filter = <expression> | 
|  | Filter whether the testcase should be run by evaluating an expression | 
|  | against an environment containing the following values: | 
|  |  | 
|  | { ARCH : <architecture>, | 
|  | PLATFORM : <platform>, | 
|  | <all CONFIG_* key/value pairs in the test's generated defconfig>, | 
|  | *<env>: any environment variable available | 
|  | } | 
|  |  | 
|  | The grammar for the expression language is as follows: | 
|  |  | 
|  | expression ::= expression "and" expression | 
|  | | expression "or" expression | 
|  | | "not" expression | 
|  | | "(" expression ")" | 
|  | | symbol "==" constant | 
|  | | symbol "!=" constant | 
|  | | symbol "<" number | 
|  | | symbol ">" number | 
|  | | symbol ">=" number | 
|  | | symbol "<=" number | 
|  | | symbol "in" list | 
|  | | symbol ":" string | 
|  | | symbol | 
|  |  | 
|  | list ::= "[" list_contents "]" | 
|  |  | 
|  | list_contents ::= constant | 
|  | | list_contents "," constant | 
|  |  | 
|  | constant ::= number | 
|  | | string | 
|  |  | 
|  |  | 
|  | For the case where expression ::= symbol, it evaluates to true | 
|  | if the symbol is defined to a non-empty string. | 
|  |  | 
|  | Operator precedence, starting from lowest to highest: | 
|  |  | 
|  | or (left associative) | 
|  | and (left associative) | 
|  | not (right associative) | 
|  | all comparison operators (non-associative) | 
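
For example, with placeholder symbols A, B and C, the expression
"A == 1 or B == 2 and not C" groups as "(A == 1) or ((B == 2) and (not C))".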
|  |  | 
|  | arch_whitelist, arch_exclude, platform_whitelist, platform_exclude | 
|  | are all syntactic sugar for these expressions. For instance | 
|  |  | 
|  | arch_exclude = x86 arc | 
|  |  | 
is the same as:
|  |  | 
|  | filter = not ARCH in ["x86", "arc"] | 
|  |  | 
|  | The ':' operator compiles the string argument as a regular expression, | 
|  | and then returns a true value only if the symbol's value in the environment | 
|  | matches. For example, if CONFIG_SOC="quark_se" then | 
|  |  | 
|  | filter = CONFIG_SOC : "quark.*" | 
|  |  | 
would match it.
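
Putting the pieces above together, a purely illustrative testcase.ini
might contain (key names as documented above, values hypothetical):

[test]
tags = kernel timers
timeout = 120
arch_exclude = arc
filter = CONFIG_PRINTK and not CONFIG_BT

[test_build_only]
tags = kernel timers
extra_args = SOME_MAKE_VAR=1
build_only = True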
|  |  | 
Architectures and platforms are defined in architecture configuration
files, which are stored by default in scripts/sanity_chk/arches/. These
|  | each define an [arch] block with the following key/value pairs: | 
|  |  | 
|  | name = <arch name> | 
|  | The name of the arch. Example: x86 | 
|  |  | 
|  | platforms = <list of supported platforms in order of precedence> | 
|  | List of supported platforms for this arch. The ordering here | 
|  | is used to select a default platform to build for that arch. | 
|  |  | 
|  | For every platform defined, there must be a corresponding block for it | 
|  | in the arch configuration file. This block can be empty if there are | 
no special definitions for that platform. Options are:
|  |  | 
|  | qemu_support = <True|False> (default False) | 
|  | Indicates whether binaries for this platform can run under QEMU | 
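
For illustration, a hypothetical x86.ini might look like:

[arch]
name = x86
platforms = qemu_x86 some_board
supported_toolchains = zephyr

[qemu_x86]
qemu_support = True

[some_board]

(supported_toolchains is also read by this script, even though it is not
described above; see arch_valid_keys and platform_valid_keys below.)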
|  |  | 
|  | The set of test cases that actually run depends on directives in the | 
|  | testcase and architecture .ini file and options passed in on the command | 
|  | line. If there is any confusion, running with -v or --discard-report | 
|  | can help show why particular test cases were skipped. | 
|  |  | 
|  | Metrics (such as pass/fail state and binary size) for the last code | 
|  | release are stored in scripts/sanity_chk/sanity_last_release.csv. | 
|  | To update this, pass the --all --release options. | 
|  |  | 
|  | To load arguments from a file, write '+' before the file name, e.g., | 
+file_name. The file's contents must be one or more valid arguments,
separated by line breaks rather than spaces.
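
For example, if a hypothetical file named args.txt contains:

--platform
qemu_x86
-v

then passing +args.txt is equivalent to passing those three arguments
directly on the command line.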
|  |  | 
|  | Most everyday users will run with no arguments. | 
|  | """ | 
|  |  | 
|  | import argparse | 
|  | import os | 
|  | import sys | 
|  | import configparser | 
|  | import re | 
|  | import tempfile | 
|  | import subprocess | 
|  | import multiprocessing | 
|  | import select | 
|  | import shutil | 
|  | import signal | 
|  | import threading | 
|  | import time | 
|  | import csv | 
|  | import glob | 
|  | import concurrent | 
|  | import concurrent.futures | 
|  |  | 
|  | if "ZEPHYR_BASE" not in os.environ: | 
|  | sys.stderr.write("$ZEPHYR_BASE environment variable undefined.\n") | 
|  | exit(1) | 
|  | ZEPHYR_BASE = os.environ["ZEPHYR_BASE"] | 
|  |  | 
|  | sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/")) | 
|  |  | 
|  | import expr_parser | 
|  |  | 
|  | VERBOSE = 0 | 
|  | LAST_SANITY = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk", | 
|  | "last_sanity.csv") | 
|  | RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk", | 
|  | "sanity_last_release.csv") | 
|  | CPU_COUNTS = multiprocessing.cpu_count() | 
|  |  | 
|  | if os.isatty(sys.stdout.fileno()): | 
|  | TERMINAL = True | 
|  | COLOR_NORMAL = '\033[0m' | 
|  | COLOR_RED = '\033[91m' | 
|  | COLOR_GREEN = '\033[92m' | 
|  | COLOR_YELLOW = '\033[93m' | 
|  | else: | 
|  | TERMINAL = False | 
|  | COLOR_NORMAL = "" | 
|  | COLOR_RED = "" | 
|  | COLOR_GREEN = "" | 
|  | COLOR_YELLOW = "" | 
|  |  | 
|  | class SanityCheckException(Exception): | 
|  | pass | 
|  |  | 
|  | class SanityRuntimeError(SanityCheckException): | 
|  | pass | 
|  |  | 
|  | class ConfigurationError(SanityCheckException): | 
|  | def __init__(self, cfile, message): | 
|  | self.cfile = cfile | 
|  | self.message = message | 
|  |  | 
|  | def __str__(self): | 
|  | return repr(self.cfile + ": " + self.message) | 
|  |  | 
|  | class MakeError(SanityCheckException): | 
|  | pass | 
|  |  | 
|  | class BuildError(MakeError): | 
|  | pass | 
|  |  | 
|  | class ExecutionError(MakeError): | 
|  | pass | 
|  |  | 
|  | log_file = None | 
|  |  | 
|  | # Debug Functions | 
|  | def info(what): | 
|  | sys.stdout.write(what + "\n") | 
|  | if log_file: | 
|  | log_file.write(what + "\n") | 
|  | log_file.flush() | 
|  |  | 
|  | def error(what): | 
|  | sys.stderr.write(COLOR_RED + what + COLOR_NORMAL + "\n") | 
|  | if log_file: | 
log_file.write(what + "\n")
|  | log_file.flush() | 
|  |  | 
|  | def debug(what): | 
|  | if VERBOSE >= 1: | 
|  | info(what) | 
|  |  | 
|  | def verbose(what): | 
|  | if VERBOSE >= 2: | 
|  | info(what) | 
|  |  | 
|  | class Handler: | 
|  | RUN_PASSED = "PROJECT EXECUTION SUCCESSFUL" | 
|  | RUN_FAILED = "PROJECT EXECUTION FAILED" | 
|  | def __init__(self, name, outdir, log_fn, timeout, unit=False): | 
|  | """Constructor | 
|  |  | 
|  | @param name Arbitrary name of the created thread | 
|  | @param outdir Working directory, should be where qemu.pid gets created | 
|  | by kbuild | 
|  | @param log_fn Absolute path to write out QEMU's log data | 
|  | @param timeout Kill the QEMU process if it doesn't finish up within | 
|  | the given number of seconds | 
|  | """ | 
|  | self.lock = threading.Lock() | 
|  | self.state = "waiting" | 
|  | self.metrics = {} | 
|  | self.metrics["qemu_time"] = 0 | 
|  | self.metrics["ram_size"] = 0 | 
|  | self.metrics["rom_size"] = 0 | 
|  | self.unit = unit | 
|  |  | 
|  | def set_state(self, state, metrics): | 
|  | self.lock.acquire() | 
|  | self.state = state | 
|  | self.metrics.update(metrics) | 
|  | self.lock.release() | 
|  |  | 
|  | def get_state(self): | 
|  | self.lock.acquire() | 
|  | ret = (self.state, self.metrics) | 
|  | self.lock.release() | 
|  | return ret | 
|  |  | 
|  | class UnitHandler(Handler): | 
|  | def __init__(self, name, sourcedir, outdir, run_log, valgrind_log, timeout): | 
|  | """Constructor | 
|  |  | 
|  | @param name Arbitrary name of the created thread | 
@param sourcedir Absolute path to the test source directory (used when
invoking gcov)
@param outdir Working directory containing the test binary
@param run_log Absolute path to runtime logs
@param valgrind_log Absolute path to valgrind's log
@param timeout Kill the test binary if it doesn't finish up within
the given number of seconds
|  | """ | 
|  | super().__init__(name, outdir, run_log, timeout, True) | 
|  |  | 
|  | self.timeout = timeout | 
|  | self.sourcedir = sourcedir | 
|  | self.outdir = outdir | 
|  | self.run_log = run_log | 
|  | self.valgrind_log = valgrind_log | 
|  | self.returncode = 0 | 
|  | self.set_state("running", {}) | 
|  |  | 
|  | def handle(self): | 
|  | out_state = "failed" | 
|  |  | 
|  | with open(self.run_log, "wt") as rl, open(self.valgrind_log, "wt") as vl: | 
|  | try: | 
|  | binary = os.path.join(self.outdir, "testbinary") | 
|  | command = [binary] | 
|  | if shutil.which("valgrind"): | 
|  | command = ["valgrind", "--error-exitcode=2", | 
|  | "--leak-check=full"] + command | 
|  | returncode = subprocess.call(command, timeout=self.timeout, | 
|  | stdout=rl, stderr=vl) | 
|  | self.returncode = returncode | 
|  | if returncode != 0: | 
|  | if self.returncode == 1: | 
|  | out_state = "failed" | 
|  | else: | 
|  | out_state = "failed valgrind" | 
|  | else: | 
|  | out_state = "passed" | 
|  | except subprocess.TimeoutExpired: | 
|  | out_state = "timeout" | 
|  | self.returncode = 1 | 
|  |  | 
# shell=True expects a single command string rather than a list
returncode = subprocess.call("GCOV_PREFIX=" + self.outdir + " gcov " + self.sourcedir + " -s " + self.outdir, shell=True)
|  |  | 
|  | self.set_state(out_state, {}) | 
|  |  | 
|  | class QEMUHandler(Handler): | 
|  | """Spawns a thread to monitor QEMU output from pipes | 
|  |  | 
|  | We pass QEMU_PIPE to 'make qemu' and monitor the pipes for output. | 
|  | We need to do this as once qemu starts, it runs forever until killed. | 
|  | Test cases emit special messages to the console as they run, we check | 
|  | for these to collect whether the test passed or failed. | 
|  | """ | 
|  |  | 
|  | @staticmethod | 
|  | def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results): | 
|  | fifo_in = fifo_fn + ".in" | 
|  | fifo_out = fifo_fn + ".out" | 
|  |  | 
|  | # These in/out nodes are named from QEMU's perspective, not ours | 
|  | if os.path.exists(fifo_in): | 
|  | os.unlink(fifo_in) | 
|  | os.mkfifo(fifo_in) | 
|  | if os.path.exists(fifo_out): | 
|  | os.unlink(fifo_out) | 
|  | os.mkfifo(fifo_out) | 
|  |  | 
|  | # We don't do anything with out_fp but we need to open it for | 
|  | # writing so that QEMU doesn't block, due to the way pipes work | 
|  | out_fp = open(fifo_in, "wb") | 
# Disable internal buffering; we don't
|  | # want read() or poll() to ever block if there is data in there | 
|  | in_fp = open(fifo_out, "rb", buffering=0) | 
|  | log_out_fp = open(logfile, "wt") | 
|  |  | 
|  | start_time = time.time() | 
|  | timeout_time = start_time + timeout | 
|  | p = select.poll() | 
|  | p.register(in_fp, select.POLLIN) | 
|  |  | 
|  | metrics = {} | 
|  | line = "" | 
|  | while True: | 
|  | this_timeout = int((timeout_time - time.time()) * 1000) | 
|  | if this_timeout < 0 or not p.poll(this_timeout): | 
|  | out_state = "timeout" | 
|  | break | 
|  |  | 
|  | try: | 
|  | c = in_fp.read(1).decode("utf-8") | 
|  | except UnicodeDecodeError: | 
|  | # Test is writing something weird, fail | 
|  | out_state = "unexpected byte" | 
|  | break | 
|  |  | 
|  | if c == "": | 
|  | # EOF, this shouldn't happen unless QEMU crashes | 
|  | out_state = "unexpected eof" | 
|  | break | 
|  | line = line + c | 
|  | if c != "\n": | 
|  | continue | 
|  |  | 
|  | # line contains a full line of data output from QEMU | 
|  | log_out_fp.write(line) | 
|  | log_out_fp.flush() | 
|  | line = line.strip() | 
|  | verbose("QEMU: %s" % line) | 
|  |  | 
|  | if line == handler.RUN_PASSED: | 
|  | out_state = "passed" | 
|  | break | 
|  |  | 
|  | if line == handler.RUN_FAILED: | 
|  | out_state = "failed" | 
|  | break | 
|  |  | 
|  | # TODO: Add support for getting numerical performance data | 
|  | # from test cases. Will involve extending test case reporting | 
|  | # APIs. Add whatever gets reported to the metrics dictionary | 
|  | line = "" | 
|  |  | 
|  | metrics["qemu_time"] = time.time() - start_time | 
|  | verbose("QEMU complete (%s) after %f seconds" % | 
|  | (out_state, metrics["qemu_time"])) | 
|  | handler.set_state(out_state, metrics) | 
|  |  | 
|  | log_out_fp.close() | 
|  | out_fp.close() | 
|  | in_fp.close() | 
|  |  | 
|  | pid = int(open(pid_fn).read()) | 
|  | os.unlink(pid_fn) | 
|  | try: | 
|  | os.kill(pid, signal.SIGTERM) | 
|  | except ProcessLookupError: | 
|  | # Oh well, as long as it's dead! User probably sent Ctrl-C | 
|  | pass | 
|  |  | 
|  | os.unlink(fifo_in) | 
|  | os.unlink(fifo_out) | 
|  |  | 
|  | def __init__(self, name, outdir, log_fn, timeout): | 
|  | """Constructor | 
|  |  | 
|  | @param name Arbitrary name of the created thread | 
|  | @param outdir Working directory, should be where qemu.pid gets created | 
|  | by kbuild | 
|  | @param log_fn Absolute path to write out QEMU's log data | 
|  | @param timeout Kill the QEMU process if it doesn't finish up within | 
|  | the given number of seconds | 
|  | """ | 
|  | super().__init__(name, outdir, log_fn, timeout) | 
|  | self.results = {} | 
|  |  | 
|  | # We pass this to QEMU which looks for fifos with .in and .out | 
|  | # suffixes. | 
|  | self.fifo_fn = os.path.join(outdir, "qemu-fifo") | 
|  |  | 
|  | self.pid_fn = os.path.join(outdir, "qemu.pid") | 
|  | if os.path.exists(self.pid_fn): | 
|  | os.unlink(self.pid_fn) | 
|  |  | 
|  | self.log_fn = log_fn | 
|  | self.thread = threading.Thread(name=name, target=QEMUHandler._thread, | 
|  | args=(self, timeout, outdir, | 
|  | self.log_fn, self.fifo_fn, | 
|  | self.pid_fn, self.results)) | 
|  | self.thread.daemon = True | 
|  | verbose("Spawning QEMU process for %s" % name) | 
|  | self.thread.start() | 
|  |  | 
|  | def get_fifo(self): | 
|  | return self.fifo_fn | 
|  |  | 
|  | class SizeCalculator: | 
|  |  | 
|  | alloc_sections = ["bss", "noinit"] | 
|  | rw_sections = ["datas", "initlevel", "_k_task_list", "_k_event_list", | 
|  | "_k_memory_pool", "exceptions", "initshell", | 
|  | "_static_thread_area", "_k_timer_area", | 
|  | "_k_mem_slab_area", "_k_mem_pool_area", | 
|  | "_k_sem_area", "_k_mutex_area", "_k_alert_area", | 
|  | "_k_fifo_area", "_k_lifo_area", "_k_stack_area", | 
|  | "_k_msgq_area", "_k_mbox_area", "_k_pipe_area", | 
|  | "net_if", "net_if_event", "net_stack", "net_l2_data", | 
|  | "_k_queue_area"] | 
|  | # These get copied into RAM only on non-XIP | 
|  | ro_sections = ["text", "ctors", "init_array", "reset", | 
|  | "rodata", "devconfig", "net_l2", "vector"] | 
|  |  | 
|  | def __init__(self, filename, extra_sections): | 
|  | """Constructor | 
|  |  | 
|  | @param filename Path to the output binary | 
|  | The <filename> is parsed by objdump to determine section sizes | 
|  | """ | 
|  | # Make sure this is an ELF binary | 
|  | with open(filename, "rb") as f: | 
|  | magic = f.read(4) | 
|  |  | 
|  | if (magic != b'\x7fELF'): | 
|  | raise SanityRuntimeError("%s is not an ELF binary" % filename) | 
|  |  | 
# Search for CONFIG_XIP in the ELF's list of symbols using nm and awk.
# grep cannot be used as it returns an error if the symbol is not found.
|  | is_xip_command = "nm " + filename + " | awk '/CONFIG_XIP/ { print $3 }'" | 
|  | is_xip_output = subprocess.check_output(is_xip_command, shell=True, | 
|  | stderr=subprocess.STDOUT).decode("utf-8").strip() | 
|  | if is_xip_output.endswith("no symbols"): | 
|  | raise SanityRuntimeError("%s has no symbol information" % filename) | 
|  | self.is_xip = (len(is_xip_output) != 0) | 
|  |  | 
|  | self.filename = filename | 
|  | self.sections = [] | 
|  | self.rom_size = 0 | 
|  | self.ram_size = 0 | 
|  | self.extra_sections = extra_sections | 
|  |  | 
|  | self._calculate_sizes() | 
|  |  | 
|  | def get_ram_size(self): | 
|  | """Get the amount of RAM the application will use up on the device | 
|  |  | 
|  | @return amount of RAM, in bytes | 
|  | """ | 
|  | return self.ram_size | 
|  |  | 
|  | def get_rom_size(self): | 
|  | """Get the size of the data that this application uses on device's flash | 
|  |  | 
|  | @return amount of ROM, in bytes | 
|  | """ | 
|  | return self.rom_size | 
|  |  | 
|  | def unrecognized_sections(self): | 
|  | """Get a list of sections inside the binary that weren't recognized | 
|  |  | 
@return list of unrecognized section names
|  | """ | 
|  | slist = [] | 
|  | for v in self.sections: | 
|  | if not v["recognized"]: | 
|  | slist.append(v["name"]) | 
|  | return slist | 
|  |  | 
|  | def _calculate_sizes(self): | 
|  | """ Calculate RAM and ROM usage by section """ | 
|  | objdump_command = "objdump -h " + self.filename | 
|  | objdump_output = subprocess.check_output(objdump_command, | 
|  | shell=True).decode("utf-8").splitlines() | 
|  |  | 
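# A typical line of interest from 'objdump -h' looks like this
# (numbers are hypothetical):
#   2 bss           00000400  00400000  00400000  00003000  2**3
# i.e. words[2] is the section size, words[3] the VMA (virtual address)
# and words[4] the LMA (load address).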
|  | for line in objdump_output: | 
|  | words = line.split() | 
|  |  | 
|  | if (len(words) == 0):               # Skip lines that are too short | 
|  | continue | 
|  |  | 
|  | index = words[0] | 
|  | if (not index[0].isdigit()):        # Skip lines that do not start | 
|  | continue                        # with a digit | 
|  |  | 
|  | name = words[1]                     # Skip lines with section names | 
|  | if (name[0] == '.'):                # starting with '.' | 
|  | continue | 
|  |  | 
|  | # TODO this doesn't actually reflect the size in flash or RAM as | 
|  | # it doesn't include linker-imposed padding between sections. | 
|  | # It is close though. | 
|  | size = int(words[2], 16) | 
|  | if size == 0: | 
|  | continue | 
|  |  | 
|  | load_addr = int(words[4], 16) | 
|  | virt_addr = int(words[3], 16) | 
|  |  | 
|  | # Add section to memory use totals (for both non-XIP and XIP scenarios) | 
|  | # Unrecognized section names are not included in the calculations. | 
|  | recognized = True | 
|  | if name in SizeCalculator.alloc_sections: | 
|  | self.ram_size += size | 
|  | stype = "alloc" | 
|  | elif name in SizeCalculator.rw_sections: | 
|  | self.ram_size += size | 
|  | self.rom_size += size | 
|  | stype = "rw" | 
|  | elif name in SizeCalculator.ro_sections: | 
|  | self.rom_size += size | 
|  | if not self.is_xip: | 
|  | self.ram_size += size | 
|  | stype = "ro" | 
|  | else: | 
|  | stype = "unknown" | 
|  | if name not in self.extra_sections: | 
|  | recognized = False | 
|  |  | 
|  | self.sections.append({"name" : name, "load_addr" : load_addr, | 
|  | "size" : size, "virt_addr" : virt_addr, | 
|  | "type" : stype, "recognized" : recognized}) | 
|  |  | 
|  |  | 
|  | class MakeGoal: | 
|  | """Metadata class representing one of the sub-makes called by MakeGenerator | 
|  |  | 
MakeGenerator returns a dictionary of these which can then be associated
|  | with TestInstances to get a complete picture of what happened during a test. | 
|  | MakeGenerator is used for tasks outside of building tests (such as | 
|  | defconfigs) which is why MakeGoal is a separate class from TestInstance. | 
|  | """ | 
|  | def __init__(self, name, text, qemu, make_log, build_log, run_log, | 
|  | qemu_log): | 
|  | self.name = name | 
|  | self.text = text | 
|  | self.qemu = qemu | 
|  | self.make_log = make_log | 
|  | self.build_log = build_log | 
|  | self.run_log = run_log | 
|  | self.qemu_log = qemu_log | 
|  | self.make_state = "waiting" | 
|  | self.failed = False | 
|  | self.finished = False | 
|  | self.reason = None | 
|  | self.metrics = {} | 
|  |  | 
|  | def get_error_log(self): | 
|  | if self.make_state == "waiting": | 
|  | # Shouldn't ever see this; breakage in the main Makefile itself. | 
|  | return self.make_log | 
|  | elif self.make_state == "building": | 
|  | # Failure when calling the sub-make to build the code | 
|  | return self.build_log | 
|  | elif self.make_state == "running": | 
|  | # Failure in sub-make for "make qemu", qemu probably failed to start | 
|  | return self.run_log | 
|  | elif self.make_state == "finished": | 
|  | # QEMU finished, but timed out or otherwise wasn't successful | 
|  | return self.qemu_log | 
|  |  | 
|  | def fail(self, reason): | 
|  | self.failed = True | 
|  | self.finished = True | 
|  | self.reason = reason | 
|  |  | 
|  | def success(self): | 
|  | self.finished = True | 
|  |  | 
|  | def __str__(self): | 
|  | if self.finished: | 
|  | if self.failed: | 
|  | return "[%s] failed (%s: see %s)" % (self.name, self.reason, | 
|  | self.get_error_log()) | 
|  | else: | 
|  | return "[%s] passed" % self.name | 
|  | else: | 
|  | return "[%s] in progress (%s)" % (self.name, self.make_state) | 
|  |  | 
|  |  | 
|  | class MakeGenerator: | 
|  | """Generates a Makefile which just calls a bunch of sub-make sessions | 
|  |  | 
|  | In any given test suite we may need to build dozens if not hundreds of | 
|  | test cases. The cleanest way to parallelize this is to just let Make | 
|  | do the parallelization, sharing the jobserver among all the different | 
|  | sub-make targets. | 
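
For a build-only goal, the generated Makefile ends up looking roughly like
this (goal name hypothetical; see the templates below for the exact text):

.PHONY: qemu_x86_tests_kernel_test
qemu_x86_tests_kernel_test:
	@echo sanity_test_building qemu_x86_tests_kernel_test >&2
	$(MAKE) -C <srcdir> O=<outdir> ... > <outdir>/build.log 2>&1
	@echo sanity_test_finished qemu_x86_tests_kernel_test >&2

all: <goal names>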
|  | """ | 
|  |  | 
|  | GOAL_HEADER_TMPL = """.PHONY: {goal} | 
|  | {goal}: | 
|  | """ | 
|  |  | 
|  | MAKE_RULE_TMPL = """\t@echo sanity_test_{phase} {goal} >&2 | 
|  | \t$(MAKE) -C {directory} O={outdir} V={verb} EXTRA_CFLAGS="-Werror {cflags}" EXTRA_ASMFLAGS=-Wa,--fatal-warnings EXTRA_LDFLAGS=--fatal-warnings {args} >{logfile} 2>&1 | 
|  | """ | 
|  |  | 
|  | GOAL_FOOTER_TMPL = "\t@echo sanity_test_finished {goal} >&2\n\n" | 
|  |  | 
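# Matches the "sanity_test_<phase> <goal>" progress lines that the generated
# rules above echo to stderr, as well as GNU Make error lines such as
# "make: *** [<goal>] Error 1", so execute() can track each goal's state.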
|  | re_make = re.compile("sanity_test_([A-Za-z0-9]+) (.+)|$|make[:] \*\*\* \[(.+:.+: )?(.+)\] Error.+$") | 
|  |  | 
|  | def __init__(self, base_outdir, asserts=False,  deprecations=False, ccache=0): | 
|  | """MakeGenerator constructor | 
|  |  | 
|  | @param base_outdir Intended to be the base out directory. A make.log | 
|  | file will be created here which contains the output of the | 
|  | top-level Make session, as well as the dynamic control Makefile | 
@param asserts If true, build with assertions enabled
(-DCONFIG_ASSERT=1 -D__ASSERT_ON=2)
@param deprecations If true, pass -Wno-deprecated-declarations so use of
deprecated APIs does not break the -Werror build
@param ccache If set, pass USE_CCACHE=1 to the sub-makes
|  | """ | 
|  | self.goals = {} | 
|  | if not os.path.exists(base_outdir): | 
|  | os.makedirs(base_outdir) | 
|  | self.logfile = os.path.join(base_outdir, "make.log") | 
|  | self.makefile = os.path.join(base_outdir, "Makefile") | 
|  | self.asserts = asserts | 
|  | self.deprecations = deprecations | 
|  | self.ccache = ccache | 
|  |  | 
|  | def _get_rule_header(self, name): | 
|  | return MakeGenerator.GOAL_HEADER_TMPL.format(goal=name) | 
|  |  | 
|  | def _get_sub_make(self, name, phase, workdir, outdir, logfile, args): | 
|  | verb = "1" if VERBOSE else "0" | 
|  | args = " ".join(args) | 
|  |  | 
|  | if self.asserts: | 
|  | cflags="-DCONFIG_ASSERT=1 -D__ASSERT_ON=2" | 
|  | else: | 
|  | cflags="" | 
|  |  | 
|  | if self.deprecations: | 
|  | cflags = cflags + "  -Wno-deprecated-declarations" | 
|  |  | 
|  | if self.ccache: | 
|  | args = args + " USE_CCACHE=1" | 
|  |  | 
|  | return MakeGenerator.MAKE_RULE_TMPL.format(phase=phase, goal=name, | 
|  | outdir=outdir, cflags=cflags, | 
|  | directory=workdir, verb=verb, | 
|  | args=args, logfile=logfile) | 
|  |  | 
|  | def _get_rule_footer(self, name): | 
|  | return MakeGenerator.GOAL_FOOTER_TMPL.format(goal=name) | 
|  |  | 
|  | def _add_goal(self, outdir): | 
|  | if not os.path.exists(outdir): | 
|  | os.makedirs(outdir) | 
|  |  | 
|  | def add_build_goal(self, name, directory, outdir, args): | 
|  | """Add a goal to invoke a Kbuild session | 
|  |  | 
|  | @param name A unique string name for this build goal. The results | 
|  | dictionary returned by execute() will be keyed by this name. | 
|  | @param directory Absolute path to working directory, will be passed | 
|  | to make -C | 
|  | @param outdir Absolute path to output directory, will be passed to | 
Kbuild via O=<path>
|  | @param args Extra command line arguments to pass to 'make', typically | 
|  | environment variables or specific Make goals | 
|  | """ | 
|  | self._add_goal(outdir) | 
|  | build_logfile = os.path.join(outdir, "build.log") | 
|  | text = (self._get_rule_header(name) + | 
|  | self._get_sub_make(name, "building", directory, | 
|  | outdir, build_logfile, args) + | 
|  | self._get_rule_footer(name)) | 
|  | self.goals[name] = MakeGoal(name, text, None, self.logfile, build_logfile, | 
|  | None, None) | 
|  |  | 
|  | def add_qemu_goal(self, name, directory, outdir, args, timeout=30): | 
|  | """Add a goal to build a Zephyr project and then run it under QEMU | 
|  |  | 
The generated make goal invokes Make twice: the first invocation builds
the default goal, and the second invokes the 'run' goal, which launches
QEMU. The output of the QEMU session is monitored, and the session is
terminated either when the test program reports a pass/fail result or
when the timeout is reached.
|  |  | 
|  | @param name A unique string name for this build goal. The results | 
|  | dictionary returned by execute() will be keyed by this name. | 
|  | @param directory Absolute path to working directory, will be passed | 
|  | to make -C | 
|  | @param outdir Absolute path to output directory, will be passed to | 
Kbuild via O=<path>
|  | @param args Extra command line arguments to pass to 'make', typically | 
|  | environment variables. Do not pass specific Make goals here. | 
|  | @param timeout Maximum length of time QEMU session should be allowed | 
|  | to run before automatically killing it. Default is 30 seconds. | 
|  | """ | 
|  |  | 
|  | self._add_goal(outdir) | 
|  | build_logfile = os.path.join(outdir, "build.log") | 
|  | run_logfile = os.path.join(outdir, "run.log") | 
|  | qemu_logfile = os.path.join(outdir, "qemu.log") | 
|  |  | 
|  | q = QEMUHandler(name, outdir, qemu_logfile, timeout) | 
|  | args.append("QEMU_PIPE=%s" % q.get_fifo()) | 
|  | text = (self._get_rule_header(name) + | 
|  | self._get_sub_make(name, "building", directory, | 
|  | outdir, build_logfile, args) + | 
|  | self._get_sub_make(name, "running", directory, | 
|  | outdir, run_logfile, | 
|  | args + ["run"]) + | 
|  | self._get_rule_footer(name)) | 
|  | self.goals[name] = MakeGoal(name, text, q, self.logfile, build_logfile, | 
|  | run_logfile, qemu_logfile) | 
|  |  | 
|  | def add_unit_goal(self, name, directory, outdir, args, timeout=30, coverage=False): | 
|  | self._add_goal(outdir) | 
|  | build_logfile = os.path.join(outdir, "build.log") | 
|  | run_logfile = os.path.join(outdir, "run.log") | 
|  | qemu_logfile = os.path.join(outdir, "qemu.log") | 
|  | valgrind_logfile = os.path.join(outdir, "valgrind.log") | 
|  | if coverage: | 
|  | args += ["COVERAGE=1"] | 
|  |  | 
|  | # we handle running in the UnitHandler class | 
|  | text = (self._get_rule_header(name) + | 
|  | self._get_sub_make(name, "building", directory, | 
|  | outdir, build_logfile, args) + | 
|  | self._get_rule_footer(name)) | 
|  | q = UnitHandler(name, directory, outdir, run_logfile, valgrind_logfile, timeout) | 
|  | self.goals[name] = MakeGoal(name, text, q, self.logfile, build_logfile, | 
|  | run_logfile, valgrind_logfile) | 
|  |  | 
|  |  | 
|  | def add_test_instance(self, ti, build_only=False, enable_slow=False, coverage=False, | 
|  | extra_args=[]): | 
|  | """Add a goal to build/test a TestInstance object | 
|  |  | 
|  | @param ti TestInstance object to build. The status dictionary returned | 
|  | by execute() will be keyed by its .name field. | 
|  | """ | 
|  | args = ti.test.extra_args[:] | 
|  | args.extend(["ARCH=%s" % ti.platform.arch.name, | 
|  | "BOARD=%s" % ti.platform.name]) | 
|  | args.extend(extra_args) | 
|  | if (ti.platform.qemu_support and (not ti.build_only) and | 
|  | (not build_only) and (enable_slow or not ti.test.slow)): | 
|  | self.add_qemu_goal(ti.name, ti.test.code_location, ti.outdir, | 
|  | args, ti.test.timeout) | 
|  | elif ti.test.type == "unit": | 
|  | self.add_unit_goal(ti.name, ti.test.code_location, ti.outdir, | 
|  | args, ti.test.timeout, coverage) | 
|  | else: | 
|  | self.add_build_goal(ti.name, ti.test.code_location, ti.outdir, args) | 
|  |  | 
|  | def execute(self, callback_fn=None, context=None): | 
|  | """Execute all the registered build goals | 
|  |  | 
|  | @param callback_fn If not None, a callback function will be called | 
|  | as individual goals transition between states. This function | 
should accept three parameters: the context object supplied here,
the dictionary of all goals, and the goal whose state changed
|  | @param context Context object to pass to the callback function. | 
|  | Type and semantics are specific to that callback function. | 
|  | @return A dictionary mapping goal names to final status. | 
|  | """ | 
|  |  | 
|  | with open(self.makefile, "wt") as tf, \ | 
|  | open(os.devnull, "wb") as devnull, \ | 
|  | open(self.logfile, "wt") as make_log: | 
|  | # Create our dynamic Makefile and execute it. | 
|  | # Watch stderr output which is where we will keep | 
|  | # track of build state | 
|  | for name, goal in self.goals.items(): | 
|  | tf.write(goal.text) | 
|  | tf.write("all: %s\n" % (" ".join(self.goals.keys()))) | 
|  | tf.flush() | 
|  |  | 
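# -k keeps Make going past individual goal failures so one broken test
# does not stop the whole run; -j runs the goals in parallel.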
|  | cmd = ["make", "-k", "-j", str(CPU_COUNTS * 2), "-f", tf.name, "all"] | 
|  | p = subprocess.Popen(cmd, stderr=subprocess.PIPE, | 
|  | stdout=devnull) | 
|  |  | 
|  | for line in iter(p.stderr.readline, b''): | 
|  | line = line.decode("utf-8") | 
|  | make_log.write(line) | 
|  | verbose("MAKE: " + repr(line.strip())) | 
|  | m = MakeGenerator.re_make.match(line) | 
|  | if not m: | 
|  | continue | 
|  |  | 
|  | state, name, _, error = m.groups() | 
|  | if error: | 
|  | goal = self.goals[error] | 
|  | else: | 
|  | goal = self.goals[name] | 
|  | goal.make_state = state | 
|  |  | 
|  |  | 
|  | if error: | 
|  | # Sometimes QEMU will run an image and then crash out, which | 
|  | # will cause the 'make qemu' invocation to exit with | 
|  | # nonzero status. | 
|  | # Need to distinguish this case from a compilation failure. | 
|  | if goal.qemu: | 
|  | goal.fail("qemu_crash") | 
|  | else: | 
|  | goal.fail("build_error") | 
|  | else: | 
|  | if state == "finished": | 
|  | if goal.qemu: | 
|  | if goal.qemu.unit: | 
|  | # We can't run unit tests with Make | 
|  | goal.qemu.handle() | 
|  | if goal.qemu.returncode == 2: | 
|  | goal.qemu_log = goal.qemu.valgrind_log | 
|  | elif goal.qemu.returncode: | 
|  | goal.qemu_log = goal.qemu.run_log | 
|  | thread_status, metrics = goal.qemu.get_state() | 
|  | goal.metrics.update(metrics) | 
|  | if thread_status == "passed": | 
|  | goal.success() | 
|  | else: | 
|  | goal.fail(thread_status) | 
|  | else: | 
|  | goal.success() | 
|  |  | 
|  | if callback_fn: | 
|  | callback_fn(context, self.goals, goal) | 
|  |  | 
|  | p.wait() | 
|  | return self.goals | 
|  |  | 
|  |  | 
|  | # "list" - List of strings | 
|  | # "list:<type>" - List of <type> | 
|  | # "set" - Set of unordered, unique strings | 
|  | # "set:<type>" - Set of <type> | 
|  | # "float" - Floating point | 
|  | # "int" - Integer | 
|  | # "bool" - Boolean | 
|  | # "str" - String | 
|  |  | 
|  | # XXX Be sure to update __doc__ if you change any of this!! | 
|  |  | 
|  | arch_valid_keys = {"name" : {"type" : "str", "required" : True}, | 
|  | "platforms" : {"type" : "list", "required" : True}, | 
|  | "supported_toolchains" : {"type" : "list", "required" : True}} | 
|  |  | 
|  | platform_valid_keys = {"qemu_support" : {"type" : "bool", "default" : False}, | 
|  | "supported_toolchains" : {"type" : "list", "default" : []}} | 
|  |  | 
|  | testcase_valid_keys = {"tags" : {"type" : "set", "required" : True}, | 
|  | "type" : {"type" : "str", "default": "integration"}, | 
|  | "extra_args" : {"type" : "list"}, | 
|  | "build_only" : {"type" : "bool", "default" : False}, | 
|  | "skip" : {"type" : "bool", "default" : False}, | 
|  | "slow" : {"type" : "bool", "default" : False}, | 
|  | "timeout" : {"type" : "int", "default" : 60}, | 
|  | "arch_whitelist" : {"type" : "set"}, | 
|  | "arch_exclude" : {"type" : "set"}, | 
|  | "extra_sections" : {"type" : "list", "default" : []}, | 
|  | "platform_exclude" : {"type" : "set"}, | 
|  | "platform_whitelist" : {"type" : "set"}, | 
|  | "filter" : {"type" : "str"}} | 
|  |  | 
|  |  | 
|  | class SanityConfigParser: | 
|  | """Class to read architecture and test case .ini files with semantic checking | 
|  | """ | 
|  | def __init__(self, filename): | 
|  | """Instantiate a new SanityConfigParser object | 
|  |  | 
|  | @param filename Source .ini file to read | 
|  | """ | 
|  | cp = configparser.SafeConfigParser() | 
|  | cp.readfp(open(filename)) | 
|  | self.filename = filename | 
|  | self.cp = cp | 
|  |  | 
|  | def _cast_value(self, value, typestr): | 
|  | v = value.strip() | 
|  | if typestr == "str": | 
|  | return v | 
|  |  | 
|  | elif typestr == "float": | 
|  | return float(v) | 
|  |  | 
|  | elif typestr == "int": | 
|  | return int(v) | 
|  |  | 
|  | elif typestr == "bool": | 
|  | v = v.lower() | 
|  | if v == "true" or v == "1": | 
|  | return True | 
|  | elif v == "" or v == "false" or v == "0": | 
|  | return False | 
|  | raise ConfigurationError(self.filename, | 
|  | "bad value for boolean: '%s'" % value) | 
|  |  | 
|  | elif typestr.startswith("list"): | 
|  | vs = v.split() | 
|  | if len(typestr) > 4 and typestr[4] == ":": | 
|  | return [self._cast_value(vsi, typestr[5:]) for vsi in vs] | 
|  | else: | 
|  | return vs | 
|  |  | 
|  | elif typestr.startswith("set"): | 
|  | vs = v.split() | 
|  | if len(typestr) > 3 and typestr[3] == ":": | 
|  | return set([self._cast_value(vsi, typestr[4:]) for vsi in vs]) | 
|  | else: | 
|  | return set(vs) | 
|  |  | 
|  | else: | 
|  | raise ConfigurationError(self.filename, "unknown type '%s'" % value) | 
|  |  | 
|  |  | 
|  | def sections(self): | 
|  | """Get the set of sections within the .ini file | 
|  |  | 
|  | @return a list of string section names""" | 
|  | return self.cp.sections() | 
|  |  | 
|  | def get_section(self, section, valid_keys): | 
|  | """Get a dictionary representing the keys/values within a section | 
|  |  | 
|  | @param section The section in the .ini file to retrieve data from | 
|  | @param valid_keys A dictionary representing the intended semantics | 
|  | for this section. Each key in this dictionary is a key that could | 
|  | be specified, if a key is given in the .ini file which isn't in | 
|  | here, it will generate an error. Each value in this dictionary | 
|  | is another dictionary containing metadata: | 
|  |  | 
|  | "default" - Default value if not given | 
|  | "type" - Data type to convert the text value to. Simple types | 
|  | supported are "str", "float", "int", "bool" which will get | 
|  | converted to respective Python data types. "set" and "list" | 
|  | may also be specified which will split the value by | 
whitespace (but keep the elements as strings). Finally,
|  | "list:<type>" and "set:<type>" may be given which will | 
|  | perform a type conversion after splitting the value up. | 
|  | "required" - If true, raise an error if not defined. If false | 
|  | and "default" isn't specified, a type conversion will be | 
|  | done on an empty string | 
|  | @return A dictionary containing the section key-value pairs with | 
|  | type conversion and default values filled in per valid_keys | 
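
For example (hypothetical), with
valid_keys = {"timeout" : {"type" : "int", "default" : 60}}
a section with no "timeout" key yields {"timeout" : 60}, while a section
containing "timeout = 120" yields {"timeout" : 120}.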
|  | """ | 
|  |  | 
|  | d = {} | 
|  | cp = self.cp | 
|  |  | 
|  | if not cp.has_section(section): | 
|  | # Just fill it with defaults | 
|  | cp.add_section(section) | 
|  |  | 
|  | for k, v in cp.items(section): | 
|  | if k not in valid_keys: | 
|  | raise ConfigurationError(self.filename, | 
|  | "Unknown config key '%s' in defintiion for '%s'" | 
|  | % (k, section)) | 
|  | d[k] = v | 
|  |  | 
|  | for k, kinfo in valid_keys.items(): | 
|  | if k not in d: | 
|  | if "required" in kinfo: | 
|  | required = kinfo["required"] | 
|  | else: | 
|  | required = False | 
|  |  | 
|  | if required: | 
|  | raise ConfigurationError(self.filename, | 
|  | "missing required value for '%s' in section '%s'" | 
|  | % (k, section)) | 
|  | else: | 
|  | if "default" in kinfo: | 
|  | default = kinfo["default"] | 
|  | else: | 
|  | default = self._cast_value("", kinfo["type"]) | 
|  | d[k] = default | 
|  | else: | 
|  | try: | 
|  | d[k] = self._cast_value(d[k], kinfo["type"]) | 
|  | except ValueError as ve: | 
|  | raise ConfigurationError(self.filename, | 
|  | "bad %s value '%s' for key '%s' in section '%s'" | 
|  | % (kinfo["type"], d[k], k, section)) | 
|  |  | 
|  | return d | 
|  |  | 
|  |  | 
|  | class Platform: | 
|  | """Class representing metadata for a particular platform | 
|  |  | 
|  | Maps directly to BOARD when building""" | 
|  | def __init__(self, arch, name, plat_dict): | 
|  | """Constructor. | 
|  |  | 
|  | @param arch Architecture object for this platform | 
|  | @param name String name for this platform, same as BOARD | 
|  | @param plat_dict SanityConfigParser output on the relevant section | 
|  | in the architecture configuration file which has lots of metadata. | 
|  | See the Architecture class. | 
|  | """ | 
|  | self.name = name | 
|  | self.qemu_support = plat_dict["qemu_support"] | 
|  | self.arch = arch | 
|  | self.supported_toolchains = arch.supported_toolchains | 
|  | if plat_dict["supported_toolchains"]: | 
|  | self.supported_toolchains = plat_dict["supported_toolchains"] | 
|  | # Gets populated in a separate step | 
|  | self.defconfig = None | 
|  |  | 
|  | def __repr__(self): | 
|  | return "<%s on %s>" % (self.name, self.arch.name) | 
|  |  | 
|  |  | 
|  | class Architecture: | 
|  | """Class representing metadata for a particular architecture | 
|  | """ | 
|  | def __init__(self, cfile): | 
|  | """Architecture constructor | 
|  |  | 
|  | @param cfile Path to Architecture configuration file, which gives | 
|  | info about the arch and all the platforms for it | 
|  | """ | 
|  | cp = SanityConfigParser(cfile) | 
|  | self.platforms = [] | 
|  |  | 
|  | arch = cp.get_section("arch", arch_valid_keys) | 
|  |  | 
|  | self.name = arch["name"] | 
|  | self.supported_toolchains = arch["supported_toolchains"] | 
|  |  | 
|  | for plat_name in arch["platforms"]: | 
|  | verbose("Platform: %s" % plat_name) | 
|  | plat_dict = cp.get_section(plat_name, platform_valid_keys) | 
|  | self.platforms.append(Platform(self, plat_name, plat_dict)) | 
|  |  | 
|  | def __repr__(self): | 
|  | return "<arch %s>" % self.name | 
|  |  | 
|  |  | 
|  | class TestCase: | 
|  | """Class representing a test application | 
|  | """ | 
|  | def __init__(self, testcase_root, workdir, name, tc_dict, inifile): | 
|  | """TestCase constructor. | 
|  |  | 
|  | This gets called by TestSuite as it finds and reads testcase.ini files. | 
|  | Multiple TestCase instances may be generated from a single testcase.ini, | 
|  | each one corresponds to a section within that file. | 
|  |  | 
|  | We need to have a unique name for every single test case. Since | 
|  | a testcase.ini can define multiple tests, the canonical name for | 
|  | the test case is <workdir>/<name>. | 
|  |  | 
|  | @param testcase_root Absolute path to the root directory where | 
|  | all the test cases live | 
|  | @param workdir Relative path to the project directory for this | 
|  | test application from the test_case root. | 
|  | @param name Name of this test case, corresponding to the section name | 
|  | in the test case configuration file. For many test cases that just | 
define one test, this can be anything and is usually "test". This is
|  | really only used to distinguish between different cases when | 
|  | the testcase.ini defines multiple tests | 
|  | @param tc_dict Dictionary with section values for this test case | 
from the testcase.ini file
@param inifile Path to the testcase.ini file this test case was defined in
|  | """ | 
|  | self.code_location = os.path.join(testcase_root, workdir) | 
|  | self.type = tc_dict["type"] | 
|  | self.tags = tc_dict["tags"] | 
|  | self.extra_args = tc_dict["extra_args"] | 
|  | self.arch_whitelist = tc_dict["arch_whitelist"] | 
|  | self.arch_exclude = tc_dict["arch_exclude"] | 
|  | self.skip = tc_dict["skip"] | 
|  | self.platform_exclude = tc_dict["platform_exclude"] | 
|  | self.platform_whitelist = tc_dict["platform_whitelist"] | 
|  | self.tc_filter = tc_dict["filter"] | 
|  | self.timeout = tc_dict["timeout"] | 
|  | self.build_only = tc_dict["build_only"] | 
|  | self.slow = tc_dict["slow"] | 
|  | self.extra_sections = tc_dict["extra_sections"] | 
|  | self.path = os.path.join(os.path.basename(os.path.abspath(testcase_root)), | 
|  | workdir, name) | 
|  | self.name = self.path # for now | 
|  | self.defconfig = {} | 
|  | self.inifile = inifile | 
|  |  | 
|  | def __repr__(self): | 
|  | return self.name | 
|  |  | 
|  |  | 
|  |  | 
|  | class TestInstance: | 
|  | """Class representing the execution of a particular TestCase on a platform | 
|  |  | 
|  | @param test The TestCase object we want to build/execute | 
|  | @param platform Platform object that we want to build and run against | 
|  | @param base_outdir Base directory for all test results. The actual | 
|  | out directory used is <outdir>/<platform>/<test case name> | 
|  | """ | 
|  | def __init__(self, test, platform, base_outdir, build_only=False, | 
|  | slow=False, coverage=False): | 
|  | self.test = test | 
|  | self.platform = platform | 
|  | self.name = os.path.join(platform.name, test.path) | 
|  | self.outdir = os.path.join(base_outdir, platform.name, test.path) | 
|  | self.build_only = build_only or test.build_only | 
|  |  | 
|  | def calculate_sizes(self): | 
|  | """Get the RAM/ROM sizes of a test case. | 
|  |  | 
|  | This can only be run after the instance has been executed by | 
|  | MakeGenerator, otherwise there won't be any binaries to measure. | 
|  |  | 
|  | @return A SizeCalculator object | 
|  | """ | 
|  | fns = glob.glob(os.path.join(self.outdir, "*.elf")) | 
|  | fns = [x for x in fns if not x.endswith('_prebuilt.elf')] | 
|  | if (len(fns) != 1): | 
|  | raise BuildError("Missing/multiple output ELF binary") | 
|  | return SizeCalculator(fns[0], self.test.extra_sections) | 
|  |  | 
|  | def __repr__(self): | 
|  | return "<TestCase %s on %s>" % (self.test.name, self.platform.name) | 
|  |  | 
|  |  | 
|  | def defconfig_cb(context, goals, goal): | 
|  | if not goal.failed: | 
|  | return | 
|  |  | 
|  |  | 
|  | info("%sCould not build defconfig for %s%s" % | 
|  | (COLOR_RED, goal.name, COLOR_NORMAL)); | 
|  | if INLINE_LOGS: | 
|  | with open(goal.get_error_log()) as fp: | 
|  | data = fp.read() | 
|  | sys.stdout.write(data) | 
|  | if log_file: | 
|  | log_file.write(data) | 
|  | else: | 
|  | info("\tsee: " + COLOR_YELLOW + goal.get_error_log() + COLOR_NORMAL) | 
|  |  | 
|  |  | 
|  | class TestSuite: | 
|  | config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$') | 
|  |  | 
|  | def __init__(self, arch_root, testcase_roots, outdir, coverage): | 
|  | # Keep track of which test cases we've filtered out and why | 
|  | discards = {} | 
|  | self.arches = {} | 
|  | self.testcases = {} | 
|  | self.platforms = [] | 
|  | self.outdir = os.path.abspath(outdir) | 
|  | self.instances = {} | 
|  | self.goals = None | 
|  | self.discards = None | 
|  | self.coverage = coverage | 
|  |  | 
|  | arch_root = os.path.abspath(arch_root) | 
|  |  | 
|  | for testcase_root in testcase_roots: | 
|  | testcase_root = os.path.abspath(testcase_root) | 
|  |  | 
|  | debug("Reading test case configuration files under %s..." % | 
|  | testcase_root) | 
|  | for dirpath, dirnames, filenames in os.walk(testcase_root, | 
|  | topdown=True): | 
|  | verbose("scanning %s" % dirpath) | 
|  | if "testcase.ini" in filenames: | 
|  | verbose("Found test case in " + dirpath) | 
|  | dirnames[:] = [] | 
|  | ini_path = os.path.join(dirpath, "testcase.ini") | 
|  | cp = SanityConfigParser(ini_path) | 
|  | workdir = os.path.relpath(dirpath, testcase_root) | 
|  |  | 
|  | for section in cp.sections(): | 
|  | tc_dict = cp.get_section(section, testcase_valid_keys) | 
|  | tc = TestCase(testcase_root, workdir, section, tc_dict, | 
|  | ini_path) | 
|  | self.testcases[tc.name] = tc | 
|  |  | 
|  | debug("Reading architecture configuration files under %s..." % arch_root) | 
|  | for dirpath, dirnames, filenames in os.walk(arch_root): | 
|  | for filename in filenames: | 
|  | if filename.endswith(".ini"): | 
|  | fn = os.path.join(dirpath, filename) | 
|  | verbose("Found arch configuration " + fn) | 
|  | arch = Architecture(fn) | 
|  | self.arches[arch.name] = arch | 
|  | self.platforms.extend(arch.platforms) | 
|  |  | 
|  | # Build up a list of boards based on the presence of | 
|  | # boards/*/*_defconfig files. We want to make sure that the arch.ini | 
|  | # files are not missing any boards | 
|  | all_plats = [plat.name for plat in self.platforms] | 
|  | for dirpath, dirnames, filenames in os.walk(os.path.join(ZEPHYR_BASE, | 
|  | "boards")): | 
|  | for filename in filenames: | 
|  | if filename.endswith("_defconfig"): | 
|  | board_name = filename.replace("_defconfig", "") | 
|  | if board_name not in all_plats: | 
|  | error("Platform '%s' not specified in any arch .ini file and will not be tested" | 
|  | % board_name) | 
|  | self.instances = {} | 
|  |  | 
|  | def get_last_failed(self): | 
|  | if not os.path.exists(LAST_SANITY): | 
|  | raise SanityRuntimeError("Couldn't find last sanity run.") | 
|  | result = [] | 
|  | with open(LAST_SANITY, "r") as fp: | 
|  | cr = csv.DictReader(fp) | 
|  | for row in cr: | 
|  | if row["passed"] == "True": | 
|  | continue | 
|  | test = row["test"] | 
|  | platform = row["platform"] | 
|  | result.append((test, platform)) | 
|  | return result | 
|  |  | 
|  | def apply_filters(self, platform_filter, arch_filter, tag_filter, exclude_tag, | 
|  | config_filter, testcase_filter, last_failed, all_plats, | 
|  | platform_limit, toolchain, extra_args, enable_ccache): | 
|  | instances = [] | 
|  | discards = {} | 
|  | verbose("platform filter: " + str(platform_filter)) | 
|  | verbose("    arch_filter: " + str(arch_filter)) | 
|  | verbose("     tag_filter: " + str(tag_filter)) | 
|  | verbose("    exclude_tag: " + str(exclude_tag)) | 
|  | verbose("  config_filter: " + str(config_filter)) | 
|  | verbose("  enable_ccache: " + str(enable_ccache)) | 
|  |  | 
|  | if last_failed: | 
|  | failed_tests = self.get_last_failed() | 
|  |  | 
|  | default_platforms = False | 
|  |  | 
|  | if all_plats: | 
|  | info("Selecting all possible platforms per test case") | 
|  | # When --all used, any --platform arguments ignored | 
|  | platform_filter = [] | 
|  | elif not platform_filter: | 
|  | info("Selecting default platforms per test case") | 
|  | default_platforms = True | 
|  |  | 
|  | mg = MakeGenerator(self.outdir, ccache=enable_ccache) | 
|  | dlist = {} | 
|  | for tc_name, tc in self.testcases.items(): | 
|  | for arch_name, arch in self.arches.items(): | 
|  | instance_list = [] | 
|  | for plat in arch.platforms: | 
|  | instance = TestInstance(tc, plat, self.outdir) | 
|  |  | 
|  | if (arch_name == "unit") != (tc.type == "unit"): | 
|  | continue | 
|  |  | 
|  | if tc.skip: | 
|  | continue | 
|  |  | 
|  | if tag_filter and not tc.tags.intersection(tag_filter): | 
|  | continue | 
|  |  | 
|  | if exclude_tag and tc.tags.intersection(exclude_tag): | 
|  | continue | 
|  |  | 
|  | if testcase_filter and tc_name not in testcase_filter: | 
|  | continue | 
|  |  | 
|  | if last_failed and (tc.name, plat.name) not in failed_tests: | 
|  | continue | 
|  |  | 
|  | if arch_filter and arch_name not in arch_filter: | 
|  | continue | 
|  |  | 
|  | if tc.arch_whitelist and arch.name not in tc.arch_whitelist: | 
|  | continue | 
|  |  | 
|  | if tc.arch_exclude and arch.name in tc.arch_exclude: | 
|  | continue | 
|  |  | 
|  | if tc.platform_exclude and plat.name in tc.platform_exclude: | 
|  | continue | 
|  |  | 
|  | if platform_filter and plat.name not in platform_filter: | 
|  | continue | 
|  |  | 
|  | if tc.platform_whitelist and plat.name not in tc.platform_whitelist: | 
|  | continue | 
|  |  | 
|  | if tc.tc_filter and (plat in arch.platforms[:platform_limit] or all_plats or platform_filter): | 
|  | args = tc.extra_args[:] | 
|  | args.extend(["ARCH=" + plat.arch.name, | 
|  | "BOARD=" + plat.name, "initconfig"]) | 
|  | args.extend(extra_args) | 
|  | # FIXME would be nice to use a common outdir for this so that | 
# conf, gen_idt, etc. aren't rebuilt for every combination,
# but we need a way to keep different Make processes from clobbering
# each other since they all try to build them simultaneously
|  |  | 
|  | o = os.path.join(self.outdir, plat.name, tc.path) | 
|  | dlist[tc, plat, tc.name.split("/")[-1]] = os.path.join(o,".config") | 
|  | goal = "_".join([plat.name, "_".join(tc.name.split("/")), "initconfig"]) | 
|  | mg.add_build_goal(goal, os.path.join(ZEPHYR_BASE, tc.code_location), o, args) | 
|  |  | 
|  | info("Building testcase defconfigs...") | 
|  | results = mg.execute(defconfig_cb) | 
|  |  | 
|  | for name, goal in results.items(): | 
|  | if goal.failed: | 
|  | raise SanityRuntimeError("Couldn't build some defconfigs") | 
|  |  | 
|  | for k, out_config in dlist.items(): | 
|  | test, plat, name = k | 
|  | defconfig = {} | 
|  | with open(out_config, "r") as fp: | 
|  | for line in fp.readlines(): | 
|  | m = TestSuite.config_re.match(line) | 
|  | if not m: | 
|  | if line.strip() and not line.startswith("#"): | 
|  | sys.stderr.write("Unrecognized line %s\n" % line) | 
|  | continue | 
|  | defconfig[m.group(1)] = m.group(2).strip() | 
|  | test.defconfig[plat] = defconfig | 
|  |  | 
|  | for tc_name, tc in self.testcases.items(): | 
|  | for arch_name, arch in self.arches.items(): | 
|  | instance_list = [] | 
|  | for plat in arch.platforms: | 
|  | instance = TestInstance(tc, plat, self.outdir) | 
|  |  | 
|  | if (arch_name == "unit") != (tc.type == "unit"): | 
|  | # Discard silently | 
|  | continue | 
|  |  | 
|  | if tc.skip: | 
|  | discards[instance] = "Skip filter" | 
|  | continue | 
|  |  | 
|  | if tag_filter and not tc.tags.intersection(tag_filter): | 
|  | discards[instance] = "Command line testcase tag filter" | 
|  | continue | 
|  |  | 
|  | if exclude_tag and tc.tags.intersection(exclude_tag): | 
|  | discards[instance] = "Command line testcase exclude filter" | 
|  | continue | 
|  |  | 
|  | if testcase_filter and tc_name not in testcase_filter: | 
|  | discards[instance] = "Testcase name filter" | 
|  | continue | 
|  |  | 
|  | if last_failed and (tc.name, plat.name) not in failed_tests: | 
|  | discards[instance] = "Passed or skipped during last run" | 
|  | continue | 
|  |  | 
|  | if arch_filter and arch_name not in arch_filter: | 
|  | discards[instance] = "Command line testcase arch filter" | 
|  | continue | 
|  |  | 
|  | if tc.arch_whitelist and arch.name not in tc.arch_whitelist: | 
|  | discards[instance] = "Not in test case arch whitelist" | 
|  | continue | 
|  |  | 
|  | if tc.arch_exclude and arch.name in tc.arch_exclude: | 
|  | discards[instance] = "In test case arch exclude" | 
|  | continue | 
|  |  | 
|  | if tc.platform_exclude and plat.name in tc.platform_exclude: | 
|  | discards[instance] = "In test case platform exclude" | 
|  | continue | 
|  |  | 
|  | if platform_filter and plat.name not in platform_filter: | 
|  | discards[instance] = "Command line platform filter" | 
|  | continue | 
|  |  | 
|  | if tc.platform_whitelist and plat.name not in tc.platform_whitelist: | 
|  | discards[instance] = "Not in testcase platform whitelist" | 
|  | continue | 
|  |  | 
|  | if toolchain and toolchain not in plat.supported_toolchains: | 
|  | discards[instance] = "Not supported by the toolchain" | 
|  | continue | 
|  |  | 
|  | defconfig = {"ARCH" : arch.name, "PLATFORM" : plat.name} | 
|  | defconfig.update(os.environ) | 
|  | for p, tdefconfig in tc.defconfig.items(): | 
|  | if p == plat: | 
|  | defconfig.update(tdefconfig) | 
|  | break | 
|  |  | 
|  | if tc.tc_filter: | 
|  | try: | 
|  | res = expr_parser.parse(tc.tc_filter, defconfig) | 
|  | except SyntaxError as se: | 
|  | sys.stderr.write("Failed processing %s\n" % tc.inifile) | 
|  | raise se | 
|  | if not res: | 
|  | discards[instance] = ("defconfig doesn't satisfy expression '%s'" % | 
|  | tc.tc_filter) | 
|  | continue | 
|  |  | 
|  | instance_list.append(instance) | 
|  |  | 
|  | if not instance_list: | 
|  | # Every platform in this arch was rejected already | 
|  | continue | 
|  |  | 
|  | if default_platforms: | 
|  | self.add_instances(instance_list[:platform_limit]) | 
|  | for instance in instance_list[platform_limit:]: | 
|  | discards[instance] = "Not in first %d platform(s) for arch" % platform_limit | 
|  | else: | 
|  | self.add_instances(instance_list) | 
|  | self.discards = discards | 
|  | return discards | 
|  |  | 
|  | def add_instances(self, ti_list): | 
|  | for ti in ti_list: | 
|  | self.instances[ti.name] = ti | 
|  |  | 
|  | def execute(self, cb, cb_context, build_only, enable_slow, enable_asserts, enable_deprecations, | 
|  | extra_args, enable_ccache): | 
|  |  | 
|  | def calc_one_elf_size(name, goal): | 
|  | if not goal.failed: | 
|  | i = self.instances[name] | 
|  | sc = i.calculate_sizes() | 
|  | goal.metrics["ram_size"] = sc.get_ram_size() | 
|  | goal.metrics["rom_size"] = sc.get_rom_size() | 
|  | goal.metrics["unrecognized"] = sc.unrecognized_sections() | 
|  |  | 
|  | mg = MakeGenerator(self.outdir, asserts=enable_asserts, deprecations=enable_deprecations, | 
|  | ccache=enable_ccache) | 
|  | for i in self.instances.values(): | 
|  | mg.add_test_instance(i, build_only, enable_slow, self.coverage, extra_args) | 
|  | self.goals = mg.execute(cb, cb_context) | 
|  |  | 
|  | # Parallelize size calculation | 
|  | executor = concurrent.futures.ThreadPoolExecutor(CPU_COUNTS) | 
|  | futures = [executor.submit(calc_one_elf_size, name, goal) \ | 
|  | for name, goal in self.goals.items()] | 
|  | concurrent.futures.wait(futures) | 
|  |  | 
|  | return self.goals | 
|  |  | 
|  | def discard_report(self, filename): | 
if self.discards is None:
raise SanityRuntimeError("apply_filters() hasn't been run!")
|  |  | 
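|  | # Illustrative example of the CSV this produces (values are hypothetical, | 
|  | # shown only to document the column layout written below): | 
|  | # | 
|  | #   test,arch,platform,reason | 
|  | #   samples/hello_world/test,arm,some_arm_platform,Command line platform filter | 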
|  | with open(filename, "wb") as csvfile: | 
|  | fieldnames = ["test", "arch", "platform", "reason"] | 
|  | cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep) | 
|  | cw.writeheader() | 
|  | for instance, reason in self.discards.items(): | 
|  | rowdict = {"test" : i.test.name, | 
|  | "arch" : i.platform.arch.name, | 
|  | "platform" : i.platform.name, | 
|  | "reason" : reason} | 
|  | cw.writerow(rowdict) | 
|  |  | 
|  | def compare_metrics(self, filename): | 
|  | # name, datatype, lower results better | 
|  | interesting_metrics = [("ram_size", int, True), | 
|  | ("rom_size", int, True)] | 
|  |  | 
|  | if self.goals is None: | 
|  | raise SanityRuntimeException("execute() hasn't been run!") | 
|  |  | 
|  | if not os.path.exists(filename): | 
|  | info("Cannot compare metrics, %s not found" % filename) | 
|  | return [] | 
|  |  | 
|  | results = [] | 
|  | saved_metrics = {} | 
|  | with open(filename) as fp: | 
|  | cr = csv.DictReader(fp) | 
|  | for row in cr: | 
|  | d = {} | 
|  | for m, _, _ in interesting_metrics: | 
|  | d[m] = row[m] | 
|  | saved_metrics[(row["test"], row["platform"])] = d | 
|  |  | 
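|  | # Each entry appended to 'results' below is a tuple of | 
|  | # (instance, metric, new_value, delta, lower_is_better). A hypothetical entry | 
|  | # such as (<instance>, "ram_size", 12544, +256, True) means RAM usage grew by | 
|  | # 256 bytes compared to the value stored in the saved report. | 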
|  | for name, goal in self.goals.items(): | 
|  | i = self.instances[name] | 
|  | mkey = (i.test.name, i.platform.name) | 
|  | if mkey not in saved_metrics: | 
|  | continue | 
|  | sm = saved_metrics[mkey] | 
|  | for metric, mtype, lower_better in interesting_metrics: | 
|  | if metric not in goal.metrics: | 
|  | continue | 
|  | if sm[metric] == "": | 
|  | continue | 
|  | delta = goal.metrics[metric] - mtype(sm[metric]) | 
|  | if delta == 0: | 
|  | continue | 
|  | results.append((i, metric, goal.metrics[metric], delta, | 
|  | lower_better)) | 
|  | return results | 
|  |  | 
|  | def testcase_report(self, filename): | 
|  | if self.goals is None: | 
|  | raise SanityRuntimeException("execute() hasn't been run!") | 
|  |  | 
|  | with open(filename, "wt") as csvfile: | 
|  | fieldnames = ["test", "arch", "platform", "passed", "status", | 
|  | "extra_args", "qemu", "qemu_time", "ram_size", | 
|  | "rom_size"] | 
|  | cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep) | 
|  | cw.writeheader() | 
|  | for name, goal in self.goals.items(): | 
|  | i = self.instances[name] | 
|  | rowdict = {"test" : i.test.name, | 
|  | "arch" : i.platform.arch.name, | 
|  | "platform" : i.platform.name, | 
|  | "extra_args" : " ".join(i.test.extra_args), | 
|  | "qemu" : i.platform.qemu_support} | 
|  | if goal.failed: | 
|  | rowdict["passed"] = False | 
|  | rowdict["status"] = goal.reason | 
|  | else: | 
|  | rowdict["passed"] = True | 
|  | if goal.qemu: | 
|  | rowdict["qemu_time"] = goal.metrics["qemu_time"] | 
|  | rowdict["ram_size"] = goal.metrics["ram_size"] | 
|  | rowdict["rom_size"] = goal.metrics["rom_size"] | 
|  | cw.writerow(rowdict) | 
|  |  | 
|  |  | 
|  | def parse_arguments(): | 
|  |  | 
|  | parser = argparse.ArgumentParser(description = __doc__, | 
|  | formatter_class = argparse.RawDescriptionHelpFormatter) | 
|  | parser.fromfile_prefix_chars = "+" | 
|  |  | 
|  | parser.add_argument("-p", "--platform", action="append", | 
|  | help="Platform filter for testing. This option may be used multiple " | 
|  | "times. Testcases will only be built/run on the platforms " | 
|  | "specified. If this option is not used, then N platforms will " | 
|  | "automatically be chosen from each arch to build and test, " | 
|  | "where N is provided by the --platform-limit option.") | 
|  | parser.add_argument("-L", "--platform-limit", action="store", type=int, | 
|  | metavar="N", default=1, | 
|  | help="Controls what platforms are tested if --platform or " | 
|  | "--all are not used. For each architecture specified by " | 
|  | "--arch (defaults to all of them), choose the first " | 
|  | "N platforms to test in the arch-specific .ini file " | 
|  | "'platforms' list. Defaults to 1.") | 
|  | parser.add_argument("-a", "--arch", action="append", | 
|  | help="Arch filter for testing. Takes precedence over --platform. " | 
|  | "If unspecified, test all arches. Multiple invocations " | 
|  | "are treated as a logical 'or' relationship") | 
|  | parser.add_argument("-t", "--tag", action="append", | 
|  | help="Specify tags to restrict which tests to run by tag value. " | 
|  | "Default is to not do any tag filtering. Multiple invocations " | 
|  | "are treated as a logical 'or' relationship") | 
|  | parser.add_argument("-e", "--exclude-tag", action="append", | 
|  | help="Specify tags of tests that should not run." | 
|  | "Default is to run all tests with all tags.") | 
|  | parser.add_argument("-f", "--only-failed", action="store_true", | 
|  | help="Run only those tests that failed the previous sanity check " | 
|  | "invocation.") | 
|  | parser.add_argument("-c", "--config", action="append", | 
|  | help="Specify platform configuration values filtering. This can be " | 
|  | "specified two ways: <config>=<value> or just <config>. The " | 
|  | "defconfig for all platforms will be " | 
|  | "checked. For the <config>=<value> case, only match defconfig " | 
|  | "that have that value defined. For the <config> case, match " | 
|  | "defconfig that have that value assigned to any value. " | 
|  | "Prepend a '!' to invert the match.") | 
|  | parser.add_argument("-s", "--test", action="append", | 
|  | help="Run only the specified test cases. These are named by " | 
|  | "<path to test project relative to " | 
|  | "--testcase-root>/<testcase.ini section name>") | 
|  | parser.add_argument("-l", "--all", action="store_true", | 
|  | help="Build/test on all platforms. Any --platform arguments " | 
|  | "ignored.") | 
|  |  | 
|  | parser.add_argument("-o", "--testcase-report", | 
|  | help="Output a CSV spreadsheet containing results of the test run") | 
|  | parser.add_argument("-d", "--discard-report", | 
|  | help="Output a CSV spreadhseet showing tests that were skipped " | 
|  | "and why") | 
|  | parser.add_argument("--compare-report", | 
|  | help="Use this report file for size comparision") | 
|  |  | 
|  | parser.add_argument("--ccache", action="store_const", const=1, default=0, | 
|  | help="Enable the use of ccache when building") | 
|  |  | 
|  | parser.add_argument("-y", "--dry-run", action="store_true", | 
|  | help="Create the filtered list of test cases, but don't actually " | 
|  | "run them. Useful if you're just interested in " | 
|  | "--discard-report") | 
|  |  | 
|  | parser.add_argument("-r", "--release", action="store_true", | 
|  | help="Update the benchmark database with the results of this test " | 
|  | "run. Intended to be run by CI when tagging an official " | 
|  | "release. This database is used as a basis for comparison " | 
|  | "when looking for deltas in metrics such as footprint") | 
|  | parser.add_argument("-w", "--warnings-as-errors", action="store_true", | 
|  | help="Treat warning conditions as errors") | 
|  | parser.add_argument("-v", "--verbose", action="count", default=0, | 
|  | help="Emit debugging information, call multiple times to increase " | 
|  | "verbosity") | 
|  | parser.add_argument("-i", "--inline-logs", action="store_true", | 
|  | help="Upon test failure, print relevant log data to stdout " | 
|  | "instead of just a path to it") | 
|  | parser.add_argument("--log-file", metavar="FILENAME", action="store", | 
|  | help="log also to file") | 
|  | parser.add_argument("-m", "--last-metrics", action="store_true", | 
|  | help="Instead of comparing metrics from the last --release, " | 
|  | "compare with the results of the previous sanity check " | 
|  | "invocation") | 
|  | parser.add_argument("-u", "--no-update", action="store_true", | 
|  | help="do not update the results of the last run of the sanity " | 
|  | "checks") | 
|  | parser.add_argument("-b", "--build-only", action="store_true", | 
|  | help="Only build the code, do not execute any of it in QEMU") | 
|  | parser.add_argument("-j", "--jobs", type=int, | 
|  | help="Number of cores to use when building, defaults to " | 
|  | "number of CPUs * 2") | 
|  | parser.add_argument("-H", "--footprint-threshold", type=float, default=5, | 
|  | help="When checking test case footprint sizes, warn the user if " | 
|  | "the new app size is greater then the specified percentage " | 
|  | "from the last release. Default is 5. 0 to warn on any " | 
|  | "increase on app size") | 
|  | parser.add_argument("-D", "--all-deltas", action="store_true", | 
|  | help="Show all footprint deltas, positive or negative. Implies " | 
|  | "--footprint-threshold=0") | 
|  | parser.add_argument("-O", "--outdir", | 
|  | default="%s/sanity-out" % ZEPHYR_BASE, | 
|  | help="Output directory for logs and binaries.") | 
|  | parser.add_argument("-n", "--no-clean", action="store_true", | 
|  | help="Do not delete the outdir before building. Will result in " | 
|  | "faster compilation since builds will be incremental") | 
|  | parser.add_argument("-T", "--testcase-root", action="append", default=[], | 
|  | help="Base directory to recursively search for test cases. All " | 
|  | "testcase.ini files under here will be processed. May be " | 
|  | "called multiple times. Defaults to the 'samples' and " | 
|  | "'tests' directories in the Zephyr tree.") | 
|  | parser.add_argument("-A", "--arch-root", | 
|  | default="%s/scripts/sanity_chk/arches" % ZEPHYR_BASE, | 
|  | help="Directory to search for arch configuration files. All .ini " | 
|  | "files in the directory will be processed.") | 
|  | parser.add_argument("-z", "--size", action="append", | 
|  | help="Don't run sanity  checks. Instead, produce a report to " | 
|  | "stdout detailing RAM/ROM sizes on the specified filenames. " | 
|  | "All other command line arguments ignored.") | 
|  | parser.add_argument("-S", "--enable-slow", action="store_true", | 
|  | help="Execute time-consuming test cases that have been marked " | 
|  | "as 'slow' in testcase.ini. Normally these are only built.") | 
|  | parser.add_argument("-R", "--enable-asserts", action="store_true", | 
|  | help="Build all test cases with assertions enabled.") | 
|  | parser.add_argument("-Q", "--error-on-deprecations", action="store_false", | 
|  | help="Error on deprecation warnings.") | 
|  | parser.add_argument("-x", "--extra-args", action="append", default=[], | 
|  | help="Extra arguments to pass to the build when compiling test " | 
|  | "cases. May be called multiple times. These will be passed " | 
|  | "in after any sanitycheck-supplied options.") | 
|  | parser.add_argument("-C", "--coverage", action="store_true", | 
|  | help="Scan for unit test coverage with gcov + lcov.") | 
|  |  | 
|  | return parser.parse_args() | 
|  |  | 
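|  | # Illustrative invocations (the platform and tag names are examples only): | 
|  | # | 
|  | #   ./sanitycheck                             # one platform per arch, run in QEMU where possible | 
|  | #   ./sanitycheck -p some_platform -t kernel  # restrict testing by platform and tag | 
|  | #   ./sanitycheck --all --build-only          # build everything, never execute in QEMU | 
|  | #   ./sanitycheck +extra_options.txt          # read additional options from a file ('+' prefix) | 
|  |  | 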
|  | def log_info(filename): | 
|  | filename = os.path.relpath(os.path.realpath(filename)) | 
|  | if INLINE_LOGS: | 
|  | info("{:-^100}".format(filename)) | 
|  |  | 
|  | try: | 
|  | with open(filename) as fp: | 
|  | data = fp.read() | 
|  | except Exception as e: | 
|  | data = "Unable to read log data (%s)\n" % (str(e)) | 
|  |  | 
|  | sys.stdout.write(data) | 
|  | if log_file: | 
|  | log_file.write(data) | 
|  | info("{:-^100}".format(filename)) | 
|  | else: | 
|  | info("\tsee: " + COLOR_YELLOW + filename + COLOR_NORMAL) | 
|  |  | 
|  | def terse_test_cb(instances, goals, goal): | 
|  | total_tests = len(goals) | 
|  | total_done = 0 | 
|  | total_failed = 0 | 
|  |  | 
|  | for k, g in goals.items(): | 
|  | if g.finished: | 
|  | total_done += 1 | 
|  | if g.failed: | 
|  | total_failed += 1 | 
|  |  | 
|  | if goal.failed: | 
|  | i = instances[goal.name] | 
|  | info("\n\n{:<25} {:<50} {}FAILED{}: {}".format(i.platform.name, | 
|  | i.test.name, COLOR_RED, COLOR_NORMAL, goal.reason)) | 
|  | log_info(goal.get_error_log()) | 
|  | info("") | 
|  |  | 
|  | sys.stdout.write("\rtotal complete: %s%4d/%4d%s  %2d%%  failed: %s%4d%s" % ( | 
|  | COLOR_GREEN, total_done, total_tests, COLOR_NORMAL, | 
|  | int((float(total_done) / total_tests) * 100), | 
|  | COLOR_RED if total_failed > 0 else COLOR_NORMAL, | 
|  | total_failed, COLOR_NORMAL)) | 
|  | sys.stdout.flush() | 
|  |  | 
|  | def chatty_test_cb(instances, goals, goal): | 
|  | i = instances[goal.name] | 
|  |  | 
|  | if VERBOSE < 2 and not goal.finished: | 
|  | return | 
|  |  | 
|  | if goal.failed: | 
|  | status = COLOR_RED + "FAILED" + COLOR_NORMAL + ": " + goal.reason | 
|  | elif goal.finished: | 
|  | status = COLOR_GREEN + "PASSED" + COLOR_NORMAL | 
|  | else: | 
|  | status = goal.make_state | 
|  |  | 
|  | info("{:<25} {:<50} {}".format(i.platform.name, i.test.name, status)) | 
|  | if goal.failed: | 
|  | log_info(goal.get_error_log()) | 
|  |  | 
|  |  | 
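|  | # Sample of the report printed by size_report() (numbers are illustrative): | 
|  | # | 
|  | #   SECTION NAME             VMA        LMA     SIZE  HEX SZ TYPE | 
|  | #   text              0x00000000 0x00001000    13456 0x03490 rw | 
|  | #   Totals: 13456 bytes (ROM), 4096 bytes (RAM) | 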
|  | def size_report(sc): | 
|  | info(sc.filename) | 
|  | info("SECTION NAME             VMA        LMA     SIZE  HEX SZ TYPE") | 
|  | for v in sc.sections: | 
|  |  | 
|  | info("%-17s 0x%08x 0x%08x %8d 0x%05x %-7s" % | 
|  | (v["name"], v["virt_addr"], v["load_addr"], v["size"], v["size"], | 
|  | v["type"])) | 
|  |  | 
|  | info("Totals: %d bytes (ROM), %d bytes (RAM)" % | 
|  | (sc.rom_size, sc.ram_size)) | 
|  | info("") | 
|  |  | 
|  | def generate_coverage(outdir, ignores): | 
|  | with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog: | 
|  | coveragefile = os.path.join(outdir, "coverage.info") | 
|  | ztestfile = os.path.join(outdir, "ztest.info") | 
|  | subprocess.call(["lcov", "--capture", "--directory", outdir, | 
|  | "--output-file", coveragefile], stdout=coveragelog) | 
|  | # We want to remove tests/* and tests/ztest/test/* but save tests/ztest | 
|  | subprocess.call(["lcov", "--extract", coveragefile, | 
|  | os.path.join(ZEPHYR_BASE, "tests", "ztest", "*"), | 
|  | "--output-file", ztestfile], stdout=coveragelog) | 
|  | subprocess.call(["lcov", "--remove", ztestfile, | 
|  | os.path.join(ZEPHYR_BASE, "tests/ztest/test/*"), | 
|  | "--output-file", ztestfile], stdout=coveragelog) | 
|  | for i in ignores: | 
|  | subprocess.call(["lcov", "--remove", coveragefile, i, | 
|  | "--output-file", coveragefile], stdout=coveragelog) | 
|  | subprocess.call(["genhtml", "-output-directory", | 
|  | os.path.join(outdir, "coverage"), | 
|  | coveragefile, ztestfile], stdout=coveragelog) | 
|  |  | 
|  | def main(): | 
|  | start_time = time.time() | 
|  | global VERBOSE, INLINE_LOGS, CPU_COUNTS, log_file | 
|  | args = parse_arguments() | 
|  | toolchain = os.environ.get("ZEPHYR_GCC_VARIANT", None) | 
|  | if toolchain == "zephyr": | 
|  | os.environ["DISABLE_TRYRUN"] = "1" | 
|  |  | 
|  | if args.size: | 
|  | for fn in args.size: | 
|  | size_report(SizeCalculator(fn, [])) | 
|  | sys.exit(0) | 
|  |  | 
|  | VERBOSE += args.verbose | 
|  | INLINE_LOGS = args.inline_logs | 
|  | if args.log_file: | 
|  | log_file = open(args.log_file, "w") | 
|  | if args.jobs: | 
|  | CPU_COUNTS = args.jobs | 
|  |  | 
|  | if os.path.exists(args.outdir) and not args.no_clean: | 
|  | info("Cleaning output directory " + args.outdir) | 
|  | shutil.rmtree(args.outdir) | 
|  |  | 
|  | if not args.testcase_root: | 
|  | args.testcase_root = [os.path.join(ZEPHYR_BASE, "tests"), | 
|  | os.path.join(ZEPHYR_BASE, "samples")] | 
|  |  | 
|  | ts = TestSuite(args.arch_root, args.testcase_root, args.outdir, args.coverage) | 
|  | discards = ts.apply_filters(args.platform, args.arch, args.tag, args.exclude_tag, args.config, | 
|  | args.test, args.only_failed, args.all, | 
|  | args.platform_limit, toolchain, args.extra_args, args.ccache) | 
|  |  | 
|  | if args.discard_report: | 
|  | ts.discard_report(args.discard_report) | 
|  |  | 
|  | if VERBOSE: | 
|  | for i, reason in discards.items(): | 
|  | debug("{:<25} {:<50} {}SKIPPED{}: {}".format(i.platform.name, | 
|  | i.test.name, COLOR_YELLOW, COLOR_NORMAL, reason)) | 
|  |  | 
|  | info("%d tests selected, %d tests discarded due to filters" % | 
|  | (len(ts.instances), len(discards))) | 
|  |  | 
|  | if args.dry_run: | 
|  | return | 
|  |  | 
|  | if VERBOSE or not TERMINAL: | 
|  | goals = ts.execute(chatty_test_cb, ts.instances, args.build_only, | 
|  | args.enable_slow, args.enable_asserts, args.error_on_deprecations, | 
|  | args.extra_args, args.ccache) | 
|  | else: | 
|  | goals = ts.execute(terse_test_cb, ts.instances, args.build_only, | 
|  | args.enable_slow, args.enable_asserts, args.error_on_deprecations, | 
|  | args.extra_args, args.ccache) | 
|  | info("") | 
|  |  | 
|  | # figure out which report to use for size comparison | 
|  | if args.compare_report: | 
|  | report_to_use = args.compare_report | 
|  | elif args.last_metrics: | 
|  | report_to_use = LAST_SANITY | 
|  | else: | 
|  | report_to_use = RELEASE_DATA | 
|  |  | 
|  | deltas = ts.compare_metrics(report_to_use) | 
|  | warnings = 0 | 
|  | if deltas: | 
|  | for i, metric, value, delta, lower_better in deltas: | 
|  | if not args.all_deltas and ((delta < 0 and lower_better) or | 
|  | (delta > 0 and not lower_better)): | 
|  | continue | 
|  |  | 
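|  | # 'delta' is new_value - old_value (see compare_metrics()), so | 
|  | # (value - delta) recovers the old value and 'percentage' expresses the | 
|  | # change relative to the previous release/run being compared against. | 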
|  | percentage = (float(delta) / float(value - delta)) | 
|  | if not args.all_deltas and (percentage < | 
|  | (args.footprint_threshold / 100.0)): | 
|  | continue | 
|  |  | 
|  | info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format( | 
|  | i.platform.name, i.test.name, COLOR_YELLOW, | 
|  | "INFO" if args.all_deltas else "WARNING", COLOR_NORMAL, | 
|  | metric, delta, value, percentage)) | 
|  | warnings += 1 | 
|  |  | 
|  | if warnings: | 
|  | info("Deltas based on metrics from last %s" % | 
|  | ("release" if not args.last_metrics else "run")) | 
|  |  | 
|  | failed = 0 | 
|  | for name, goal in goals.items(): | 
|  | if goal.failed: | 
|  | failed += 1 | 
|  | elif goal.metrics.get("unrecognized"): | 
|  | info("%sFAILED%s: %s has unrecognized binary sections: %s" % | 
|  | (COLOR_RED, COLOR_NORMAL, goal.name, | 
|  | str(goal.metrics["unrecognized"]))) | 
|  | failed += 1 | 
|  |  | 
|  | if args.coverage: | 
|  | info("Generating coverage files...") | 
|  | generate_coverage(args.outdir, ["tests/*", "samples/*"]) | 
|  |  | 
|  | info("%s%d of %d%s tests passed with %s%d%s warnings in %d seconds" % | 
|  | (COLOR_RED if failed else COLOR_GREEN, len(goals) - failed, | 
|  | len(goals), COLOR_NORMAL, COLOR_YELLOW if warnings else COLOR_NORMAL, | 
|  | warnings, COLOR_NORMAL, time.time() - start_time)) | 
|  |  | 
|  | if args.testcase_report: | 
|  | ts.testcase_report(args.testcase_report) | 
|  | if not args.no_update: | 
|  | ts.testcase_report(LAST_SANITY) | 
|  | if args.release: | 
|  | ts.testcase_report(RELEASE_DATA) | 
|  | if log_file: | 
|  | log_file.close() | 
|  | if failed or (warnings and args.warnings_as_errors): | 
|  | sys.exit(1) | 
|  |  | 
|  | if __name__ == "__main__": | 
|  | main() |