#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
"""Zephyr Sanity Tests
This script scans for the set of unit test applications in the git
repository and attempts to execute them. By default, it tries to
build each test case on one platform per architecture, using a precedence
list defined in an architecture configuration file, and if possible
run the tests in the QEMU emulator.
Test cases are detected by the presence of a 'testcase.yaml' or 'sample.yaml'
file in the application's project directory. This file may contain one or more
blocks, each identifying a test scenario. The title of the block is a name for
the test case, which only needs to be unique for the test cases specified in
that testcase meta-data. The full canonical name for each test case is <path to
test case>/<block>.
Each test block in the testcase meta-data can define the following key/value
pairs (an illustrative example scenario is given after the filter grammar below):
tags: <list of tags> (required)
A set of string tags for the testcase. Usually pertains to
functional domains but can be anything. Command line invocations
of this script can filter the set of tests to run based on tag.
skip: <True|False> (default False)
    Skip this test case unconditionally. This can be used for broken tests.
slow: <True|False> (default False)
Don't run this test case unless --enable-slow was passed in on the
command line. Intended for time-consuming test cases that are only
run under certain circumstances, like daily builds. These test cases
are still compiled.
extra_args: <list of extra arguments>
Extra arguments to pass to Make when building or running the
test case.
extra_configs: <list of extra configurations>
Extra configuration options to be merged with a master prj.conf
when building or running the test case.
build_only: <True|False> (default False)
If true, don't try to run the test under QEMU even if the
selected platform supports it.
build_on_all: <True|False> (default False)
If true, attempt to build test on all available platforms.
depends_on: <list of features>
    A board or platform can announce what features it supports; this option
    will enable the test only on those platforms that provide all of the
    listed features.
min_ram: <integer>
    Minimum amount of RAM (in KB) needed for this test to build and run. This
    is compared with information provided by the board metadata.
min_flash: <integer>
    Minimum amount of ROM (in KB) needed for this test to build and run. This
    is compared with information provided by the board metadata.
timeout: <number of seconds>
Length of time to run test in QEMU before automatically killing it.
    Defaults to 60 seconds.
arch_whitelist: <list of arches, such as x86, arm, arc>
Set of architectures that this test case should only be run for.
arch_exclude: <list of arches, such as x86, arm, arc>
Set of architectures that this test case should not run on.
platform_whitelist: <list of platforms>
Set of platforms that this test case should only be run for.
platform_exclude: <list of platforms>
Set of platforms that this test case should not run on.
extra_sections: <list of extra binary sections>
When computing sizes, sanitycheck will report errors if it finds
extra, unexpected sections in the Zephyr binary unless they are named
here. They will not be included in the size calculation.
filter: <expression>
Filter whether the testcase should be run by evaluating an expression
against an environment containing the following values:
{ ARCH : <architecture>,
PLATFORM : <platform>,
<all CONFIG_* key/value pairs in the test's generated defconfig>,
*<env>: any environment variable available
}
The grammar for the expression language is as follows:
expression ::= expression "and" expression
| expression "or" expression
| "not" expression
| "(" expression ")"
| symbol "==" constant
| symbol "!=" constant
| symbol "<" number
| symbol ">" number
| symbol ">=" number
| symbol "<=" number
| symbol "in" list
| symbol ":" string
| symbol
list ::= "[" list_contents "]"
list_contents ::= constant
| list_contents "," constant
constant ::= number
| string
For the case where expression ::= symbol, it evaluates to true
if the symbol is defined to a non-empty string.
Operator precedence, starting from lowest to highest:
or (left associative)
and (left associative)
not (right associative)
all comparison operators (non-associative)
arch_whitelist, arch_exclude, platform_whitelist, platform_exclude
are all syntactic sugar for these expressions. For instance
arch_exclude = x86 arc
Is the same as:
filter = not ARCH in ["x86", "arc"]
The ':' operator compiles the string argument as a regular expression,
and then returns a true value only if the symbol's value in the environment
matches. For example, if CONFIG_SOC="quark_se" then
filter = CONFIG_SOC : "quark.*"
Would match it.
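As an illustrative sketch (the scenario name and values below are made up), a
testcase.yaml combining several of the keys documented above could look like:
  tests:
    - test_kernel_common:
        tags: kernel
        min_ram: 16
        arch_whitelist: x86 arm
        filter: CONFIG_PRINTK
Such a scenario would only be built for x86 and ARM platforms with at least
16K of RAM, and only where the generated defconfig enables CONFIG_PRINTK.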
The set of test cases that actually run depends on directives in the testcase
files and options passed in on the command line. If there is any confusion,
running with -v or --discard-report can help show why particular test cases
were skipped.
Metrics (such as pass/fail state and binary size) for the last code
release are stored in scripts/sanity_chk/sanity_last_release.csv.
To update this, pass the --all --release options.
To load arguments from a file, write '+' before the file name, e.g.,
+file_name. File content must be one or more valid arguments separated by
line breaks instead of whitespace.
Most everyday users will run with no arguments.
"""
import argparse
import os
import sys
import configparser
import re
import tempfile
import subprocess
import multiprocessing
import select
import shutil
import signal
import threading
import time
import csv
import glob
import concurrent
import concurrent.futures
import xml.etree.ElementTree as ET
from xml.sax.saxutils import escape
from collections import OrderedDict
from itertools import islice
import yaml
import logging
log_format = "%(levelname)s %(name)s::%(module)s.%(funcName)s():%(lineno)d: %(message)s"
logging.basicConfig(format=log_format, level=logging.WARNING)
if "ZEPHYR_BASE" not in os.environ:
sys.stderr.write("$ZEPHYR_BASE environment variable undefined.\n")
exit(1)
ZEPHYR_BASE = os.environ["ZEPHYR_BASE"]
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))
from sanity_chk import scl
from sanity_chk import expr_parser
VERBOSE = 0
LAST_SANITY = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk",
"last_sanity.csv")
LAST_SANITY_XUNIT = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk",
"last_sanity.xml")
RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk",
"sanity_last_release.csv")
CPU_COUNTS = multiprocessing.cpu_count()
if os.isatty(sys.stdout.fileno()):
TERMINAL = True
COLOR_NORMAL = '\033[0m'
COLOR_RED = '\033[91m'
COLOR_GREEN = '\033[92m'
COLOR_YELLOW = '\033[93m'
else:
TERMINAL = False
COLOR_NORMAL = ""
COLOR_RED = ""
COLOR_GREEN = ""
COLOR_YELLOW = ""
class SanityCheckException(Exception):
pass
class SanityRuntimeError(SanityCheckException):
pass
class ConfigurationError(SanityCheckException):
def __init__(self, cfile, message):
self.cfile = cfile
self.message = message
def __str__(self):
return repr(self.cfile + ": " + self.message)
class MakeError(SanityCheckException):
pass
class BuildError(MakeError):
pass
class ExecutionError(MakeError):
pass
log_file = None
# Debug Functions
def info(what):
sys.stdout.write(what + "\n")
if log_file:
log_file.write(what + "\n")
log_file.flush()
def error(what):
sys.stderr.write(COLOR_RED + what + COLOR_NORMAL + "\n")
if log_file:
        log_file.write(what + "\n")
log_file.flush()
def debug(what):
if VERBOSE >= 1:
info(what)
def verbose(what):
if VERBOSE >= 2:
info(what)
class Handler:
RUN_PASSED = "PROJECT EXECUTION SUCCESSFUL"
RUN_FAILED = "PROJECT EXECUTION FAILED"
def __init__(self, name, outdir, log_fn, timeout, unit=False):
"""Constructor
@param name Arbitrary name of the created thread
@param outdir Working directory, should be where qemu.pid gets created
by kbuild
@param log_fn Absolute path to write out QEMU's log data
@param timeout Kill the QEMU process if it doesn't finish up within
the given number of seconds
"""
self.lock = threading.Lock()
self.state = "waiting"
self.metrics = {}
self.metrics["qemu_time"] = 0
self.metrics["ram_size"] = 0
self.metrics["rom_size"] = 0
self.unit = unit
def set_state(self, state, metrics):
self.lock.acquire()
self.state = state
self.metrics.update(metrics)
self.lock.release()
def get_state(self):
self.lock.acquire()
ret = (self.state, self.metrics)
self.lock.release()
return ret
class UnitHandler(Handler):
def __init__(self, name, sourcedir, outdir, run_log, valgrind_log, timeout):
"""Constructor
        @param name Arbitrary name of the created thread
        @param sourcedir Absolute path to the test source directory
        @param outdir Working directory containing the test binary
        @param run_log Absolute path to runtime logs
        @param valgrind_log Absolute path to valgrind's log
        @param timeout Kill the test binary if it doesn't finish up within
        the given number of seconds
"""
super().__init__(name, outdir, run_log, timeout, True)
self.timeout = timeout
self.sourcedir = sourcedir
self.outdir = outdir
self.run_log = run_log
self.valgrind_log = valgrind_log
self.returncode = 0
self.set_state("running", {})
def handle(self):
out_state = "failed"
with open(self.run_log, "wt") as rl, open(self.valgrind_log, "wt") as vl:
try:
binary = os.path.join(self.outdir, "testbinary")
command = [binary]
if shutil.which("valgrind"):
command = ["valgrind", "--error-exitcode=2",
"--leak-check=full"] + command
returncode = subprocess.call(command, timeout=self.timeout,
stdout=rl, stderr=vl)
self.returncode = returncode
if returncode != 0:
if self.returncode == 1:
out_state = "failed"
else:
out_state = "failed valgrind"
else:
out_state = "passed"
except subprocess.TimeoutExpired:
out_state = "timeout"
self.returncode = 1
        # subprocess.call() with shell=True needs a single command string; with
        # a list, only the first element would be executed by the shell.
        subprocess.call("GCOV_PREFIX={} gcov {} -s {}".format(
            self.outdir, self.sourcedir, self.outdir), shell=True)
self.set_state(out_state, {})
class QEMUHandler(Handler):
"""Spawns a thread to monitor QEMU output from pipes
We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
We need to do this as once qemu starts, it runs forever until killed.
Test cases emit special messages to the console as they run, we check
for these to collect whether the test passed or failed.
"""
@staticmethod
def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results):
fifo_in = fifo_fn + ".in"
fifo_out = fifo_fn + ".out"
# These in/out nodes are named from QEMU's perspective, not ours
if os.path.exists(fifo_in):
os.unlink(fifo_in)
os.mkfifo(fifo_in)
if os.path.exists(fifo_out):
os.unlink(fifo_out)
os.mkfifo(fifo_out)
# We don't do anything with out_fp but we need to open it for
# writing so that QEMU doesn't block, due to the way pipes work
out_fp = open(fifo_in, "wb")
# Disable internal buffering, we don't
# want read() or poll() to ever block if there is data in there
in_fp = open(fifo_out, "rb", buffering=0)
log_out_fp = open(logfile, "wt")
start_time = time.time()
timeout_time = start_time + timeout
p = select.poll()
p.register(in_fp, select.POLLIN)
metrics = {}
line = ""
while True:
this_timeout = int((timeout_time - time.time()) * 1000)
if this_timeout < 0 or not p.poll(this_timeout):
out_state = "timeout"
break
try:
c = in_fp.read(1).decode("utf-8")
except UnicodeDecodeError:
# Test is writing something weird, fail
out_state = "unexpected byte"
break
if c == "":
# EOF, this shouldn't happen unless QEMU crashes
out_state = "unexpected eof"
break
line = line + c
if c != "\n":
continue
# line contains a full line of data output from QEMU
log_out_fp.write(line)
log_out_fp.flush()
line = line.strip()
verbose("QEMU: %s" % line)
if line == handler.RUN_PASSED:
out_state = "passed"
break
if line == handler.RUN_FAILED:
out_state = "failed"
break
# TODO: Add support for getting numerical performance data
# from test cases. Will involve extending test case reporting
# APIs. Add whatever gets reported to the metrics dictionary
line = ""
metrics["qemu_time"] = time.time() - start_time
verbose("QEMU complete (%s) after %f seconds" %
(out_state, metrics["qemu_time"]))
handler.set_state(out_state, metrics)
log_out_fp.close()
out_fp.close()
in_fp.close()
pid = int(open(pid_fn).read())
os.unlink(pid_fn)
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
# Oh well, as long as it's dead! User probably sent Ctrl-C
pass
os.unlink(fifo_in)
os.unlink(fifo_out)
def __init__(self, name, outdir, log_fn, timeout):
"""Constructor
@param name Arbitrary name of the created thread
@param outdir Working directory, should be where qemu.pid gets created
by kbuild
@param log_fn Absolute path to write out QEMU's log data
@param timeout Kill the QEMU process if it doesn't finish up within
the given number of seconds
"""
super().__init__(name, outdir, log_fn, timeout)
self.results = {}
# We pass this to QEMU which looks for fifos with .in and .out
# suffixes.
self.fifo_fn = os.path.join(outdir, "qemu-fifo")
self.pid_fn = os.path.join(outdir, "qemu.pid")
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
self.log_fn = log_fn
self.thread = threading.Thread(name=name, target=QEMUHandler._thread,
args=(self, timeout, outdir,
self.log_fn, self.fifo_fn,
self.pid_fn, self.results))
self.thread.daemon = True
verbose("Spawning QEMU process for %s" % name)
self.thread.start()
def get_fifo(self):
return self.fifo_fn
class SizeCalculator:
alloc_sections = ["bss", "noinit", "app_bss", "app_noinit"]
rw_sections = ["datas", "initlevel", "_k_task_list", "_k_event_list",
"_k_memory_pool", "exceptions", "initshell",
"_static_thread_area", "_k_timer_area", "_k_work_area",
"_k_mem_slab_area", "_k_mem_pool_area",
"_k_sem_area", "_k_mutex_area", "_k_alert_area",
"_k_fifo_area", "_k_lifo_area", "_k_stack_area",
"_k_msgq_area", "_k_mbox_area", "_k_pipe_area",
"net_if", "net_if_event", "net_stack", "net_l2_data",
"_k_queue_area", "_net_buf_pool_area", "app_datas",
"kobject_data", "mmu_tables"]
# These get copied into RAM only on non-XIP
ro_sections = ["text", "ctors", "init_array", "reset", "object_access",
"rodata", "devconfig", "net_l2", "vector"]
def __init__(self, filename, extra_sections):
"""Constructor
@param filename Path to the output binary
The <filename> is parsed by objdump to determine section sizes
"""
# Make sure this is an ELF binary
with open(filename, "rb") as f:
magic = f.read(4)
if (magic != b'\x7fELF'):
raise SanityRuntimeError("%s is not an ELF binary" % filename)
# Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
# GREP can not be used as it returns an error if the symbol is not found.
is_xip_command = "nm " + filename + " | awk '/CONFIG_XIP/ { print $3 }'"
is_xip_output = subprocess.check_output(is_xip_command, shell=True,
stderr=subprocess.STDOUT).decode("utf-8").strip()
if is_xip_output.endswith("no symbols"):
raise SanityRuntimeError("%s has no symbol information" % filename)
self.is_xip = (len(is_xip_output) != 0)
self.filename = filename
self.sections = []
self.rom_size = 0
self.ram_size = 0
self.extra_sections = extra_sections
self._calculate_sizes()
def get_ram_size(self):
"""Get the amount of RAM the application will use up on the device
@return amount of RAM, in bytes
"""
return self.ram_size
def get_rom_size(self):
"""Get the size of the data that this application uses on device's flash
@return amount of ROM, in bytes
"""
return self.rom_size
def unrecognized_sections(self):
"""Get a list of sections inside the binary that weren't recognized
@return list of unrecognized section names
"""
slist = []
for v in self.sections:
if not v["recognized"]:
slist.append(v["name"])
return slist
def _calculate_sizes(self):
""" Calculate RAM and ROM usage by section """
objdump_command = "objdump -h " + self.filename
objdump_output = subprocess.check_output(objdump_command,
shell=True).decode("utf-8").splitlines()
for line in objdump_output:
words = line.split()
if (len(words) == 0): # Skip lines that are too short
continue
index = words[0]
if (not index[0].isdigit()): # Skip lines that do not start
continue # with a digit
name = words[1] # Skip lines with section names
if (name[0] == '.'): # starting with '.'
continue
# TODO this doesn't actually reflect the size in flash or RAM as
# it doesn't include linker-imposed padding between sections.
# It is close though.
size = int(words[2], 16)
if size == 0:
continue
load_addr = int(words[4], 16)
virt_addr = int(words[3], 16)
# Add section to memory use totals (for both non-XIP and XIP scenarios)
# Unrecognized section names are not included in the calculations.
recognized = True
if name in SizeCalculator.alloc_sections:
self.ram_size += size
stype = "alloc"
elif name in SizeCalculator.rw_sections:
self.ram_size += size
self.rom_size += size
stype = "rw"
elif name in SizeCalculator.ro_sections:
self.rom_size += size
if not self.is_xip:
self.ram_size += size
stype = "ro"
else:
stype = "unknown"
if name not in self.extra_sections:
recognized = False
self.sections.append({"name" : name, "load_addr" : load_addr,
"size" : size, "virt_addr" : virt_addr,
"type" : stype, "recognized" : recognized})
class MakeGoal:
"""Metadata class representing one of the sub-makes called by MakeGenerator
MakeGenerator returns a dictionary of these which can then be associated
with TestInstances to get a complete picture of what happened during a test.
MakeGenerator is used for tasks outside of building tests (such as
defconfigs) which is why MakeGoal is a separate class from TestInstance.
"""
def __init__(self, name, text, qemu, make_log, build_log, run_log,
qemu_log):
self.name = name
self.text = text
self.qemu = qemu
self.make_log = make_log
self.build_log = build_log
self.run_log = run_log
self.qemu_log = qemu_log
self.make_state = "waiting"
self.failed = False
self.finished = False
self.reason = None
self.metrics = {}
def get_error_log(self):
if self.make_state == "waiting":
# Shouldn't ever see this; breakage in the main Makefile itself.
return self.make_log
elif self.make_state == "building":
# Failure when calling the sub-make to build the code
return self.build_log
elif self.make_state == "running":
# Failure in sub-make for "make run", qemu probably failed to start
return self.run_log
elif self.make_state == "finished":
# QEMU finished, but timed out or otherwise wasn't successful
return self.qemu_log
def fail(self, reason):
self.failed = True
self.finished = True
self.reason = reason
def success(self):
self.finished = True
def __str__(self):
if self.finished:
if self.failed:
return "[%s] failed (%s: see %s)" % (self.name, self.reason,
self.get_error_log())
else:
return "[%s] passed" % self.name
else:
return "[%s] in progress (%s)" % (self.name, self.make_state)
class MakeGenerator:
"""Generates a Makefile which just calls a bunch of sub-make sessions
In any given test suite we may need to build dozens if not hundreds of
test cases. The cleanest way to parallelize this is to just let Make
do the parallelization, sharing the jobserver among all the different
sub-make targets.
"""
GOAL_HEADER_TMPL = """.PHONY: {goal}
{goal}:
"""
MAKE_RULE_TMPL = """\t@echo sanity_test_{phase} {goal} >&2
\t$(MAKE) -C {directory} O={outdir} V={verb} EXTRA_CFLAGS="-Werror {cflags}" EXTRA_ASMFLAGS=-Wa,--fatal-warnings EXTRA_LDFLAGS=--fatal-warnings {args} >{logfile} 2>&1
"""
GOAL_FOOTER_TMPL = "\t@echo sanity_test_finished {goal} >&2\n\n"
    # Matches the "sanity_test_<phase> <goal>" progress markers echoed by the
    # generated Makefile, as well as Make's own "*** [target] Error" lines.
    re_make = re.compile(
        r"sanity_test_([A-Za-z0-9]+) (.+)|$|make[:] \*\*\* \[(.+:.+: )?(.+)\] Error.+$")
def __init__(self, base_outdir, asserts=False, deprecations=False, ccache=0):
"""MakeGenerator constructor
@param base_outdir Intended to be the base out directory. A make.log
file will be created here which contains the output of the
top-level Make session, as well as the dynamic control Makefile
        @param asserts If true, build the test code with CONFIG_ASSERT=1 and
        __ASSERT_ON=2 defined
        @param deprecations If true, pass -Wno-deprecated-declarations to the
        sub-makes so deprecation warnings do not fail the -Werror build
        @param ccache If nonzero, pass USE_CCACHE=1 to the sub-makes
        """
self.goals = {}
if not os.path.exists(base_outdir):
os.makedirs(base_outdir)
self.logfile = os.path.join(base_outdir, "make.log")
self.makefile = os.path.join(base_outdir, "Makefile")
self.asserts = asserts
self.deprecations = deprecations
self.ccache = ccache
def _get_rule_header(self, name):
return MakeGenerator.GOAL_HEADER_TMPL.format(goal=name)
def _get_sub_make(self, name, phase, workdir, outdir, logfile, args):
verb = "1" if VERBOSE else "0"
args = " ".join(args)
if self.asserts:
cflags="-DCONFIG_ASSERT=1 -D__ASSERT_ON=2"
else:
cflags=""
if self.deprecations:
cflags = cflags + " -Wno-deprecated-declarations"
if self.ccache:
args = args + " USE_CCACHE=1"
return MakeGenerator.MAKE_RULE_TMPL.format(phase=phase, goal=name,
outdir=outdir, cflags=cflags,
directory=workdir, verb=verb,
args=args, logfile=logfile)
def _get_rule_footer(self, name):
return MakeGenerator.GOAL_FOOTER_TMPL.format(goal=name)
def _add_goal(self, outdir):
if not os.path.exists(outdir):
os.makedirs(outdir)
def add_build_goal(self, name, directory, outdir, args, buildlog):
"""Add a goal to invoke a Kbuild session
@param name A unique string name for this build goal. The results
dictionary returned by execute() will be keyed by this name.
@param directory Absolute path to working directory, will be passed
to make -C
@param outdir Absolute path to output directory, will be passed to
Kbuild via -O=<path>
@param args Extra command line arguments to pass to 'make', typically
environment variables or specific Make goals
"""
self._add_goal(outdir)
build_logfile = os.path.join(outdir, buildlog)
text = (self._get_rule_header(name) +
self._get_sub_make(name, "building", directory,
outdir, build_logfile, args) +
self._get_rule_footer(name))
self.goals[name] = MakeGoal(name, text, None, self.logfile, build_logfile,
None, None)
def add_qemu_goal(self, name, directory, outdir, args, timeout=30):
"""Add a goal to build a Zephyr project and then run it under QEMU
The generated make goal invokes Make twice, the first time it will
build the default goal, and the second will invoke the 'qemu' goal.
The output of the QEMU session will be monitored, and terminated
either upon pass/fail result of the test program, or the timeout
is reached.
@param name A unique string name for this build goal. The results
dictionary returned by execute() will be keyed by this name.
@param directory Absolute path to working directory, will be passed
to make -C
@param outdir Absolute path to output directory, will be passed to
Kbuild via -O=<path>
@param args Extra command line arguments to pass to 'make', typically
environment variables. Do not pass specific Make goals here.
@param timeout Maximum length of time QEMU session should be allowed
to run before automatically killing it. Default is 30 seconds.
"""
self._add_goal(outdir)
build_logfile = os.path.join(outdir, "build.log")
run_logfile = os.path.join(outdir, "run.log")
qemu_logfile = os.path.join(outdir, "qemu.log")
q = QEMUHandler(name, outdir, qemu_logfile, timeout)
args.append("QEMU_PIPE=%s" % q.get_fifo())
text = (self._get_rule_header(name) +
self._get_sub_make(name, "building", directory,
outdir, build_logfile, args) +
self._get_sub_make(name, "running", directory,
outdir, run_logfile,
args + ["run"]) +
self._get_rule_footer(name))
self.goals[name] = MakeGoal(name, text, q, self.logfile, build_logfile,
run_logfile, qemu_logfile)
def add_unit_goal(self, name, directory, outdir, args, timeout=30, coverage=False):
self._add_goal(outdir)
build_logfile = os.path.join(outdir, "build.log")
run_logfile = os.path.join(outdir, "run.log")
qemu_logfile = os.path.join(outdir, "qemu.log")
valgrind_logfile = os.path.join(outdir, "valgrind.log")
if coverage:
args += ["COVERAGE=1"]
# we handle running in the UnitHandler class
text = (self._get_rule_header(name) +
self._get_sub_make(name, "building", directory,
outdir, build_logfile, args) +
self._get_rule_footer(name))
q = UnitHandler(name, directory, outdir, run_logfile, valgrind_logfile, timeout)
self.goals[name] = MakeGoal(name, text, q, self.logfile, build_logfile,
run_logfile, valgrind_logfile)
def add_test_instance(self, ti, build_only=False, enable_slow=False, coverage=False,
extra_args=[]):
"""Add a goal to build/test a TestInstance object
@param ti TestInstance object to build. The status dictionary returned
by execute() will be keyed by its .name field.
"""
args = ti.test.extra_args[:]
arg_list = [
"ARCH=%s" % ti.platform.arch,
"BOARD=%s" % ti.platform.name]
if len(ti.test.extra_configs) > 0:
arg_list.append("OVERLAY_CONFIG=%s" % os.path.join(ti.outdir, "overlay.conf"))
args.extend(arg_list)
args.extend(extra_args)
if (ti.platform.qemu_support and (not ti.build_only) and
(not build_only) and (enable_slow or not ti.test.slow)):
self.add_qemu_goal(ti.name, ti.test.code_location, ti.outdir,
args, ti.test.timeout)
elif ti.test.type == "unit":
self.add_unit_goal(ti.name, ti.test.code_location, ti.outdir,
args, ti.test.timeout, coverage)
else:
self.add_build_goal(ti.name, ti.test.code_location, ti.outdir,
args, "build.log")
def execute(self, callback_fn=None, context=None):
"""Execute all the registered build goals
@param callback_fn If not None, a callback function will be called
as individual goals transition between states. This function
should accept two parameters: a string state and an arbitrary
context object, supplied here
@param context Context object to pass to the callback function.
Type and semantics are specific to that callback function.
@return A dictionary mapping goal names to final status.
"""
with open(self.makefile, "wt") as tf, \
open(os.devnull, "wb") as devnull, \
open(self.logfile, "wt") as make_log:
# Create our dynamic Makefile and execute it.
# Watch stderr output which is where we will keep
# track of build state
for name, goal in self.goals.items():
tf.write(goal.text)
tf.write("all: %s\n" % (" ".join(self.goals.keys())))
tf.flush()
cmd = ["make", "-k", "-j", str(CPU_COUNTS * 2), "-f", tf.name, "all"]
p = subprocess.Popen(cmd, stderr=subprocess.PIPE,
stdout=devnull)
for line in iter(p.stderr.readline, b''):
line = line.decode("utf-8")
make_log.write(line)
verbose("MAKE: " + repr(line.strip()))
m = MakeGenerator.re_make.match(line)
if not m:
continue
state, name, _, error = m.groups()
if error:
goal = self.goals[error]
else:
goal = self.goals[name]
goal.make_state = state
if error:
# Sometimes QEMU will run an image and then crash out, which
# will cause the 'make run' invocation to exit with
# nonzero status.
# Need to distinguish this case from a compilation failure.
if goal.qemu:
goal.fail("qemu_crash")
else:
goal.fail("build_error")
else:
if state == "finished":
if goal.qemu:
if goal.qemu.unit:
# We can't run unit tests with Make
goal.qemu.handle()
if goal.qemu.returncode == 2:
goal.qemu_log = goal.qemu.valgrind_log
elif goal.qemu.returncode:
goal.qemu_log = goal.qemu.run_log
thread_status, metrics = goal.qemu.get_state()
goal.metrics.update(metrics)
if thread_status == "passed":
goal.success()
else:
goal.fail(thread_status)
else:
goal.success()
if callback_fn:
callback_fn(context, self.goals, goal)
p.wait()
return self.goals
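# Illustrative sketch of driving MakeGenerator directly; the goal name, paths
# and arguments here are made up, and TestSuite below is the real consumer:
#
#   mg = MakeGenerator("/tmp/sanity-out")
#   mg.add_build_goal("hello_world",
#                     os.path.join(ZEPHYR_BASE, "samples/hello_world"),
#                     "/tmp/sanity-out/hello_world",
#                     ["BOARD=qemu_x86"], "build.log")
#   goals = mg.execute()
#   failed = [g.name for g in goals.values() if g.failed]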
# "list" - List of strings
# "list:<type>" - List of <type>
# "set" - Set of unordered, unique strings
# "set:<type>" - Set of <type>
# "float" - Floating point
# "int" - Integer
# "bool" - Boolean
# "str" - String
# XXX Be sure to update __doc__ if you change any of this!!
arch_valid_keys = {"name" : {"type" : "str", "required" : True},
"platforms" : {"type" : "list", "required" : True},
"supported_toolchains" : {"type" : "list", "required" : True}}
platform_valid_keys = {"qemu_support" : {"type" : "bool", "default" : False},
"supported_toolchains" : {"type" : "list", "default" : []}}
testcase_valid_keys = {"tags" : {"type" : "set", "required" : False},
"type" : {"type" : "str", "default": "integration"},
"extra_args" : {"type" : "list"},
"extra_configs" : {"type" : "list"},
"build_only" : {"type" : "bool", "default" : False},
"build_on_all" : {"type" : "bool", "default" : False},
"skip" : {"type" : "bool", "default" : False},
"slow" : {"type" : "bool", "default" : False},
"timeout" : {"type" : "int", "default" : 60},
"min_ram" : {"type" : "int", "default" : 8},
"depends_on": {"type" : "set"},
"min_flash" : {"type" : "int", "default" : 32},
"arch_whitelist" : {"type" : "set"},
"arch_exclude" : {"type" : "set"},
"extra_sections" : {"type" : "list", "default" : []},
"platform_exclude" : {"type" : "set"},
"platform_whitelist" : {"type" : "set"},
"toolchain_exclude" : {"type" : "set"},
"toolchain_whitelist" : {"type" : "set"},
"filter" : {"type" : "str"}}
class SanityConfigParser:
"""Class to read test case files with semantic checking
"""
def __init__(self, filename, schema):
"""Instantiate a new SanityConfigParser object
@param filename Source .yaml file to read
"""
cp = scl.yaml_load_verify(filename, schema)
self.filename = filename
self.cp = cp
def _cast_value(self, value, typestr):
if type(value) is str:
v = value.strip()
if typestr == "str":
return v
elif typestr == "float":
return float(value)
elif typestr == "int":
return int(value)
elif typestr == "bool":
return value
elif typestr.startswith("list") and type(value) is list:
return value
elif typestr.startswith("list") and type(value) is str:
vs = v.split()
if len(typestr) > 4 and typestr[4] == ":":
return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
else:
return vs
elif typestr.startswith("set"):
vs = v.split()
if len(typestr) > 3 and typestr[3] == ":":
return set([self._cast_value(vsi, typestr[4:]) for vsi in vs])
else:
return set(vs)
else:
            raise ConfigurationError(self.filename, "unknown type '%s'" % typestr)
def section(self, name):
for s in self.sections():
if name in s:
return s.get(name, {})
def sections(self):
"""Get the set of test sections within the .yaml file
@return a list of string section names"""
return self.cp['tests']
def get_section(self, section, valid_keys, common):
"""Get a dictionary representing the keys/values within a section
@param section The section in the .yaml file to retrieve data from
@param valid_keys A dictionary representing the intended semantics
for this section. Each key in this dictionary is a key that could
be specified, if a key is given in the .yaml file which isn't in
here, it will generate an error. Each value in this dictionary
is another dictionary containing metadata:
"default" - Default value if not given
"type" - Data type to convert the text value to. Simple types
supported are "str", "float", "int", "bool" which will get
converted to respective Python data types. "set" and "list"
may also be specified which will split the value by
            whitespace (but keep the elements as strings). Finally,
"list:<type>" and "set:<type>" may be given which will
perform a type conversion after splitting the value up.
"required" - If true, raise an error if not defined. If false
and "default" isn't specified, a type conversion will be
done on an empty string
@return A dictionary containing the section key-value pairs with
type conversion and default values filled in per valid_keys
"""
d = {}
for k, v in common.items():
d[k] = v
for k, v in self.section(section).items():
if k not in valid_keys:
raise ConfigurationError(self.filename,
"Unknown config key '%s' in definition for '%s'"
% (k, section))
if k in d:
if type(d[k]) is str:
d[k] += " " + v
else:
d[k] = v
for k, kinfo in valid_keys.items():
if k not in d:
if "required" in kinfo:
required = kinfo["required"]
else:
required = False
if required:
raise ConfigurationError(self.filename,
"missing required value for '%s' in section '%s'"
% (k, section))
else:
if "default" in kinfo:
default = kinfo["default"]
else:
default = self._cast_value("", kinfo["type"])
d[k] = default
else:
try:
d[k] = self._cast_value(d[k], kinfo["type"])
except ValueError as ve:
raise ConfigurationError(self.filename,
"bad %s value '%s' for key '%s' in section '%s'"
% (kinfo["type"], d[k], k, section))
return d
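# Illustrative sketch of how SanityConfigParser works together with the key
# tables above (the yaml path is hypothetical; TestSuite.__init__ below does
# this for real):
#
#   scp = SanityConfigParser("tests/kernel/common/testcase.yaml",
#                            TestSuite.yaml_tc_schema)
#   for section in scp.sections():
#       name = list(section.keys())[0]
#       tc_dict = scp.get_section(name, testcase_valid_keys, common={})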
class Platform:
"""Class representing metadata for a particular platform
Maps directly to BOARD when building"""
yaml_platform_schema = scl.yaml_load(
os.path.join(os.environ['ZEPHYR_BASE'],
"scripts", "sanity_chk", "sanitycheck-platform-schema.yaml"))
def __init__(self, cfile):
"""Constructor.
        @param cfile Path to the board's .yaml configuration file, which is
        parsed for metadata such as RAM/flash size, supported features and
        toolchains, and whether the board can run under QEMU
"""
scp = SanityConfigParser(cfile, self.yaml_platform_schema)
cp = scp.cp
self.name = cp['identifier']
# if no RAM size is specified by the board, take a default of 128K
self.ram = cp.get("ram", 128)
testing = cp.get("testing", {})
self.ignore_tags = testing.get("ignore_tags", [])
self.default = testing.get("default", False)
# if no flash size is specified by the board, take a default of 512K
self.flash = cp.get("flash", 512)
self.supported = set()
for supp_feature in cp.get("supported", []):
for item in supp_feature.split(":"):
self.supported.add(item)
self.qemu_support = True if cp.get('type', "na") == 'qemu' else False
self.arch = cp['arch']
self.supported_toolchains = cp.get("toolchain", [])
self.defconfig = None
def __repr__(self):
return "<%s on %s>" % (self.name, self.arch)
class Architecture:
"""Class representing metadata for a particular architecture
"""
def __init__(self, name, platforms):
"""Architecture constructor
        @param name String name of the architecture
        @param platforms List of Platform objects belonging to this architecture
"""
self.platforms = platforms
self.name = name
def __repr__(self):
return "<arch %s>" % self.name
class TestCase:
"""Class representing a test application
"""
def __init__(self, testcase_root, workdir, name, tc_dict, yamlfile):
"""TestCase constructor.
This gets called by TestSuite as it finds and reads test yaml files.
Multiple TestCase instances may be generated from a single testcase.yaml,
each one corresponds to a section within that file.
We need to have a unique name for every single test case. Since
a testcase.yaml can define multiple tests, the canonical name for
the test case is <workdir>/<name>.
@param testcase_root Absolute path to the root directory where
all the test cases live
@param workdir Relative path to the project directory for this
test application from the test_case root.
@param name Name of this test case, corresponding to the section name
in the test case configuration file. For many test cases that just
define one test, can be anything and is usually "test". This is
really only used to distinguish between different cases when
the testcase.yaml defines multiple tests
@param tc_dict Dictionary with section values for this test case
from the testcase.yaml file
"""
self.code_location = os.path.join(testcase_root, workdir)
self.type = tc_dict["type"]
self.tags = tc_dict["tags"]
self.extra_args = tc_dict["extra_args"]
self.extra_configs = tc_dict["extra_configs"]
self.arch_whitelist = tc_dict["arch_whitelist"]
self.arch_exclude = tc_dict["arch_exclude"]
self.skip = tc_dict["skip"]
self.platform_exclude = tc_dict["platform_exclude"]
self.platform_whitelist = tc_dict["platform_whitelist"]
self.toolchain_exclude = tc_dict["toolchain_exclude"]
self.toolchain_whitelist = tc_dict["toolchain_whitelist"]
self.tc_filter = tc_dict["filter"]
self.timeout = tc_dict["timeout"]
self.build_only = tc_dict["build_only"]
self.build_on_all = tc_dict["build_on_all"]
self.slow = tc_dict["slow"]
self.min_ram = tc_dict["min_ram"]
self.depends_on = tc_dict["depends_on"]
self.min_flash = tc_dict["min_flash"]
self.extra_sections = tc_dict["extra_sections"]
self.path = os.path.normpath(os.path.join(os.path.abspath(testcase_root).replace(ZEPHYR_BASE + "/",''), workdir, name))
        self.name = self.path
self.defconfig = {}
self.yamlfile = yamlfile
def __repr__(self):
return self.name
class TestInstance:
"""Class representing the execution of a particular TestCase on a platform
@param test The TestCase object we want to build/execute
@param platform Platform object that we want to build and run against
@param base_outdir Base directory for all test results. The actual
out directory used is <outdir>/<platform>/<test case name>
"""
def __init__(self, test, platform, base_outdir, build_only=False,
slow=False, coverage=False):
self.test = test
self.platform = platform
self.name = os.path.join(platform.name, test.name)
self.outdir = os.path.join(base_outdir, platform.name, test.path)
self.build_only = build_only or test.build_only
def create_overlay(self):
if len(self.test.extra_configs) > 0:
file = os.path.join(self.outdir, "overlay.conf")
os.makedirs(self.outdir, exist_ok=True)
            with open(file, "w") as f:
                f.write("\n".join(self.test.extra_configs))
def calculate_sizes(self):
"""Get the RAM/ROM sizes of a test case.
This can only be run after the instance has been executed by
MakeGenerator, otherwise there won't be any binaries to measure.
@return A SizeCalculator object
"""
fns = glob.glob(os.path.join(self.outdir, "*.elf"))
fns = [x for x in fns if not x.endswith('_prebuilt.elf')]
if (len(fns) != 1):
raise BuildError("Missing/multiple output ELF binary")
return SizeCalculator(fns[0], self.test.extra_sections)
def __repr__(self):
return "<TestCase %s on %s>" % (self.test.name, self.platform.name)
def defconfig_cb(context, goals, goal):
if not goal.failed:
return
info("%sCould not build defconfig for %s%s" %
         (COLOR_RED, goal.name, COLOR_NORMAL))
if INLINE_LOGS:
with open(goal.get_error_log()) as fp:
data = fp.read()
sys.stdout.write(data)
if log_file:
log_file.write(data)
else:
info("\tsee: " + COLOR_YELLOW + goal.get_error_log() + COLOR_NORMAL)
class TestSuite:
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
yaml_tc_schema = scl.yaml_load(
os.path.join(os.environ['ZEPHYR_BASE'],
"scripts", "sanity_chk", "sanitycheck-tc-schema.yaml"))
def __init__(self, board_root_list, testcase_roots, outdir, coverage):
# Keep track of which test cases we've filtered out and why
discards = {}
self.arches = {}
self.testcases = {}
self.platforms = []
self.outdir = os.path.abspath(outdir)
self.instances = {}
self.goals = None
self.discards = None
self.coverage = coverage
for testcase_root in testcase_roots:
testcase_root = os.path.abspath(testcase_root)
debug("Reading test case configuration files under %s..." %
testcase_root)
for dirpath, dirnames, filenames in os.walk(testcase_root,
topdown=True):
verbose("scanning %s" % dirpath)
if 'sample.yaml' in filenames:
filename = 'sample.yaml'
elif 'testcase.yaml' in filenames:
filename = 'testcase.yaml'
else:
continue
verbose("Found possible test case in " + dirpath)
dirnames[:] = []
yaml_path = os.path.join(dirpath, filename)
try:
cp = SanityConfigParser(yaml_path, self.yaml_tc_schema)
                except RuntimeError as e:
                    error("E: %s: can't load: %s" % (yaml_path, e))
                    continue
workdir = os.path.relpath(dirpath, testcase_root)
common = {}
if 'common' in cp.cp:
common = cp.cp['common']
for section in cp.sections():
name = list(section.keys())[0]
tc_dict = cp.get_section(name, testcase_valid_keys, common)
tc = TestCase(testcase_root, workdir, name, tc_dict,
yaml_path)
self.testcases[tc.name] = tc
for board_root in board_root_list:
board_root = os.path.abspath(board_root)
debug("Reading platform configuration files under %s..." % board_root)
for dirpath, dirnames, filenames in os.walk(board_root):
for filename in filenames:
if filename.endswith(".yaml"):
fn = os.path.join(dirpath, filename)
verbose("Found plaform configuration " + fn)
try:
platform = Platform(fn)
self.platforms.append(platform)
except RuntimeError as e:
error("E: %s: can't load: %s" % (fn, e))
arches = []
for p in self.platforms:
arches.append(p.arch)
for a in list(set(arches)):
aplatforms = [ p for p in self.platforms if p.arch == a ]
arch = Architecture(a, aplatforms)
self.arches[a] = arch
self.instances = {}
def get_last_failed(self):
if not os.path.exists(LAST_SANITY):
raise SanityRuntimeError("Couldn't find last sanity run.")
result = []
with open(LAST_SANITY, "r") as fp:
cr = csv.DictReader(fp)
for row in cr:
if row["passed"] == "True":
continue
test = row["test"]
platform = row["platform"]
result.append((test, platform))
return result
def load_from_file(self, file):
if not os.path.exists(file):
raise SanityRuntimeError("Couldn't find input file with list of tests.")
result = []
with open(file, "r") as fp:
cr = csv.reader(fp)
instance_list = []
for row in cr:
name = os.path.join(row[0], row[1])
platforms = self.arches[row[3]].platforms
myp = None
for p in platforms:
if p.name == row[2]:
myp = p
break
instance = TestInstance(self.testcases[name], myp, self.outdir)
instance.create_overlay()
instance_list.append(instance)
self.add_instances(instance_list)
def apply_filters(self, args, toolchain):
instances = []
discards = {}
platform_filter = args.platform
last_failed = args.only_failed
testcase_filter = args.test
arch_filter = args.arch
tag_filter = args.tag
exclude_tag = args.exclude_tag
config_filter = args.config
platform_limit = args.platform_limit
extra_args = args.extra_args
enable_ccache = args.ccache
all_plats = args.all
verbose("platform filter: " + str(platform_filter))
verbose(" arch_filter: " + str(arch_filter))
verbose(" tag_filter: " + str(tag_filter))
verbose(" exclude_tag: " + str(exclude_tag))
verbose(" config_filter: " + str(config_filter))
verbose(" enable_ccache: " + str(enable_ccache))
if last_failed:
failed_tests = self.get_last_failed()
default_platforms = False
if all_plats:
info("Selecting all possible platforms per test case")
# When --all used, any --platform arguments ignored
platform_filter = []
elif not platform_filter:
info("Selecting default platforms per test case")
default_platforms = True
mg = MakeGenerator(self.outdir, ccache=enable_ccache)
dlist = {}
for tc_name, tc in self.testcases.items():
for arch_name, arch in self.arches.items():
for plat in arch.platforms:
instance = TestInstance(tc, plat, self.outdir)
if (arch_name == "unit") != (tc.type == "unit"):
continue
if tc.build_on_all and not platform_filter:
platform_filter = []
if tc.skip:
continue
if tag_filter and not tc.tags.intersection(tag_filter):
continue
if exclude_tag and tc.tags.intersection(exclude_tag):
continue
if testcase_filter and tc_name not in testcase_filter:
continue
if last_failed and (tc.name, plat.name) not in failed_tests:
continue
if arch_filter and arch_name not in arch_filter:
continue
if tc.arch_whitelist and arch.name not in tc.arch_whitelist:
continue
if tc.arch_exclude and arch.name in tc.arch_exclude:
continue
if tc.platform_exclude and plat.name in tc.platform_exclude:
continue
if tc.toolchain_exclude and toolchain in tc.toolchain_exclude:
continue
if platform_filter and plat.name not in platform_filter:
continue
if plat.ram < tc.min_ram:
continue
if set(plat.ignore_tags) & tc.tags:
continue
if tc.depends_on:
dep_intersection = tc.depends_on.intersection(set(plat.supported))
if dep_intersection != set(tc.depends_on):
continue
if plat.flash < tc.min_flash:
continue
if tc.platform_whitelist and plat.name not in tc.platform_whitelist:
continue
if tc.toolchain_whitelist and toolchain not in tc.toolchain_whitelist:
continue
if (tc.tc_filter and (plat.default or all_plats or platform_filter) and
toolchain in plat.supported_toolchains):
args = tc.extra_args[:]
args.extend(["ARCH=" + plat.arch,
"BOARD=" + plat.name, "config-sanitycheck"])
args.extend(extra_args)
# FIXME would be nice to use a common outdir for this so that
# conf, gen_idt, etc aren't rebuilt for every combination,
# need a way to avoid different Make processes from clobbering
# each other since they all try to build them simultaneously
o = os.path.join(self.outdir, plat.name, tc.path)
dlist[tc, plat, tc.name.split("/")[-1]] = os.path.join(o,".config-sanitycheck")
goal = "_".join([plat.name, "_".join(tc.name.split("/")), "config-sanitycheck"])
mg.add_build_goal(goal, os.path.join(ZEPHYR_BASE, tc.code_location), o,
args, "config-sanitycheck.log")
info("Building testcase defconfigs...")
results = mg.execute(defconfig_cb)
for name, goal in results.items():
if goal.failed:
raise SanityRuntimeError("Couldn't build some defconfigs")
for k, out_config in dlist.items():
test, plat, name = k
defconfig = {}
with open(out_config, "r") as fp:
for line in fp.readlines():
m = TestSuite.config_re.match(line)
if not m:
if line.strip() and not line.startswith("#"):
sys.stderr.write("Unrecognized line %s\n" % line)
continue
defconfig[m.group(1)] = m.group(2).strip()
test.defconfig[plat] = defconfig
for tc_name, tc in self.testcases.items():
for arch_name, arch in self.arches.items():
instance_list = []
for plat in arch.platforms:
instance = TestInstance(tc, plat, self.outdir)
if (arch_name == "unit") != (tc.type == "unit"):
# Discard silently
continue
if tc.skip:
discards[instance] = "Skip filter"
continue
if tc.build_on_all and not platform_filter:
platform_filter = []
if tag_filter and not tc.tags.intersection(tag_filter):
discards[instance] = "Command line testcase tag filter"
continue
if exclude_tag and tc.tags.intersection(exclude_tag):
discards[instance] = "Command line testcase exclude filter"
continue
if testcase_filter and tc_name not in testcase_filter:
discards[instance] = "Testcase name filter"
continue
if last_failed and (tc.name, plat.name) not in failed_tests:
discards[instance] = "Passed or skipped during last run"
continue
if arch_filter and arch_name not in arch_filter:
discards[instance] = "Command line testcase arch filter"
continue
if tc.arch_whitelist and arch.name not in tc.arch_whitelist:
discards[instance] = "Not in test case arch whitelist"
continue
if tc.arch_exclude and arch.name in tc.arch_exclude:
discards[instance] = "In test case arch exclude"
continue
if tc.platform_exclude and plat.name in tc.platform_exclude:
discards[instance] = "In test case platform exclude"
continue
if tc.toolchain_exclude and toolchain in tc.toolchain_exclude:
discards[instance] = "In test case toolchain exclude"
continue
if platform_filter and plat.name not in platform_filter:
discards[instance] = "Command line platform filter"
continue
if tc.platform_whitelist and plat.name not in tc.platform_whitelist:
discards[instance] = "Not in testcase platform whitelist"
continue
if tc.toolchain_whitelist and toolchain not in tc.toolchain_whitelist:
discards[instance] = "Not in testcase toolchain whitelist"
continue
if toolchain and toolchain not in plat.supported_toolchains and tc.type != 'unit':
discards[instance] = "Not supported by the toolchain"
continue
if plat.ram < tc.min_ram:
discards[instance] = "Not enough RAM"
continue
if tc.depends_on:
dep_intersection = tc.depends_on.intersection(set(plat.supported))
if dep_intersection != set(tc.depends_on):
discards[instance] = "No hardware support"
continue
if plat.flash < tc.min_flash:
discards[instance] = "Not enough FLASH"
continue
if set(plat.ignore_tags) & tc.tags:
discards[instance] = "Excluded tags per platform"
continue
defconfig = {"ARCH" : arch.name, "PLATFORM" : plat.name}
defconfig.update(os.environ)
for p, tdefconfig in tc.defconfig.items():
if p == plat:
defconfig.update(tdefconfig)
break
if tc.tc_filter:
try:
res = expr_parser.parse(tc.tc_filter, defconfig)
except (ValueError, SyntaxError) as se:
sys.stderr.write("Failed processing %s\n" % tc.yamlfile)
raise se
if not res:
discards[instance] = ("defconfig doesn't satisfy expression '%s'" %
tc.tc_filter)
continue
instance.create_overlay()
instance_list.append(instance)
if not instance_list:
# Every platform in this arch was rejected already
continue
if default_platforms and not tc.build_on_all:
                    if not tc.platform_whitelist:
                        instances = list(filter(lambda ti: ti.platform.default,
                                                instance_list))
                        self.add_instances(instances)
                    else:
                        self.add_instances(instance_list[:platform_limit])
                    for instance in list(filter(lambda ti: not ti.platform.default,
                                                instance_list)):
                        discards[instance] = "Not a default test platform"
else:
self.add_instances(instance_list)
self.discards = discards
return discards
def add_instances(self, ti_list):
for ti in ti_list:
self.instances[ti.name] = ti
def execute(self, cb, cb_context, build_only, enable_slow, enable_asserts, enable_deprecations,
extra_args, enable_ccache):
def calc_one_elf_size(name, goal):
if not goal.failed:
i = self.instances[name]
sc = i.calculate_sizes()
goal.metrics["ram_size"] = sc.get_ram_size()
goal.metrics["rom_size"] = sc.get_rom_size()
goal.metrics["unrecognized"] = sc.unrecognized_sections()
mg = MakeGenerator(self.outdir, asserts=enable_asserts, deprecations=enable_deprecations,
ccache=enable_ccache)
for i in self.instances.values():
mg.add_test_instance(i, build_only, enable_slow, self.coverage, extra_args)
self.goals = mg.execute(cb, cb_context)
# Parallelize size calculation
executor = concurrent.futures.ThreadPoolExecutor(CPU_COUNTS)
futures = [executor.submit(calc_one_elf_size, name, goal) \
for name, goal in self.goals.items()]
concurrent.futures.wait(futures)
return self.goals
def run_report(self, filename):
with open(filename, "at") as csvfile:
fieldnames = ['path', 'test', 'platform', 'arch']
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
for instance in self.instances.values():
rowdict = {
"path": os.path.dirname(instance.test.name),
"test" : os.path.basename(instance.test.name),
"platform" : instance.platform.name,
"arch" : instance.platform.arch
}
cw.writerow(rowdict)
def discard_report(self, filename):
        if self.discards is None:
            raise SanityRuntimeError("apply_filters() hasn't been run!")
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "reason"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance, reason in self.discards.items():
rowdict = {"test" : instance.test.name,
"arch" : instance.platform.arch,
"platform" : instance.platform.name,
"reason" : reason}
cw.writerow(rowdict)
def compare_metrics(self, filename):
# name, datatype, lower results better
interesting_metrics = [("ram_size", int, True),
("rom_size", int, True)]
        if self.goals is None:
            raise SanityRuntimeError("execute() hasn't been run!")
if not os.path.exists(filename):
info("Cannot compare metrics, %s not found" % filename)
return []
results = []
saved_metrics = {}
with open(filename) as fp:
cr = csv.DictReader(fp)
for row in cr:
d = {}
for m, _, _ in interesting_metrics:
d[m] = row[m]
saved_metrics[(row["test"], row["platform"])] = d
for name, goal in self.goals.items():
i = self.instances[name]
mkey = (i.test.name, i.platform.name)
if mkey not in saved_metrics:
continue
sm = saved_metrics[mkey]
for metric, mtype, lower_better in interesting_metrics:
if metric not in goal.metrics:
continue
if sm[metric] == "":
continue
delta = goal.metrics[metric] - mtype(sm[metric])
if delta == 0:
continue
results.append((i, metric, goal.metrics[metric], delta,
lower_better))
return results
def testcase_xunit_report(self, filename, duration, args):
        if self.goals is None:
            raise SanityRuntimeError("execute() hasn't been run!")
fails = 0
passes = 0
errors = 0
for name, goal in self.goals.items():
if goal.failed:
if goal.reason in ['build_error', 'qemu_crash']:
errors += 1
else:
fails += 1
else:
passes += 1
run = "Sanitycheck"
eleTestsuite = None
append = args.only_failed
if os.path.exists(filename) and append:
tree = ET.parse(filename)
eleTestsuites = tree.getroot()
            eleTestsuite = tree.findall('testsuite')[0]
else:
eleTestsuites = ET.Element('testsuites')
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite', name=run, time="%d" %duration,
tests="%d" %(errors + passes + fails), failures="%d" %fails, errors="%d" %errors, skip="0")
qemu_time = "0"
for name, goal in self.goals.items():
i = self.instances[name]
if append:
for tc in eleTestsuite.findall('testcase'):
if tc.get('classname') == "%s:%s" %(i.platform.name, i.test.name):
eleTestsuite.remove(tc)
if not goal.failed and goal.qemu:
qemu_time = "%s" %(goal.metrics["qemu_time"])
eleTestcase = ET.SubElement(eleTestsuite, 'testcase', classname="%s:%s" %(i.platform.name, i.test.name), name="%s" %(name), time=qemu_time)
if goal.failed:
failure = ET.SubElement(eleTestcase, 'failure', type="failure", message=goal.reason)
p = ("%s/%s/%s" %(args.outdir, i.platform.name, i.test.name))
bl = os.path.join(p, "build.log")
if goal.reason != 'build_error':
bl = os.path.join(p, "qemu.log")
if os.path.exists(bl):
with open(bl, "r") as f:
log = f.read()
ansi_escape = re.compile(r'\x1b[^m]*m')
output = ansi_escape.sub('', str(log))
failure.text = (escape(output))
        result = ET.tostring(eleTestsuites)
        with open(filename, 'wb') as report:
            report.write(result)
def testcase_report(self, filename):
        if self.goals is None:
            raise SanityRuntimeError("execute() hasn't been run!")
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "passed", "status",
"extra_args", "qemu", "qemu_time", "ram_size",
"rom_size"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for name, goal in self.goals.items():
i = self.instances[name]
rowdict = {"test" : i.test.name,
"arch" : i.platform.arch,
"platform" : i.platform.name,
"extra_args" : " ".join(i.test.extra_args),
"qemu" : i.platform.qemu_support}
if goal.failed:
rowdict["passed"] = False
rowdict["status"] = goal.reason
else:
rowdict["passed"] = True
if goal.qemu:
rowdict["qemu_time"] = goal.metrics["qemu_time"]
rowdict["ram_size"] = goal.metrics["ram_size"]
rowdict["rom_size"] = goal.metrics["rom_size"]
cw.writerow(rowdict)
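# Illustrative end-to-end flow (a sketch with made-up values; parse_arguments()
# below provides the real args object):
#
#   suite = TestSuite(args.board_root, args.testcase_root or ["tests", "samples"],
#                     args.outdir, args.coverage)
#   discards = suite.apply_filters(args, toolchain)
#   goals = suite.execute(cb, cb_context, args.build_only, args.enable_slow,
#                         args.enable_asserts, args.error_on_deprecations,
#                         args.extra_args, args.ccache)
#   suite.testcase_report(LAST_SANITY)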
def parse_arguments():
parser = argparse.ArgumentParser(description = __doc__,
formatter_class = argparse.RawDescriptionHelpFormatter)
parser.fromfile_prefix_chars = "+"
parser.add_argument("-p", "--platform", action="append",
help="Platform filter for testing. This option may be used multiple "
"times. Testcases will only be built/run on the platforms "
"specified. If this option is not used, then platforms marked "
"as default in the platform metadata file will be chosen "
"to build and test. ")
parser.add_argument("-L", "--platform-limit", action="store", type=int,
metavar="N", default=1,
help="Controls what platforms are tested if --platform or "
"--all are not used. For each architecture specified by "
"--arch (defaults to all of them), choose the first "
"N platforms to test in the arch-specific .yaml file "
"'platforms' list. Defaults to 1.")
parser.add_argument("-a", "--arch", action="append",
help="Arch filter for testing. Takes precedence over --platform. "
"If unspecified, test all arches. Multiple invocations "
"are treated as a logical 'or' relationship")
parser.add_argument("-t", "--tag", action="append",
help="Specify tags to restrict which tests to run by tag value. "
"Default is to not do any tag filtering. Multiple invocations "
"are treated as a logical 'or' relationship")
parser.add_argument("-e", "--exclude-tag", action="append",
help="Specify tags of tests that should not run. "
"Default is to run all tests with all tags.")
parser.add_argument("-f", "--only-failed", action="store_true",
help="Run only those tests that failed the previous sanity check "
"invocation.")
parser.add_argument("-c", "--config", action="append",
help="Specify platform configuration values filtering. This can be "
"specified two ways: <config>=<value> or just <config>. The "
"defconfig for all platforms will be "
"checked. For the <config>=<value> case, only match defconfig "
"that have that value defined. For the <config> case, match "
"defconfig that have that value assigned to any value. "
"Prepend a '!' to invert the match.")
parser.add_argument("-s", "--test", action="append",
help="Run only the specified test cases. These are named by "
"<path to test project relative to "
"--testcase-root>/<testcase.yaml section name>")
parser.add_argument("-l", "--all", action="store_true",
help="Build/test on all platforms. Any --platform arguments "
"ignored.")
parser.add_argument("-o", "--testcase-report",
help="Output a CSV spreadsheet containing results of the test run")
parser.add_argument("-d", "--discard-report",
help="Output a CSV spreadsheet showing tests that were skipped "
"and why")
parser.add_argument("--compare-report",
help="Use this report file for size comparison")
parser.add_argument("--ccache", action="store_const", const=1, default=0,
help="Enable the use of ccache when building")
parser.add_argument("-B", "--subset",
help="Only run a subset of the tests, 1/4 for running the first 25%%, "
"3/5 means run the 3rd fifth of the total. "
"This option is useful when running a large number of tests on "
"different hosts to speed up execution time.")
parser.add_argument("-y", "--dry-run", action="store_true",
help="Create the filtered list of test cases, but don't actually "
"run them. Useful if you're just interested in "
"--discard-report")
parser.add_argument("-r", "--release", action="store_true",
help="Update the benchmark database with the results of this test "
"run. Intended to be run by CI when tagging an official "
"release. This database is used as a basis for comparison "
"when looking for deltas in metrics such as footprint")
parser.add_argument("-w", "--warnings-as-errors", action="store_true",
help="Treat warning conditions as errors")
parser.add_argument("-v", "--verbose", action="count", default=0,
help="Emit debugging information, call multiple times to increase "
"verbosity")
parser.add_argument("-i", "--inline-logs", action="store_true",
help="Upon test failure, print relevant log data to stdout "
"instead of just a path to it")
parser.add_argument("--log-file", metavar="FILENAME", action="store",
help="log also to file")
parser.add_argument("-m", "--last-metrics", action="store_true",
help="Instead of comparing metrics from the last --release, "
"compare with the results of the previous sanity check "
"invocation")
parser.add_argument("-u", "--no-update", action="store_true",
help="do not update the results of the last run of the sanity "
"checks")
parser.add_argument("-F", "--load-tests", metavar="FILENAME", action="store",
help="Load list of tests to be run from file.")
parser.add_argument("-E", "--save-tests", metavar="FILENAME", action="store",
help="Save list of tests to be run to file.")
parser.add_argument("-b", "--build-only", action="store_true",
help="Only build the code, do not execute any of it in QEMU")
parser.add_argument("-j", "--jobs", type=int,
help="Number of cores to use when building, defaults to "
"number of CPUs * 2")
parser.add_argument("-H", "--footprint-threshold", type=float, default=5,
help="When checking test case footprint sizes, warn the user if "
"the new app size is greater then the specified percentage "
"from the last release. Default is 5. 0 to warn on any "
"increase on app size")
parser.add_argument("-D", "--all-deltas", action="store_true",
help="Show all footprint deltas, positive or negative. Implies "
"--footprint-threshold=0")
parser.add_argument("-O", "--outdir",
default="%s/sanity-out" % ZEPHYR_BASE,
help="Output directory for logs and binaries.")
parser.add_argument("-n", "--no-clean", action="store_true",
help="Do not delete the outdir before building. Will result in "
"faster compilation since builds will be incremental")
parser.add_argument("-T", "--testcase-root", action="append", default=[],
help="Base directory to recursively search for test cases. All "
"testcase.yaml files under here will be processed. May be "
"called multiple times. Defaults to the 'samples' and "
"'tests' directories in the Zephyr tree.")
board_root_list = ["%s/boards" % ZEPHYR_BASE , "%s/scripts/sanity_chk/boards" % ZEPHYR_BASE]
parser.add_argument("-A", "--board-root", action="append",
default=board_root_list,
help="Directory to search for board configuration files. All .yaml "
"files in the directory will be processed.")
parser.add_argument("-z", "--size", action="append",
help="Don't run sanity checks. Instead, produce a report to "
"stdout detailing RAM/ROM sizes on the specified filenames. "
"All other command line arguments ignored.")
parser.add_argument("-S", "--enable-slow", action="store_true",
help="Execute time-consuming test cases that have been marked "
"as 'slow' in testcase.yaml. Normally these are only built.")
parser.add_argument("-R", "--enable-asserts", action="store_true",
help="Build all test cases with assertions enabled.")
parser.add_argument("-Q", "--error-on-deprecations", action="store_false",
help="Error on deprecation warnings.")
parser.add_argument("-x", "--extra-args", action="append", default=[],
help="Extra arguments to pass to the build when compiling test "
"cases. May be called multiple times. These will be passed "
"in after any sanitycheck-supplied options.")
parser.add_argument("-C", "--coverage", action="store_true",
help="Scan for unit test coverage with gcov + lcov.")
return parser.parse_args()
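
# Print the contents of a log file inline when --inline-logs is enabled;
# otherwise just print the path to it.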
def log_info(filename):
filename = os.path.relpath(os.path.realpath(filename))
if INLINE_LOGS:
info("{:-^100}".format(filename))
try:
with open(filename) as fp:
data = fp.read()
except Exception as e:
data = "Unable to read log data (%s)\n" % (str(e))
sys.stdout.write(data)
if log_file:
log_file.write(data)
info("{:-^100}".format(filename))
else:
info("\tsee: " + COLOR_YELLOW + filename + COLOR_NORMAL)
def terse_test_cb(instances, goals, goal):
total_tests = len(goals)
total_done = 0
total_failed = 0
for k, g in goals.items():
if g.finished:
total_done += 1
if g.failed:
total_failed += 1
if goal.failed:
i = instances[goal.name]
info("\n\n{:<25} {:<50} {}FAILED{}: {}".format(i.platform.name,
i.test.name, COLOR_RED, COLOR_NORMAL, goal.reason))
log_info(goal.get_error_log())
info("")
sys.stdout.write("\rtotal complete: %s%4d/%4d%s %2d%% failed: %s%4d%s" % (
COLOR_GREEN, total_done, total_tests, COLOR_NORMAL,
int((float(total_done) / total_tests) * 100),
COLOR_RED if total_failed > 0 else COLOR_NORMAL,
total_failed, COLOR_NORMAL))
sys.stdout.flush()
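
# Progress callback used in verbose mode: print one line per test instance
# with its platform, test name and current status.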
def chatty_test_cb(instances, goals, goal):
i = instances[goal.name]
if VERBOSE < 2 and not goal.finished:
return
if goal.failed:
status = COLOR_RED + "FAILED" + COLOR_NORMAL + ": " + goal.reason
elif goal.finished:
status = COLOR_GREEN + "PASSED" + COLOR_NORMAL
else:
status = goal.make_state
info("{:<25} {:<50} {}".format(i.platform.name, i.test.name, status))
if goal.failed:
log_info(goal.get_error_log())
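
# Print a per-section size breakdown and the ROM/RAM totals for a single
# binary, as computed by a SizeCalculator instance.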
def size_report(sc):
info(sc.filename)
info("SECTION NAME VMA LMA SIZE HEX SZ TYPE")
    for v in sc.sections:
        info("%-17s 0x%08x 0x%08x %8d 0x%05x %-7s" %
             (v["name"], v["virt_addr"], v["load_addr"], v["size"], v["size"],
              v["type"]))
info("Totals: %d bytes (ROM), %d bytes (RAM)" %
(sc.rom_size, sc.ram_size))
info("")
def generate_coverage(outdir, ignores):
with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
coveragefile = os.path.join(outdir, "coverage.info")
ztestfile = os.path.join(outdir, "ztest.info")
subprocess.call(["lcov", "--capture", "--directory", outdir,
"--output-file", coveragefile], stdout=coveragelog)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["lcov", "--extract", coveragefile,
os.path.join(ZEPHYR_BASE, "tests", "ztest", "*"),
"--output-file", ztestfile], stdout=coveragelog)
subprocess.call(["lcov", "--remove", ztestfile,
os.path.join(ZEPHYR_BASE, "tests/ztest/test/*"),
"--output-file", ztestfile], stdout=coveragelog)
for i in ignores:
subprocess.call(["lcov", "--remove", coveragefile, i,
"--output-file", coveragefile], stdout=coveragelog)
subprocess.call(["genhtml", "-output-directory",
os.path.join(outdir, "coverage"),
coveragefile, ztestfile], stdout=coveragelog)
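
# Entry point: parse the command line, build and filter the test suite,
# execute it, compare size metrics against the chosen baseline and write
# the various reports.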
def main():
start_time = time.time()
global VERBOSE, INLINE_LOGS, CPU_COUNTS, log_file
args = parse_arguments()
toolchain = os.environ.get("ZEPHYR_GCC_VARIANT", None)
if toolchain == "zephyr":
os.environ["DISABLE_TRYRUN"] = "1"
if args.size:
for fn in args.size:
size_report(SizeCalculator(fn, []))
sys.exit(0)
VERBOSE += args.verbose
INLINE_LOGS = args.inline_logs
if args.log_file:
log_file = open(args.log_file, "w")
if args.jobs:
CPU_COUNTS = args.jobs
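    # Validate the --subset argument (given as M/N) before doing any work.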
if args.subset:
subset, sets = args.subset.split("/")
if int(subset) > 0 and int(sets) >= int(subset):
info("Running only a subset: %s/%s" %(subset,sets))
else:
error("You have provided a wrong subset value: %s." %args.subset)
return
if os.path.exists(args.outdir) and not args.no_clean:
info("Cleaning output directory " + args.outdir)
shutil.rmtree(args.outdir)
if not args.testcase_root:
args.testcase_root = [os.path.join(ZEPHYR_BASE, "tests"),
os.path.join(ZEPHYR_BASE, "samples")]
ts = TestSuite(args.board_root, args.testcase_root, args.outdir, args.coverage)
discards = []
if args.load_tests:
ts.load_from_file(args.load_tests)
else:
discards = ts.apply_filters(args, toolchain)
if args.discard_report:
ts.discard_report(args.discard_report)
if VERBOSE > 1:
for i, reason in discards.items():
debug("{:<25} {:<50} {}SKIPPED{}: {}".format(i.platform.name,
i.test.name, COLOR_YELLOW, COLOR_NORMAL, reason))
ts.instances = OrderedDict(sorted(ts.instances.items(), key=lambda t: t[0]))
if args.save_tests:
ts.run_report(args.save_tests)
return
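    # Keep only this host's slice of the sorted test instances; the last
    # subset also absorbs any rounding remainder.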
if args.subset:
subset, sets = args.subset.split("/")
total = len(ts.instances)
per_set = round(total / int(sets))
        start = (int(subset) - 1) * per_set
if subset == sets:
end = total
else:
end = start + per_set
        sliced_instances = islice(ts.instances.items(), start, end)
ts.instances = OrderedDict(sliced_instances)
info("%d tests selected, %d tests discarded due to filters" %
(len(ts.instances), len(discards)))
if args.dry_run:
return
if VERBOSE or not TERMINAL:
goals = ts.execute(chatty_test_cb, ts.instances, args.build_only,
args.enable_slow, args.enable_asserts, args.error_on_deprecations,
args.extra_args, args.ccache)
else:
goals = ts.execute(terse_test_cb, ts.instances, args.build_only,
args.enable_slow, args.enable_asserts, args.error_on_deprecations,
args.extra_args, args.ccache)
info("")
# figure out which report to use for size comparison
if args.compare_report:
report_to_use = args.compare_report
elif args.last_metrics:
report_to_use = LAST_SANITY
else:
report_to_use = RELEASE_DATA
deltas = ts.compare_metrics(report_to_use)
warnings = 0
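    # Report footprint deltas that regress by more than the configured
    # threshold; with --all-deltas, report every delta in either direction.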
if deltas:
for i, metric, value, delta, lower_better in deltas:
if not args.all_deltas and ((delta < 0 and lower_better) or
(delta > 0 and not lower_better)):
continue
percentage = (float(delta) / float(value - delta))
if not args.all_deltas and (percentage <
(args.footprint_threshold / 100.0)):
continue
info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
i.platform.name, i.test.name, COLOR_YELLOW,
"INFO" if args.all_deltas else "WARNING", COLOR_NORMAL,
metric, delta, value, percentage))
warnings += 1
if warnings:
info("Deltas based on metrics from last %s" %
("release" if not args.last_metrics else "run"))
failed = 0
for name, goal in goals.items():
if goal.failed:
failed += 1
elif goal.metrics.get("unrecognized"):
info("%sFAILED%s: %s has unrecognized binary sections: %s" %
(COLOR_RED, COLOR_NORMAL, goal.name,
str(goal.metrics["unrecognized"])))
failed += 1
if args.coverage:
info("Generating coverage files...")
generate_coverage(args.outdir, ["tests/*", "samples/*"])
duration = time.time() - start_time
info("%s%d of %d%s tests passed with %s%d%s warnings in %d seconds" %
(COLOR_RED if failed else COLOR_GREEN, len(goals) - failed,
len(goals), COLOR_NORMAL, COLOR_YELLOW if warnings else COLOR_NORMAL,
warnings, COLOR_NORMAL, duration))
if args.testcase_report:
ts.testcase_report(args.testcase_report)
if not args.no_update:
ts.testcase_xunit_report(LAST_SANITY_XUNIT, duration, args)
ts.testcase_report(LAST_SANITY)
if args.release:
ts.testcase_report(RELEASE_DATA)
if log_file:
log_file.close()
if failed or (warnings and args.warnings_as_errors):
sys.exit(1)
if __name__ == "__main__":
main()