Andrew Boie | 6acbe63 | 2015-07-17 12:03:52 -0700 | [diff] [blame] | 1 | #!/usr/bin/env python |
| 2 | """Zephyr Sanity Tests |
| 3 | |
| 4 | This script scans for the set of unit test applications in the git |
| 5 | repository and attempts to execute them. By default, it tries to |
| 6 | build each test case on one platform per architecture, using a precedence |
list defined in an architecture configuration file, and if possible
| 8 | run the tests in the QEMU emulator. |
| 9 | |
| 10 | Test cases are detected by the presence of a 'testcase.ini' file in |
| 11 | the application's project directory. This file may contain one or |
| 12 | more blocks, each identifying a test scenario. The title of the block |
| 13 | is a name for the test case, which only needs to be unique for the |
| 14 | test cases specified in that testcase.ini file. The full canonical |
| 15 | name for each test case is <path to test case under samples/>/<block>. |
| 16 | |
| 17 | Each testcase.ini block can define the following key/value pairs: |
| 18 | |
| 19 | tags = <list of tags> (required) |
| 20 | A set of string tags for the testcase. Usually pertains to |
| 21 | functional domains but can be anything. Command line invocations |
| 22 | of this script can filter the set of tests to run based on tag. |
| 23 | |
| 24 | extra_args = <list of extra arguments> |
| 25 | Extra arguments to pass to Make when building or running the |
| 26 | test case. |
| 27 | |
| 28 | build_only = <True|False> |
| 29 | If true, don't try to run the test under QEMU even if the |
| 30 | selected platform supports it. |
| 31 | |
| 32 | timeout = <number of seconds> |
| 33 | Length of time to run test in QEMU before automatically killing it. |
Defaults to 60 seconds.
| 35 | |
| 36 | arch_whitelist = <list of arches, such as x86, arm, arc> |
| 37 | Set of architectures that this test case should only be run for. |
| 38 | |
Anas Nashif | 30d1387 | 2015-10-05 10:02:45 -0400 | [diff] [blame^] | 39 | arch_exclude = <list of arches, such as x86, arm, arc> |
| 40 | Set of architectures that this test case should not run on. |
| 41 | |
Andrew Boie | 6acbe63 | 2015-07-17 12:03:52 -0700 | [diff] [blame] | 42 | platform_whitelist = <list of platforms> |
Anas Nashif | 30d1387 | 2015-10-05 10:02:45 -0400 | [diff] [blame^] | 43 | Set of platforms that this test case should only be run for. |
| 44 | |
| 45 | platform_exclude = <list of platforms> |
| 46 | Set of platforms that this test case should not run on. |
Andrew Boie | 6acbe63 | 2015-07-17 12:03:52 -0700 | [diff] [blame] | 47 | |
| 48 | config_whitelist = <list of config options> |
| 49 | Config options can either be config names like CONFIG_FOO which |
| 50 | match if the configuration is defined to any value, or key/value |
| 51 | pairs like CONFIG_FOO=bar which match if it is set to a specific |
| 52 | value. May prepend a '!' to invert the match. |
| 53 | |
Architectures and platforms are defined in an architecture configuration
| 55 | file which are stored by default in scripts/sanity_chk/arches/. These |
| 56 | each define an [arch] block with the following key/value pairs: |
| 57 | |
| 58 | name = <arch name> |
| 59 | The name of the arch. Example: x86 |
| 60 | |
| 61 | platforms = <list of supported platforms in order of precedence> |
| 62 | List of supported platforms for this arch. The ordering here |
| 63 | is used to select a default platform to build for that arch. |
| 64 | |
| 65 | For every platform defined, there must be a corresponding block for it |
| 66 | in the arch configuration file. This block can be empty if there are |
| 67 | no special definitions for that arch. Options are: |
| 68 | |
| 69 | qemu_support = <True|False> (default False) |
| 70 | Indicates whether binaries for this platform can run under QEMU |
| 71 | |
| 72 | microkernel_support = <True|False> (default True) |
| 73 | Indicates whether this platform supports microkernel or just nanokernel |
| 74 | |
| 75 | The set of test cases that actually run depends on directives in the |
testcase and architecture .ini file and options passed in on the command
line. If there is ever any confusion, running with -v or --discard-report
| 78 | can help show why particular test cases were skipped. |
| 79 | |
| 80 | Metrics (such as pass/fail state and binary size) for the last code |
| 81 | release are stored in scripts/sanity_chk/sanity_last_release.csv. |
| 82 | To update this, pass the --all --release options. |
| 83 | |
| 84 | Most everyday users will run with no arguments. |
| 85 | """ |
| 86 | |
| 87 | import argparse |
| 88 | import os |
| 89 | import sys |
| 90 | import ConfigParser |
| 91 | import re |
| 92 | import tempfile |
| 93 | import subprocess |
| 94 | import multiprocessing |
| 95 | import select |
| 96 | import shutil |
| 97 | import signal |
| 98 | import threading |
| 99 | import time |
| 100 | import csv |
Andrew Boie | 5d4eb78 | 2015-10-02 10:04:56 -0700 | [diff] [blame] | 101 | import glob |
Andrew Boie | 6acbe63 | 2015-07-17 12:03:52 -0700 | [diff] [blame] | 102 | |
if "ZEPHYR_BASE" not in os.environ:
    sys.stderr.write("$ZEPHYR_BASE environment variable undefined.\n")
    exit(1)
# Root of the Zephyr source tree; all result/config paths derive from it
ZEPHYR_BASE = os.environ["ZEPHYR_BASE"]
# Global verbosity level: debug() prints at >= 1, verbose() at >= 2
VERBOSE = 0
# CSV with results of the current/most recent sanity run
LAST_SANITY = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk",
                           "last_sanity.csv")
# CSV with metrics from the last code release, used as a comparison baseline
RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk",
                            "sanity_last_release.csv")
# Number of parallel make jobs; oversubscribed so jobs blocked on I/O
# don't leave CPUs idle
PARALLEL = multiprocessing.cpu_count() * 2

# Enable ANSI color escapes only when stdout is an actual terminal
if os.isatty(sys.stdout.fileno()):
    TERMINAL = True
    COLOR_NORMAL = '\033[0m'
    COLOR_RED = '\033[91m'
    COLOR_GREEN = '\033[92m'
    COLOR_YELLOW = '\033[93m'
else:
    TERMINAL = False
    COLOR_NORMAL = ""
    COLOR_RED = ""
    COLOR_GREEN = ""
    COLOR_YELLOW = ""
| 126 | |
class SanityCheckException(Exception):
    """Base class for all errors raised by the sanity check script."""
    pass

class SanityRuntimeError(SanityCheckException):
    """A runtime prerequisite was not met (e.g. input not an ELF binary)."""
    pass

class ConfigurationError(SanityCheckException):
    """A .ini configuration file contained a bad key or value."""
    def __init__(self, cfile, message):
        """Constructor

        @param cfile Path to the configuration file with the problem
        @param message Description of what is wrong with it
        """
        # Populate Exception.args so generic handlers, logging, and
        # pickling see the details, not just our custom attributes
        super(ConfigurationError, self).__init__(cfile, message)
        self.cfile = cfile
        self.message = message

    def __str__(self):
        return repr(self.cfile + ": " + self.message)

class MakeError(SanityCheckException):
    """Base class for failures in a generated sub-Make session."""
    pass

class BuildError(MakeError):
    """The sub-make building the test binary failed."""
    pass

class ExecutionError(MakeError):
    """The sub-make running the binary (e.g. under QEMU) failed."""
    pass
| 149 | |
| 150 | # Debug Functions |
| 151 | |
def debug(what):
    """Print a diagnostic message, shown at verbosity level 1 and up."""
    if VERBOSE >= 1:
        print(what)
| 155 | |
def error(what):
    """Report an error on stderr, highlighted in red when on a terminal."""
    msg = COLOR_RED + what + COLOR_NORMAL + "\n"
    sys.stderr.write(msg)
| 158 | |
def verbose(what):
    """Print a detailed trace message, shown at verbosity level 2 and up."""
    if VERBOSE >= 2:
        print(what)
| 162 | |
def info(what):
    """Write an informational message to stdout, with a trailing newline."""
    text = what + "\n"
    sys.stdout.write(text)
| 165 | |
| 166 | # Utility functions |
class QEMUHandler:
    """Spawns a thread to monitor QEMU output from pipes

    We pass QEMU_PIPE to 'make qemu' and monitor the pipes for output.
    We need to do this as once qemu starts, it runs forever until killed.
    Test cases emit special messages to the console as they run, we check
    for these to collect whether the test passed or failed.
    """
    # Sentinel strings the test programs print on the console when they
    # complete; seeing one is how we know the run is over and its result
    RUN_PASSED = "PROJECT EXECUTION SUCCESSFUL"
    RUN_FAILED = "PROJECT EXECUTION FAILED"

    @staticmethod
    def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results):
        """Monitor thread: watch QEMU console output until pass/fail/timeout

        Creates the FIFO pair, reads the console a byte at a time, logs
        everything to <logfile>, reports the outcome via handler.set_state()
        and finally kills the QEMU process.

        @param handler Parent QEMUHandler; receives the final state
        @param timeout Seconds to wait before declaring a "timeout" result
        @param outdir Working directory (unused in this thread body)
        @param logfile Path where all console output is written verbatim
        @param fifo_fn Base name of the FIFO pair shared with QEMU
        @param pid_fn File QEMU writes its process id into; used to kill it
        @param results Shared results dict (unused in this thread body)
        """
        fifo_in = fifo_fn + ".in"
        fifo_out = fifo_fn + ".out"

        # These in/out nodes are named from QEMU's perspective, not ours
        if os.path.exists(fifo_in):
            os.unlink(fifo_in)
        os.mkfifo(fifo_in)
        if os.path.exists(fifo_out):
            os.unlink(fifo_out)
        os.mkfifo(fifo_out)

        # We don't do anything with out_fp but we need to open it for
        # writing so that QEMU doesn't block, due to the way pipes work
        out_fp = open(fifo_in, "wb")
        # Disable internal buffering, we don't
        # want read() or poll() to ever block if there is data in there
        in_fp = open(fifo_out, "rb", buffering=0)
        log_out_fp = open(logfile, "w")

        start_time = time.time()
        timeout_time = start_time + timeout
        p = select.poll()
        p.register(in_fp, select.POLLIN)

        metrics = {}
        line = ""
        while True:
            # Remaining time budget, converted to milliseconds for poll()
            this_timeout = int((timeout_time - time.time()) * 1000)
            if this_timeout < 0 or not p.poll(this_timeout):
                out_state = "timeout"
                break

            # Read one byte at a time so we can check the timeout budget
            # between characters rather than blocking on a whole line
            c = in_fp.read(1)
            if c == "":
                # EOF, this shouldn't happen unless QEMU crashes
                out_state = "unexpected eof"
                break
            line = line + c
            if c != "\n":
                continue

            # If we get here, line contains a full line of data output from QEMU
            log_out_fp.write(line)
            log_out_fp.flush()
            line = line.strip()
            verbose("QEMU: %s" % line)

            if line == QEMUHandler.RUN_PASSED:
                out_state = "passed"
                break

            if line == QEMUHandler.RUN_FAILED:
                out_state = "failed"
                break

            # TODO: Add support for getting numerical performance data
            # from test cases. Will involve extending test case reporting
            # APIs. Add whatever gets reported to the metrics dictionary
            line = ""

        metrics["qemu_time"] = time.time() - start_time
        verbose("QEMU complete (%s) after %f seconds" %
                (out_state, metrics["qemu_time"]))
        handler.set_state(out_state, metrics)

        log_out_fp.close()
        out_fp.close()
        in_fp.close()

        # QEMU runs forever; terminate it using the pid recorded in outdir
        pid = int(open(pid_fn).read())
        os.unlink(pid_fn)
        os.kill(pid, signal.SIGTERM)
        os.unlink(fifo_in)
        os.unlink(fifo_out)


    def __init__(self, name, outdir, log_fn, timeout):
        """Constructor

        @param name Arbitrary name of the created thread
        @param outdir Working directory, should be where qemu.pid gets
            created by kbuild
        @param log_fn Absolute path to write out QEMU's log data
        @param timeout Kill the QEMU process if it doesn't finish up within
            the given number of seconds
        """
        # Create pipe to get QEMU's serial output
        self.results = {}
        self.state = "waiting"
        self.lock = threading.Lock()

        # We pass this to QEMU which looks for fifos with .in and .out
        # suffixes.
        self.fifo_fn = os.path.join(outdir, "qemu-fifo")

        # Remove a stale pid file so we never kill an unrelated process
        self.pid_fn = os.path.join(outdir, "qemu.pid")
        if os.path.exists(self.pid_fn):
            os.unlink(self.pid_fn)

        self.log_fn = log_fn
        self.thread = threading.Thread(name=name, target=QEMUHandler._thread,
                                       args=(self, timeout, outdir, self.log_fn,
                                             self.fifo_fn, self.pid_fn,
                                             self.results))
        # Daemonize so a stuck monitor thread never blocks script exit
        self.thread.daemon = True
        verbose("Spawning QEMU process for %s" % name)
        self.thread.start()

    def set_state(self, state, metrics):
        # Called from the monitor thread; the lock guards against a
        # concurrent get_state() from the main thread
        self.lock.acquire()
        self.state = state
        self.metrics = metrics
        self.lock.release()

    def get_state(self):
        # Returns a (state, metrics) tuple, read atomically under the lock
        self.lock.acquire()
        ret = (self.state, self.metrics)
        self.lock.release()
        return ret

    def get_fifo(self):
        # Base FIFO path; the caller passes this to make as QEMU_PIPE
        return self.fifo_fn
| 302 | |
| 303 | |
class SizeCalculator:
    """Estimate RAM and ROM usage of a built ELF binary by parsing the
    section headers reported by objdump.
    """

    # Sections that occupy RAM at runtime but nothing in ROM (zero-filled)
    alloc_sections = ["bss", "noinit"]
    # Read/write data: stored in ROM and copied into RAM.
    # (A duplicate "initlevel" entry was removed; these lists are only
    # used for membership tests, so behavior is unchanged.)
    rw_sections = ["datas", "initlevel", "_k_mem_map_ptr", "_k_pipe_ptr",
                   "_k_task_ptr", "_k_task_list"]
    # These get copied into RAM only on non-XIP
    ro_sections = ["text", "ctors", "rodata", "devconfig"]

    def __init__(self, filename):
        """Constructor

        @param filename Path to the output binary
            The <filename> is parsed by objdump to determine section sizes
        @raise SanityRuntimeError if <filename> is not an ELF binary
        """
        # Make sure this is an ELF binary
        with open(filename, "rb") as f:
            magic = f.read(4)

        if (magic != "\x7fELF"):
            raise SanityRuntimeError("%s is not an ELF binary" % filename)

        # Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
        # GREP can not be used as it returns an error if the symbol is not found.
        is_xip_command = "nm " + filename + " | awk '/CONFIG_XIP/ { print $3 }'"
        is_xip_output = subprocess.check_output(is_xip_command, shell=True)
        self.is_xip = (len(is_xip_output) != 0)

        self.filename = filename
        self.sections = []      # Parsed section records, in objdump order
        self.rom_size = 0       # Total bytes stored on flash
        self.ram_size = 0       # Total bytes occupying RAM at runtime
        self.mismatches = []

        self._calculate_sizes()

    def get_ram_size(self):
        """Get the amount of RAM the application will use up on the device

        @return amount of RAM, in bytes
        """
        return self.ram_size

    def get_rom_size(self):
        """Get the size of the data that this application uses on device's flash

        @return amount of ROM, in bytes
        """
        return self.rom_size

    def unrecognized_sections(self):
        """Get a list of sections inside the binary that weren't recognized

        @return list of unrecognized section names
        """
        slist = []
        for v in self.sections:
            if not v["recognized"]:
                slist.append(v["name"])
        return slist

    def mismatched_sections(self):
        """Get a list of sections in the binary whose LMA and VMA offsets
        from the previous section aren't proportional. This leads to issues
        on XIP systems as they aren't correctly copied in to RAM

        @return list of (name, lma_off, vma_off) tuples
        """
        slist = []
        for v in self.sections:
            if v["lma_off"] != v["vma_off"]:
                slist.append((v["name"], v["lma_off"], v["vma_off"]))
        return slist

    def _calculate_sizes(self):
        """ Calculate RAM and ROM usage by section """
        objdump_command = "objdump -h " + self.filename
        objdump_output = subprocess.check_output(objdump_command,
                                                 shell=True).splitlines()

        for line in objdump_output:
            words = line.split()

            if (len(words) == 0):               # Skip lines that are too short
                continue

            index = words[0]
            if (not index[0].isdigit()):        # Skip lines that do not start
                continue                        # with a digit

            name = words[1]                     # Skip lines with section names
            if (name[0] == '.'):                # starting with '.'
                continue

            # TODO this doesn't actually reflect the size in flash or RAM as
            # it doesn't include linker-imposed padding between sections.
            # It is close though.
            size = int(words[2], 16)
            if size == 0:
                continue

            load_addr = int(words[4], 16)       # LMA column of objdump -h
            virt_addr = int(words[3], 16)       # VMA column of objdump -h

            # Add section to memory use totals (for both non-XIP and XIP scenarios)
            # Unrecognized section names are not included in the calculations.
            recognized = True
            if name in SizeCalculator.alloc_sections:
                self.ram_size += size
                stype = "alloc"
            elif name in SizeCalculator.rw_sections:
                self.ram_size += size
                self.rom_size += size
                stype = "rw"
            elif name in SizeCalculator.ro_sections:
                self.rom_size += size
                if not self.is_xip:
                    self.ram_size += size
                stype = "ro"
            else:
                stype = "unknown"
                recognized = False

            lma_off = 0
            vma_off = 0

            # Look for different section padding for LMA and VMA, if present
            # this really messes up XIP systems as __csSet() copies all of
            # them off flash into RAM as a single large block of memory
            if self.is_xip and len(self.sections) > 0:
                p = self.sections[-1]

                if stype == "rw" and p["type"] == "rw":
                    lma_off = load_addr - p["load_addr"]
                    vma_off = virt_addr - p["virt_addr"]

            self.sections.append({"name" : name, "load_addr" : load_addr,
                                  "size" : size, "virt_addr" : virt_addr,
                                  "type" : stype, "recognized" : recognized,
                                  "lma_off" : lma_off, "vma_off" : vma_off})
Andrew Boie | 6acbe63 | 2015-07-17 12:03:52 -0700 | [diff] [blame] | 441 | |
| 442 | |
class MakeGoal:
    """Metadata class representing one of the sub-makes called by MakeGenerator

    MakeGenerator returns a dictionary of these which can then be associated
    with TestInstances to get a complete picture of what happened during a test.
    MakeGenerator is used for tasks outside of building tests (such as
    defconfigs) which is why MakeGoal is a separate class from TestInstance.
    """
    def __init__(self, name, text, qemu, make_log, build_log, run_log,
                 qemu_log):
        """Constructor

        @param name Unique name for this goal
        @param text Generated Makefile rule text for this goal
        @param qemu QEMUHandler monitoring the run, or None for build-only
        @param make_log Log file of the top-level Make session
        @param build_log Log file of the sub-make build phase
        @param run_log Log file of the sub-make 'qemu' phase
        @param qemu_log Log file of the QEMU console output
        """
        self.name = name
        self.text = text
        self.qemu = qemu
        self.make_log = make_log
        self.build_log = build_log
        self.run_log = run_log
        self.qemu_log = qemu_log
        # Phase reported by the generated rule's stderr markers
        self.make_state = "waiting"
        self.failed = False
        self.finished = False
        self.reason = None
        self.metrics = {}

    def get_error_log(self):
        """Return the log most likely to explain a failure in the current
        make state (None for an unrecognized state)."""
        # waiting:  breakage in the main Makefile itself
        # building: failure in the sub-make that builds the code
        # running:  sub-make for "make qemu" failed, QEMU likely didn't start
        # finished: QEMU ran, but timed out or otherwise wasn't successful
        logs_by_state = {
            "waiting": self.make_log,
            "building": self.build_log,
            "running": self.run_log,
            "finished": self.qemu_log,
        }
        return logs_by_state.get(self.make_state)

    def fail(self, reason):
        """Mark this goal finished and failed for the given reason."""
        self.failed = True
        self.finished = True
        self.reason = reason

    def success(self):
        """Mark this goal finished successfully."""
        self.finished = True

    def __str__(self):
        if not self.finished:
            return "[%s] in progress (%s)" % (self.name, self.make_state)
        if self.failed:
            return "[%s] failed (%s: see %s)" % (self.name, self.reason,
                                                 self.get_error_log())
        return "[%s] passed" % self.name
| 497 | |
| 498 | |
class MakeGenerator:
    """Generates a Makefile which just calls a bunch of sub-make sessions

    In any given test suite we may need to build dozens if not hundreds of
    test cases. The cleanest way to parallelize this is to just let Make
    do the parallelization, sharing the jobserver among all the different
    sub-make targets.
    """

    # Declares the goal phony and opens its recipe
    GOAL_HEADER_TMPL = """.PHONY: {goal}
{goal}:
"""

    # One sub-make invocation. A "sanity_test_<phase> <goal>" marker is
    # echoed to stderr first so execute() can track which goal entered
    # which phase; warnings are promoted to errors for C/asm/link steps.
    MAKE_RULE_TMPL = """\t@echo sanity_test_{phase} {goal} >&2
\t$(MAKE) -C {directory} O={outdir} V={verb} EXTRA_CFLAGS=-Werror EXTRA_ASMFLAGS=-Wa,--fatal-warnings EXTRA_LFLAGS=--fatal-warnings {args} >{logfile} 2>&1
"""

    GOAL_FOOTER_TMPL = "\t@echo sanity_test_finished {goal} >&2\n\n"

    # Matches either one of our stderr state markers (groups 1 and 2) or a
    # Make error report naming the goal that failed (group 3)
    re_make = re.compile("sanity_test_([A-Za-z0-9]+) (.+)|$|make[:] \*\*\* [[](.+)[]] Error.+$")

    def __init__(self, base_outdir):
        """MakeGenerator constructor

        @param base_outdir Intended to be the base out directory. A make.log
            file will be created here which contains the output of the
            top-level Make session, as well as the dynamic control Makefile.
            (Sub-make verbosity is controlled by the global VERBOSE flag,
            not a constructor parameter.)
        """
        # Maps goal name -> MakeGoal for everything registered so far
        self.goals = {}
        if not os.path.exists(base_outdir):
            os.makedirs(base_outdir)
        self.logfile = os.path.join(base_outdir, "make.log")
        self.makefile = os.path.join(base_outdir, "Makefile")

    def _get_rule_header(self, name):
        # Opening lines of one goal's Makefile rule
        return MakeGenerator.GOAL_HEADER_TMPL.format(goal=name)

    def _get_sub_make(self, name, phase, workdir, outdir, logfile, args):
        # One sub-make recipe line; V=1/0 tracks the global VERBOSE flag
        verb = "1" if VERBOSE else "0"
        args = " ".join(args)
        return MakeGenerator.MAKE_RULE_TMPL.format(phase=phase, goal=name,
                                                   outdir=outdir,
                                                   directory=workdir, verb=verb,
                                                   args=args, logfile=logfile)

    def _get_rule_footer(self, name):
        # Marker telling execute() the goal finished
        return MakeGenerator.GOAL_FOOTER_TMPL.format(goal=name)

    def _add_goal(self, outdir):
        # Ensure the goal's output directory exists
        if not os.path.exists(outdir):
            os.makedirs(outdir)

    def add_build_goal(self, name, directory, outdir, args):
        """Add a goal to invoke a Kbuild session

        @param name A unique string name for this build goal. The results
            dictionary returned by execute() will be keyed by this name.
        @param directory Absolute path to working directory, will be passed
            to make -C
        @param outdir Absolute path to output directory, will be passed to
            Kbuild via -O=<path>
        @param args Extra command line arguments to pass to 'make', typically
            environment variables or specific Make goals
        """
        self._add_goal(outdir)
        build_logfile = os.path.join(outdir, "build.log")
        text = (self._get_rule_header(name) +
                self._get_sub_make(name, "building", directory,
                                   outdir, build_logfile, args) +
                self._get_rule_footer(name))
        # Build-only goal: no QEMU handler, run log, or QEMU log
        self.goals[name] = MakeGoal(name, text, None, self.logfile, build_logfile,
                                    None, None)

    def add_qemu_goal(self, name, directory, outdir, args, timeout=30):
        """Add a goal to build a Zephyr project and then run it under QEMU

        The generated make goal invokes Make twice, the first time it will
        build the default goal, and the second will invoke the 'qemu' goal.
        The output of the QEMU session will be monitored, and terminated
        either upon pass/fail result of the test program, or the timeout
        is reached.

        @param name A unique string name for this build goal. The results
            dictionary returned by execute() will be keyed by this name.
        @param directory Absolute path to working directory, will be passed
            to make -C
        @param outdir Absolute path to output directory, will be passed to
            Kbuild via -O=<path>
        @param args Extra command line arguments to pass to 'make', typically
            environment variables. Do not pass specific Make goals here.
        @param timeout Maximum length of time QEMU session should be allowed
            to run before automatically killing it. Default is 30 seconds.
        """

        self._add_goal(outdir)
        build_logfile = os.path.join(outdir, "build.log")
        run_logfile = os.path.join(outdir, "run.log")
        qemu_logfile = os.path.join(outdir, "qemu.log")

        # Spawn the console monitor thread now; QEMU_PIPE tells the 'qemu'
        # make goal which FIFO pair to attach the serial console to
        q = QEMUHandler(name, outdir, qemu_logfile, timeout)
        args.append("QEMU_PIPE=%s" % q.get_fifo())
        text = (self._get_rule_header(name) +
                self._get_sub_make(name, "building", directory,
                                   outdir, build_logfile, args) +
                self._get_sub_make(name, "running", directory,
                                   outdir, run_logfile,
                                   args + ["qemu"]) +
                self._get_rule_footer(name))
        self.goals[name] = MakeGoal(name, text, q, self.logfile, build_logfile,
                                    run_logfile, qemu_logfile)


    def add_test_instance(self, ti, build_only=False):
        """Add a goal to build/test a TestInstance object

        @param ti TestInstance object to build. The status dictionary returned
            by execute() will be keyed by its .name field.
        @param build_only If True, register a build-only goal even when the
            platform could run the test under QEMU
        """
        args = ti.test.extra_args[:]
        args.extend(["ARCH=%s" % ti.platform.arch.name,
                     "PLATFORM_CONFIG=%s" % ti.platform.name])
        if ti.platform.qemu_support and not ti.build_only and not build_only:
            self.add_qemu_goal(ti.name, ti.test.code_location, ti.outdir,
                               args, ti.test.timeout)
        else:
            self.add_build_goal(ti.name, ti.test.code_location, ti.outdir, args)

    def execute(self, callback_fn=None, context=None):
        """Execute all the registered build goals

        @param callback_fn If not None, a callback function will be called
            as individual goals transition between states. It is invoked
            as callback_fn(context, goals_dict, goal_that_changed).
        @param context Context object to pass to the callback function.
            Type and semantics are specific to that callback function.
        @return A dictionary mapping goal names to final status.
        """

        with open(self.makefile, "w") as tf, \
             open(os.devnull, "wb") as devnull, \
             open(self.logfile, "w") as make_log:
            # Create our dynamic Makefile and execute it.
            # Watch stderr output which is where we will keep
            # track of build state
            for name, goal in self.goals.iteritems():
                tf.write(goal.text)
            tf.write("all: %s\n" % (" ".join(self.goals.keys())))
            tf.flush()

            # os.environ["CC"] = "ccache gcc" FIXME doesn't work

            # -k: keep going after errors so one broken goal doesn't
            # abort the remaining goals
            cmd = ["make", "-k", "-j", str(PARALLEL), "-f", tf.name, "all"]
            p = subprocess.Popen(cmd, stderr=subprocess.PIPE,
                                 stdout=devnull)

            for line in iter(p.stderr.readline, b''):
                make_log.write(line)
                verbose("MAKE: " + repr(line.strip()))
                m = MakeGenerator.re_make.match(line)
                if not m:
                    continue

                # Either (state, name) from a marker, or error = failed goal
                # NOTE(review): a blank stderr line matches the "$"
                # alternative with all groups None, which would raise
                # KeyError below — presumably make never emits one here
                state, name, error = m.groups()
                if error:
                    goal = self.goals[error]
                else:
                    goal = self.goals[name]
                goal.make_state = state


                if error:
                    goal.fail("build_error")
                else:
                    if state == "finished":
                        if goal.qemu:
                            # Final verdict comes from the QEMU monitor
                            # thread, not the make exit status
                            thread_status, metrics = goal.qemu.get_state()
                            goal.metrics.update(metrics)
                            if thread_status == "passed":
                                goal.success()
                            else:
                                goal.fail(thread_status)
                        else:
                            goal.success()

                if callback_fn:
                    callback_fn(context, self.goals, goal)

            p.wait()
        return self.goals
| 691 | |
| 692 | |
# Value type names understood by SanityConfigParser._cast_value():
# "list" - List of strings
# "list:<type>" - List of <type>
# "set" - Set of unordered, unique strings
# "set:<type>" - Set of <type>
# "float" - Floating point
# "int" - Integer
# "bool" - Boolean
# "str" - String

# XXX Be sure to update __doc__ if you change any of this!!

# Schema for the [arch] section of an architecture configuration file
arch_valid_keys = {"name" : {"type" : "str", "required" : True},
                   "platforms" : {"type" : "list", "required" : True}}

# Schema for each per-platform section of an architecture configuration file
platform_valid_keys = {"qemu_support" : {"type" : "bool", "default" : False},
                       "microkernel_support" : {"type" : "bool",
                                                "default" : True}}

# Schema for each test scenario block within a testcase.ini file
testcase_valid_keys = {"tags" : {"type" : "set", "required" : True},
                       "extra_args" : {"type" : "list"},
                       "build_only" : {"type" : "bool", "default" : False},
                       "timeout" : {"type" : "int", "default" : 60},
                       "arch_whitelist" : {"type" : "set"},
                       "arch_exclude" : {"type" : "set"},
                       "platform_exclude" : {"type" : "set"},
                       "platform_whitelist" : {"type" : "set"},
                       "config_whitelist" : {"type" : "set"}}
| 720 | |
| 721 | |
class SanityConfigParser:
    """Class to read architecture and test case .ini files with semantic checking
    """
    def __init__(self, filename):
        """Instantiate a new SanityConfigParser object

        @param filename Source .ini file to read
        """
        cp = ConfigParser.SafeConfigParser()
        # Use a with block so the file handle is closed promptly instead
        # of leaking until garbage collection
        with open(filename) as fp:
            cp.readfp(fp)
        self.filename = filename
        self.cp = cp

    def _cast_value(self, value, typestr):
        """Convert a raw string from the .ini file to the requested type

        @param value Raw string value from the configuration file
        @param typestr Type specifier: "str", "float", "int", "bool",
            "list[:<type>]" or "set[:<type>]"
        @return the value converted to the requested Python type
        """
        v = value.strip()
        if typestr == "str":
            return v

        elif typestr == "float":
            return float(v)

        elif typestr == "int":
            return int(v)

        elif typestr == "bool":
            v = v.lower()
            if v == "true" or v == "1":
                return True
            elif v == "" or v == "false" or v == "0":
                return False
            raise ConfigurationError(self.filename,
                                     "bad value for boolean: '%s'" % value)

        elif typestr.startswith("list"):
            vs = v.split()
            if len(typestr) > 4 and typestr[4] == ":":
                # "list:<type>": convert each whitespace-separated element
                return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
            else:
                return vs

        elif typestr.startswith("set"):
            vs = v.split()
            if len(typestr) > 3 and typestr[3] == ":":
                # "set:<type>": convert each whitespace-separated element
                return set([self._cast_value(vsi, typestr[4:]) for vsi in vs])
            else:
                return set(vs)

        else:
            # Report the unrecognized type specifier, not the value being
            # cast (the original message interpolated the wrong variable)
            raise ConfigurationError(self.filename,
                                     "unknown type '%s'" % typestr)


    def sections(self):
        """Get the set of sections within the .ini file

        @return a list of string section names"""
        return self.cp.sections()

    def get_section(self, section, valid_keys):
        """Get a dictionary representing the keys/values within a section

        @param section The section in the .ini file to retrieve data from
        @param valid_keys A dictionary representing the intended semantics
        for this section. Each key in this dictionary is a key that could
        be specified, if a key is given in the .ini file which isn't in
        here, it will generate an error. Each value in this dictionary
        is another dictionary containing metadata:

            "default" - Default value if not given
            "type" - Data type to convert the text value to. Simple types
                supported are "str", "float", "int", "bool" which will get
                converted to respective Python data types. "set" and "list"
                may also be specified which will split the value by
                whitespace (but keep the elements as strings). finally,
                "list:<type>" and "set:<type>" may be given which will
                perform a type conversion after splitting the value up.
            "required" - If true, raise an error if not defined. If false
                and "default" isn't specified, a type conversion will be
                done on an empty string
        @return A dictionary containing the section key-value pairs with
            type conversion and default values filled in per valid_keys
        """

        d = {}
        cp = self.cp

        if not cp.has_section(section):
            raise ConfigurationError(self.filename, "Missing section '%s'" % section)

        for k, v in cp.items(section):
            if k not in valid_keys:
                raise ConfigurationError(self.filename,
                        "Unknown config key '%s' in definition for '%s'"
                        % (k, section))
            d[k] = v

        for k, kinfo in valid_keys.iteritems():
            if k not in d:
                if "required" in kinfo:
                    required = kinfo["required"]
                else:
                    required = False

                if required:
                    raise ConfigurationError(self.filename,
                            "missing required value for '%s' in section '%s'"
                            % (k, section))
                else:
                    # Fall back to the declared default, or a type
                    # conversion of the empty string
                    if "default" in kinfo:
                        default = kinfo["default"]
                    else:
                        default = self._cast_value("", kinfo["type"])
                    d[k] = default
            else:
                try:
                    d[k] = self._cast_value(d[k], kinfo["type"])
                except ValueError:
                    raise ConfigurationError(self.filename,
                            "bad %s value '%s' for key '%s' in section '%s'"
                            % (kinfo["type"], d[k], k, section))

        return d
| 843 | |
| 844 | |
class Platform:
    """Class representing metadata for a particular platform

    Maps directly to PLATFORM_CONFIG when building"""
    def __init__(self, arch, name, plat_dict):
        """Constructor.

        @param arch Architecture object for this platform
        @param name String name for this platform, same as PLATFORM_CONFIG
        @param plat_dict SanityConfigParser output on the relevant section
        in the architecture configuration file which has lots of metadata.
        See the Architecture class.
        """
        self.name = name
        # Whether this platform can run tests under QEMU
        self.qemu_support = plat_dict["qemu_support"]
        # Whether microkernel apps can be built for this platform
        self.microkernel_support = plat_dict["microkernel_support"]
        self.arch = arch
        # Gets populated in a separate step, see set_defconfig()
        self.defconfig = {"micro" : None, "nano" : None}

    def set_defconfig(self, ktype, defconfig):
        """Set defconfig information for a particular kernel type.

        We do this in another step because all the defconfigs are generated
        at once from a sub-make, see TestSuite constructor

        @param ktype Kernel type, either "micro" or "nano"
        @param defconfig Dictionary containing defconfig information
        """
        self.defconfig[ktype] = defconfig

    def get_defconfig(self, ktype):
        """Return a dictionary representing the key/value pairs expressed
        in the kernel defconfig used for this arch/platform. Used to identify
        platform features.

        @param ktype Kernel type, either "micro" or "nano"
        @return dictionary corresponding to the defconfig contents. unset
        values will not be defined
        @raises SanityRuntimeError if "micro" is queried on a platform
        without microkernel support
        """

        if ktype == "micro" and not self.microkernel_support:
            raise SanityRuntimeError("Invalid kernel type queried")

        return self.defconfig[ktype]

    def __repr__(self):
        return "<%s on %s>" % (self.name, self.arch.name)
| 894 | |
| 895 | |
class Architecture:
    """Class representing metadata for a particular architecture
    """
    def __init__(self, cfile):
        """Architecture constructor

        @param cfile Path to Architecture configuration file, which gives
        info about the arch and all the platforms for it
        """
        parser = SanityConfigParser(cfile)
        self.platforms = []

        arch_info = parser.get_section("arch", arch_valid_keys)
        self.name = arch_info["name"]

        # Instantiate a Platform for every platform listed for this arch
        for pname in arch_info["platforms"]:
            verbose("Platform: %s" % pname)
            pdata = parser.get_section(pname, platform_valid_keys)
            self.platforms.append(Platform(self, pname, pdata))

    def __repr__(self):
        return "<arch %s>" % self.name
| 919 | |
| 920 | |
class TestCase:
    """Class representing a test application
    """
    # Matches "KERNEL_TYPE = micro|nano" assignments in the app Makefile.
    # Raw string so the \s escapes are passed through to the re engine
    makefile_re = re.compile(r"\s*KERNEL_TYPE\s*[?=]+\s*(micro|nano)\s*")

    def __init__(self, testcase_root, workdir, name, tc_dict):
        """TestCase constructor.

        This gets called by TestSuite as it finds and reads testcase.ini files.
        Multiple TestCase instances may be generated from a single testcase.ini,
        each one corresponds to a section within that file.

        Reads the Makefile inside the testcase directory to figure out the
        kernel type for purposes of configuration filtering

        We need to have a unique name for every single test case. Since
        a testcase.ini can define multiple tests, the canonical name for
        the test case is <workdir>/<name>.

        @param testcase_root Absolute path to the root directory where
        all the test cases live
        @param workdir Relative path to the project directory for this
        test application from the test_case root.
        @param name Name of this test case, corresponding to the section name
        in the test case configuration file. For many test cases that just
        define one test, can be anything and is usually "test". This is
        really only used to distinguish between different cases when
        the testcase.ini defines multiple tests
        @param tc_dict Dictionary with section values for this test case
        from the testcase.ini file
        @raises ConfigurationError if the Makefile has no KERNEL_TYPE
        """
        self.code_location = os.path.join(testcase_root, workdir)
        self.tags = tc_dict["tags"]
        self.extra_args = tc_dict["extra_args"]
        self.arch_whitelist = tc_dict["arch_whitelist"]
        self.arch_exclude = tc_dict["arch_exclude"]
        self.platform_exclude = tc_dict["platform_exclude"]
        self.platform_whitelist = tc_dict["platform_whitelist"]
        self.config_whitelist = tc_dict["config_whitelist"]
        self.timeout = tc_dict["timeout"]
        self.build_only = tc_dict["build_only"]
        self.path = os.path.join(workdir, name)
        self.name = self.path # for now
        self.ktype = None

        # Scan the Makefile line-by-line (no need to slurp the whole file)
        # for the KERNEL_TYPE assignment; first match wins
        with open(os.path.join(testcase_root, workdir, "Makefile")) as makefile:
            for line in makefile:
                m = TestCase.makefile_re.match(line)
                if m:
                    self.ktype = m.group(1)
                    break
        if not self.ktype:
            raise ConfigurationError(os.path.join(workdir, "Makefile"),
                                     "KERNEL_TYPE not found")

    def __repr__(self):
        return self.name
| 978 | |
| 979 | |
| 980 | |
class TestInstance:
    """Class representing the execution of a particular TestCase on a platform

    @param test The TestCase object we want to build/execute
    @param platform Platform object that we want to build and run against
    @param base_outdir Base directory for all test results. The actual
    out directory used is <outdir>/<platform>/<test case name>
    """
    def __init__(self, test, platform, base_outdir, build_only=False):
        self.test = test
        self.platform = platform
        # Both the instance name and its output directory share the
        # <platform>/<test path> suffix
        relpath = os.path.join(platform.name, test.path)
        self.name = relpath
        self.outdir = os.path.join(base_outdir, relpath)
        # Build-only if requested by the caller OR demanded by the testcase
        self.build_only = build_only or test.build_only

    def calculate_sizes(self):
        """Get the RAM/ROM sizes of a test case.

        This can only be run after the instance has been executed by
        MakeGenerator, otherwise there won't be any binaries to measure.

        @return A SizeCalculator object
        """
        matches = glob.glob(os.path.join(self.outdir, "*.elf"))
        if len(matches) != 1:
            raise BuildError("Missing/multiple output ELF binary")

        return SizeCalculator(matches[0])

    def __repr__(self):
        return "<TestCase %s on %s>" % (self.test.name, self.platform.name)
| 1012 | |
| 1013 | |
Andrew Boie | 4ef16c5 | 2015-08-28 12:36:03 -0700 | [diff] [blame] | 1014 | def defconfig_cb(context, goals, goal): |
| 1015 | if not goal.failed: |
| 1016 | return |
| 1017 | |
| 1018 | info("%sCould not build defconfig for %s%s" % |
| 1019 | (COLOR_RED, goal.name, COLOR_NORMAL)); |
| 1020 | if INLINE_LOGS: |
| 1021 | with open(goal.get_error_log()) as fp: |
| 1022 | sys.stdout.write(fp.read()) |
| 1023 | else: |
| 1024 | print "\tsee: " + COLOR_YELLOW + goal.get_error_log() + COLOR_NORMAL |
| 1025 | |
Andrew Boie | 6acbe63 | 2015-07-17 12:03:52 -0700 | [diff] [blame] | 1026 | |
class TestSuite:
    """Represents the entire sanity check run.

    Discovers test cases and architectures, generates platform defconfigs,
    filters the test matrix, runs the builds/QEMU sessions and produces
    CSV reports.
    """
    # Matches CONFIG_FOO=value lines in generated defconfig output
    config_re = re.compile('(CONFIG_[A-Z0-9_]+)[=](.+)$')

    def __init__(self, arch_root, testcase_root, outdir):
        """TestSuite constructor

        Scans testcase_root recursively for testcase.ini files and
        arch_root for architecture .ini files, then builds every
        platform/kernel-type defconfig so that configuration-based
        filtering can be done later in apply_filters().

        @param arch_root Directory containing architecture configuration files
        @param testcase_root Base directory to scan for test cases
        @param outdir Base output directory for builds and logs
        """
        self.arches = {}
        self.testcases = {}
        self.platforms = []
        self.outdir = os.path.abspath(outdir)
        self.instances = {}
        self.goals = None
        # Populated by apply_filters(); maps TestInstance -> discard reason
        self.discards = None

        arch_root = os.path.abspath(arch_root)
        testcase_root = os.path.abspath(testcase_root)

        debug("Reading test case configuration files under %s..." % testcase_root)
        for dirpath, dirnames, filenames in os.walk(testcase_root,
                                                    topdown=True):
            if "testcase.ini" in filenames:
                verbose("Found test case in " + dirpath)
                # Stop descending; this testcase.ini covers the subtree
                dirnames[:] = []
                cp = SanityConfigParser(os.path.join(dirpath, "testcase.ini"))
                workdir = os.path.relpath(dirpath, testcase_root)

                # One TestCase per section in the .ini file
                for section in cp.sections():
                    tc_dict = cp.get_section(section, testcase_valid_keys)
                    tc = TestCase(testcase_root, workdir, section, tc_dict)
                    self.testcases[tc.name] = tc

        debug("Reading architecture configuration files under %s..." % arch_root)
        for dirpath, dirnames, filenames in os.walk(arch_root):
            for filename in filenames:
                if filename.endswith(".ini"):
                    fn = os.path.join(dirpath, filename)
                    verbose("Found arch configuration " + fn)
                    arch = Architecture(fn)
                    self.arches[arch.name] = arch
                    self.platforms.extend(arch.platforms)

        # Now that we know the full set of arches/platforms, get the defconfig
        # information from them by calling Make
        info("Building platform defconfigs...")
        dlist = {}
        config_outdir = os.path.join(self.outdir, "configs")
        mg = MakeGenerator(config_outdir)

        for plat in self.platforms:
            # Nanokernel is always supported; microkernel only sometimes
            ktypes = ["nano"]
            if plat.microkernel_support:
                ktypes.append("micro")

            for ktype in ktypes:
                stem = ktype + "_" + plat.name

                in_defconfig = stem + "_defconfig"
                out_config = os.path.join(config_outdir, stem + "_config")
                dlist[plat, ktype] = out_config

                args = ["ARCH=" + plat.arch.name,
                        "KBUILD_DEFCONFIG=" + in_defconfig,
                        "KCONFIG_CONFIG=" + out_config, "defconfig"]
                # FIXME would be nice to use a common outdir for this so that
                # conf, gen_idt, etc aren't rebuilt for every plat/ktype combo,
                # need a way to avoid different Make processes from clobbering
                # each other since they all try to build them simultaneously
                mg.add_build_goal(stem, ZEPHYR_BASE, os.path.join(config_outdir,
                                                                  plat.name,
                                                                  ktype), args)

        results = mg.execute(defconfig_cb)
        for name, goal in results.iteritems():
            if goal.failed:
                raise SanityRuntimeError("Couldn't build some defconfigs")

        # Parse each generated config file into a dict and attach it to
        # the corresponding Platform
        for k, out_config in dlist.iteritems():
            plat, ktype = k
            defconfig = {}
            with open(out_config, "r") as fp:
                for line in fp:
                    m = TestSuite.config_re.match(line)
                    if not m:
                        continue
                    defconfig[m.group(1)] = m.group(2).strip()
            plat.set_defconfig(ktype, defconfig)

        self.instances = {}

    def get_last_failed(self):
        """Return (test, platform) pairs that failed in the previous run.

        Reads the LAST_SANITY CSV report; returns an empty list if there
        is no previous report.
        """
        if not os.path.exists(LAST_SANITY):
            return []
        result = []
        with open(LAST_SANITY, "r") as fp:
            cr = csv.DictReader(fp)
            for row in cr:
                if row["passed"] == "True":
                    continue
                test = row["test"]
                platform = row["platform"]
                result.append((test, platform))
        return result

    def apply_filters(self, platform_filter, arch_filter, tag_filter,
                      config_filter, testcase_filter, last_failed):
        """Compute the set of TestInstances to run, discarding the rest.

        Each testcase/platform combination is checked against the command
        line filters and the testcase's own whitelists/excludes; rejected
        combinations are recorded with a human-readable reason.

        @return dictionary mapping discarded TestInstance -> reason string
        """
        instances = []
        discards = {}
        verbose("platform filter: " + str(platform_filter))
        verbose("    arch_filter: " + str(arch_filter))
        verbose("     tag_filter: " + str(tag_filter))
        verbose("  config_filter: " + str(config_filter))

        if last_failed:
            failed_tests = self.get_last_failed()

        if not platform_filter or "default" in platform_filter:
            info("Selecting default platforms per test case")
            default_platforms = True
            platform_filter = []
        else:
            default_platforms = False

        if "all" in platform_filter:
            info("Selecting all possible platforms per test case")
            platform_filter = []

        for tc_name, tc in self.testcases.iteritems():
            for arch_name, arch in self.arches.iteritems():
                instance_list = []
                for plat in arch.platforms:
                    instance = TestInstance(tc, plat, self.outdir)

                    if tag_filter and not tc.tags.intersection(tag_filter):
                        discards[instance] = "Command line testcase tag filter"
                        continue

                    if testcase_filter and tc_name not in testcase_filter:
                        discards[instance] = "Testcase name filter"
                        continue

                    if last_failed and (tc.name, plat.name) not in failed_tests:
                        discards[instance] = "Passed or skipped during last run"
                        continue

                    if arch_filter and arch_name not in arch_filter:
                        discards[instance] = "Command line testcase arch filter"
                        continue

                    if tc.arch_whitelist and arch.name not in tc.arch_whitelist:
                        discards[instance] = "Not in test case arch whitelist"
                        continue

                    if tc.arch_exclude and arch.name in tc.arch_exclude:
                        discards[instance] = "In test case arch exclude"
                        continue

                    if tc.platform_exclude and plat.name in tc.platform_exclude:
                        discards[instance] = "In test case platform exclude"
                        continue

                    if platform_filter and plat.name not in platform_filter:
                        discards[instance] = "Command line platform filter"
                        continue

                    if tc.platform_whitelist and plat.name not in tc.platform_whitelist:
                        discards[instance] = "Not in testcase platform whitelist"
                        continue

                    if not plat.microkernel_support and tc.ktype == "micro":
                        discards[instance] = "No microkernel support for platform"
                        continue

                    defconfig = plat.get_defconfig(tc.ktype)
                    config_pass = True
                    # FIXME this is kind of gross clean it up
                    # Each whitelist entry is CONFIG_X[=val], optionally
                    # prefixed with '!' to invert the match
                    for cw in tc.config_whitelist:
                        invert = (cw[0] == "!")
                        if invert:
                            cw = cw[1:]

                        if "=" in cw:
                            k, v = cw.split("=")
                            testval = k not in defconfig or defconfig[k] != v
                            if invert:
                                testval = not testval
                            if testval:
                                discards[instance] = "%s%s in platform defconfig" % (
                                        cw, " not" if not invert else "")
                                config_pass = False
                                break
                        else:
                            testval = cw not in defconfig
                            if invert:
                                testval = not testval
                            if testval:
                                discards[instance] = "%s%s set in platform defconfig" % (
                                        cw, " not" if not invert else "")
                                config_pass = False
                                break

                    if not config_pass:
                        continue

                    instance_list.append(instance)

                if not instance_list:
                    # Every platform in this arch was rejected already
                    continue

                if default_platforms:
                    # Keep only the highest-precedence platform for the arch
                    self.add_instance(instance_list[0])
                    for instance in instance_list[1:]:
                        discards[instance] = "Not in default set for arch"
                else:
                    for instance in instance_list:
                        self.add_instance(instance)
        self.discards = discards
        return discards

    def add_instance(self, ti):
        """Register a TestInstance for execution, keyed by its name."""
        self.instances[ti.name] = ti

    def execute(self, cb, cb_context, build_only):
        """Build (and possibly run) all selected test instances.

        @param cb Progress callback invoked as goals change state
        @param cb_context Opaque context object passed to the callback
        @param build_only If true, don't run anything under QEMU
        @return dictionary of goal name -> MakeGoal with results
        """
        mg = MakeGenerator(self.outdir)
        for i in self.instances.values():
            mg.add_test_instance(i, build_only)
        self.goals = mg.execute(cb, cb_context)
        for name, goal in self.goals.iteritems():
            i = self.instances[name]
            if goal.failed:
                continue
            # Attach footprint metrics for everything that built
            sc = i.calculate_sizes()
            goal.metrics["ram_size"] = sc.get_ram_size()
            goal.metrics["rom_size"] = sc.get_rom_size()
            goal.metrics["unrecognized"] = sc.unrecognized_sections()
            goal.metrics["mismatched"] = sc.mismatched_sections()
        return self.goals

    def discard_report(self, filename):
        """Write a CSV report of all discarded test instances and why.

        @param filename Output CSV file path
        """
        if self.discards is None:
            # NOTE(review): original raised 'SanityRuntimeException', which
            # does not match the SanityRuntimeError raised elsewhere in this
            # script -- using the latter for consistency
            raise SanityRuntimeError("apply_filters() hasn't been run!")

        with open(filename, "wb") as csvfile:
            fieldnames = ["test", "arch", "platform", "reason"]
            cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
            cw.writeheader()
            for instance, reason in self.discards.iteritems():
                # Fix: original referenced an undefined name 'i' here
                # instead of the loop variable, raising NameError
                rowdict = {"test" : instance.test.name,
                           "arch" : instance.platform.arch.name,
                           "platform" : instance.platform.name,
                           "reason" : reason}
                cw.writerow(rowdict)

    def compare_metrics(self, filename):
        """Compare this run's metrics against a saved CSV report.

        @param filename CSV report from a previous run (release or last run)
        @return list of (instance, metric, value, delta, lower_better)
        tuples for every metric that changed
        """
        # name, datatype, lower results better
        interesting_metrics = [("ram_size", int, True),
                               ("rom_size", int, True)]

        if self.goals is None:
            raise SanityRuntimeError("execute() hasn't been run!")

        if not os.path.exists(filename):
            info("Cannot compare metrics, %s not found" % filename)
            return []

        results = []
        saved_metrics = {}
        with open(filename) as fp:
            cr = csv.DictReader(fp)
            for row in cr:
                d = {}
                for m, _, _ in interesting_metrics:
                    d[m] = row[m]
                saved_metrics[(row["test"], row["platform"])] = d

        for name, goal in self.goals.iteritems():
            i = self.instances[name]
            mkey = (i.test.name, i.platform.name)
            if mkey not in saved_metrics:
                continue
            sm = saved_metrics[mkey]
            for metric, mtype, lower_better in interesting_metrics:
                if metric not in goal.metrics:
                    continue
                if sm[metric] == "":
                    continue
                delta = goal.metrics[metric] - mtype(sm[metric])
                if delta == 0:
                    continue
                results.append((i, metric, goal.metrics[metric], delta,
                                lower_better))
        return results

    def testcase_report(self, filename):
        """Write a CSV report with the result of every executed instance.

        @param filename Output CSV file path
        """
        if self.goals is None:
            raise SanityRuntimeError("execute() hasn't been run!")

        with open(filename, "wb") as csvfile:
            fieldnames = ["test", "arch", "platform", "passed", "status",
                          "extra_args", "qemu", "qemu_time", "ram_size",
                          "rom_size"]
            cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
            cw.writeheader()
            for name, goal in self.goals.iteritems():
                i = self.instances[name]
                rowdict = {"test" : i.test.name,
                           "arch" : i.platform.arch.name,
                           "platform" : i.platform.name,
                           "extra_args" : " ".join(i.test.extra_args),
                           "qemu" : i.platform.qemu_support}
                if goal.failed:
                    rowdict["passed"] = False
                    rowdict["status"] = goal.reason
                else:
                    rowdict["passed"] = True
                    if goal.qemu:
                        rowdict["qemu_time"] = goal.metrics["qemu_time"]
                    rowdict["ram_size"] = goal.metrics["ram_size"]
                    rowdict["rom_size"] = goal.metrics["rom_size"]
                cw.writerow(rowdict)
| 1348 | |
| 1349 | |
def parse_arguments():
    """Build the argument parser and parse sys.argv.

    The module docstring is used as the program description. All defaults
    that reference the source tree are rooted at ZEPHYR_BASE.

    @return argparse.Namespace with all parsed options
    """
    ap = argparse.ArgumentParser(description = __doc__,
            formatter_class = argparse.RawDescriptionHelpFormatter)

    ap.add_argument("-p", "--platform", action="append",
            help="Platform filter for testing. If unspecified, default to the "
                 "set of default platforms in the arch configuration files for "
                 "the selected arches. May also specify 'all' to match all "
                 "platforms for the selected arches. Multiple invocations "
                 "are treated as a logical 'or' relationship")
    ap.add_argument("-a", "--arch", action="append",
            help="Arch filter for testing. Takes precedence over --platform. "
                 "If unspecified, test all arches. Multiple invocations "
                 "are treated as a logical 'or' relationship")
    ap.add_argument("-t", "--tag", action="append",
            help="Specify tags to restrict which tests to run by tag value. "
                 "Default is to not do any tag filtering. Multiple invocations "
                 "are treated as a logical 'or' relationship")
    ap.add_argument("-f", "--only-failed", action="store_true",
            help="Run only those tests that failed the previous sanity check "
                 "invocation.")
    ap.add_argument("-c", "--config", action="append",
            help="Specify platform configuration values filtering. This can be "
                 "specified two ways: <config>=<value> or just <config>. The "
                 "defconfig for all platforms, for all kernel types will be "
                 "checked. For the <config>=<value> case, only match defconfig "
                 "that have that value defined. For the <config> case, match "
                 "defconfig that have that value assigned to any value. "
                 "Prepend a '!' to invert the match.")
    ap.add_argument("-s", "--test", action="append",
            help="Run only the specified test cases. These are named by "
                 "<path to test project relative to "
                 "--testcase-root>/<testcase.ini section name>")
    ap.add_argument("-l", "--all", action="store_true",
            help="Same as --platform all")

    ap.add_argument("-o", "--testcase-report",
            help="Output a CSV spreadsheet containing results of the test run")
    ap.add_argument("-d", "--discard-report",
            help="Output a CSV spreadhseet showing tests that were skipped "
                 "and why")
    ap.add_argument("-y", "--dry-run", action="store_true",
            help="Create the filtered list of test cases, but don't actually "
                 "run them. Useful if you're just interested in "
                 "--discard-report")

    ap.add_argument("-r", "--release", action="store_true",
            help="Update the benchmark database with the results of this test "
                 "run. Intended to be run by CI when tagging an official "
                 "release. This database is used as a basis for comparison "
                 "when looking for deltas in metrics such as footprint")
    ap.add_argument("-w", "--warnings-as-errors", action="store_true",
            help="Treat warning conditions as errors")
    ap.add_argument("-v", "--verbose", action="count", default=0,
            help="Emit debugging information, call multiple times to increase "
                 "verbosity")
    ap.add_argument("-i", "--inline-logs", action="store_true",
            help="Upon test failure, print relevant log data to stdout "
                 "instead of just a path to it")
    ap.add_argument("-m", "--last-metrics", action="store_true",
            help="Instead of comparing metrics from the last --release, "
                 "compare with the results of the previous sanity check "
                 "invocation")
    ap.add_argument("-u", "--no-update", action="store_true",
            help="do not update the results of the last run of the sanity "
                 "checks")
    ap.add_argument("-b", "--build-only", action="store_true",
            help="Only build the code, do not execute any of it in QEMU")
    ap.add_argument("-j", "--jobs", type=int,
            help="Number of cores to use when building, defaults to "
                 "number of CPUs * 2")
    ap.add_argument("-H", "--footprint-threshold", type=float, default=5,
            help="When checking test case footprint sizes, warn the user if "
                 "the new app size is greater then the specified percentage "
                 "from the last release. Default is 5. 0 to warn on any "
                 "increase on app size")
    ap.add_argument("-D", "--all-deltas", action="store_true",
            help="Show all footprint deltas, positive or negative. Implies "
                 "--footprint-threshold=0")
    ap.add_argument("-O", "--outdir",
            default="%s/sanity-out" % ZEPHYR_BASE,
            help="Output directory for logs and binaries.")
    ap.add_argument("-n", "--no-clean", action="store_true",
            help="Do not delete the outdir before building. Will result in "
                 "faster compilation since builds will be incremental")
    ap.add_argument("-T", "--testcase-root",
            default="%s/samples" % ZEPHYR_BASE,
            help="Base directory to recursively search for test cases. All "
                 "testcase.ini files under here will be processed")
    ap.add_argument("-A", "--arch-root",
            default="%s/scripts/sanity_chk/arches" % ZEPHYR_BASE,
            help="Directory to search for arch configuration files. All .ini "
                 "files in the directory will be processed.")
    ap.add_argument("-z", "--size", action="append",
            help="Don't run sanity checks. Instead, produce a report to "
                 "stdout detailing RAM/ROM sizes on the specified filenames. "
                 "All other command line arguments ignored.")

    return ap.parse_args()
| 1450 | |
| 1451 | def log_info(filename): |
| 1452 | filename = os.path.relpath(filename) |
| 1453 | if INLINE_LOGS: |
| 1454 | print "{:-^100}".format(filename) |
| 1455 | with open(filename) as fp: |
| 1456 | sys.stdout.write(fp.read()) |
| 1457 | print "{:-^100}".format(filename) |
| 1458 | else: |
| 1459 | print "\tsee: " + COLOR_YELLOW + filename + COLOR_NORMAL |
| 1460 | |
def terse_test_cb(instances, goals, goal):
    """Progress callback for quiet runs.

    Maintains a single in-place progress line (completed / total / failed
    counts) and, whenever a goal fails, prints a full failure report with
    the relevant log before refreshing the progress line.
    """
    num_goals = len(goals)
    done = 0
    num_failed = 0

    # Only finished goals count toward either total; a goal's failure is
    # only tallied once it has finished.
    for g in goals.itervalues():
        if not g.finished:
            continue
        done += 1
        if g.failed:
            num_failed += 1

    if goal.failed:
        inst = instances[goal.name]
        info("\n\n{:<25} {:<50} {}FAILED{}: {}".format(inst.platform.name,
             inst.test.name, COLOR_RED, COLOR_NORMAL, goal.reason))
        log_info(goal.get_error_log())
        info("")

    # '\r' rewinds to the start of the line so the counter updates in place.
    sys.stdout.write("\rtotal complete: %s%3d/%3d%s failed: %s%3d%s" % (
        COLOR_GREEN, done, num_goals, COLOR_NORMAL,
        COLOR_NORMAL if num_failed == 0 else COLOR_RED,
        num_failed, COLOR_NORMAL))
    sys.stdout.flush()
| 1484 | |
def chatty_test_cb(instances, goals, goal):
    """Progress callback for verbose runs.

    Prints one status line per goal: intermediate make states at
    verbosity >= 2, and PASSED/FAILED results (with the error log on
    failure) once a goal finishes.
    """
    inst = instances[goal.name]

    # Below verbosity 2, stay silent until the goal has finished.
    if not goal.finished and VERBOSE < 2:
        return

    if goal.failed:
        status = COLOR_RED + "FAILED" + COLOR_NORMAL + ": " + goal.reason
    elif goal.finished:
        status = COLOR_GREEN + "PASSED" + COLOR_NORMAL
    else:
        # Still in progress: show the current make state instead.
        status = goal.make_state

    info("{:<25} {:<50} {}".format(inst.platform.name, inst.test.name, status))
    if goal.failed:
        log_info(goal.get_error_log())
| 1501 | |
Andrew Boie | bbd670c | 2015-08-17 13:16:11 -0700 | [diff] [blame] | 1502 | |
def size_report(sc):
    """Print a per-section RAM/ROM size breakdown for one binary.

    Takes a SizeCalculator-like object exposing ``filename``,
    ``sections`` (list of dicts), ``rom_size`` and ``ram_size``.
    """
    info(sc.filename)
    info("SECTION NAME             VMA        LMA     SIZE  HEX SZ TYPE")
    for idx, sect in enumerate(sc.sections):
        # SIZE is shown twice: decimal, then hex (the "HEX SZ" column).
        info("%-17s 0x%08x 0x%08x %8d 0x%05x %-7s" %
             (sect["name"], sect["virt_addr"], sect["load_addr"],
              sect["size"], sect["size"], sect["type"]))
        if sect["lma_off"] != sect["vma_off"]:
            # NOTE(review): for idx == 0 the previous-section lookup wraps
            # to sc.sections[-1]; presumably the first section never has
            # differing offsets -- confirm against SizeCalculator.
            info(" WARNING: LMA and VMA offsets between %s and %s differ: %d vs %d" %
                 (sc.sections[idx - 1]["name"], sect["name"],
                  sect["lma_off"], sect["vma_off"]))

    info("Totals: %d bytes (ROM), %d bytes (RAM)" %
         (sc.rom_size, sc.ram_size))
    info("")
| 1520 | |
| 1521 | |
Andrew Boie | 6acbe63 | 2015-07-17 12:03:52 -0700 | [diff] [blame] | 1522 | def main(): |
Andrew Boie | 4b18247 | 2015-07-31 12:25:22 -0700 | [diff] [blame] | 1523 | start_time = time.time() |
Andrew Boie | 6acbe63 | 2015-07-17 12:03:52 -0700 | [diff] [blame] | 1524 | global VERBOSE, INLINE_LOGS, PARALLEL |
| 1525 | args = parse_arguments() |
Andrew Boie | bbd670c | 2015-08-17 13:16:11 -0700 | [diff] [blame] | 1526 | |
| 1527 | if args.size: |
| 1528 | for fn in args.size: |
| 1529 | size_report(SizeCalculator(fn)) |
| 1530 | sys.exit(0) |
| 1531 | |
Andrew Boie | 6acbe63 | 2015-07-17 12:03:52 -0700 | [diff] [blame] | 1532 | VERBOSE += args.verbose |
| 1533 | INLINE_LOGS = args.inline_logs |
| 1534 | if args.jobs: |
| 1535 | PARALLEL = args.jobs |
| 1536 | if args.all: |
| 1537 | args.platform = ["all"] |
| 1538 | |
Andrew Boie | ae9e7f7b | 2015-07-31 12:26:12 -0700 | [diff] [blame] | 1539 | if os.path.exists(args.outdir) and not args.no_clean: |
Andrew Boie | 6acbe63 | 2015-07-17 12:03:52 -0700 | [diff] [blame] | 1540 | info("Cleaning output directory " + args.outdir) |
| 1541 | shutil.rmtree(args.outdir) |
| 1542 | |
| 1543 | ts = TestSuite(args.arch_root, args.testcase_root, args.outdir) |
| 1544 | discards = ts.apply_filters(args.platform, args.arch, args.tag, args.config, |
| 1545 | args.test, args.only_failed) |
| 1546 | |
| 1547 | if args.discard_report: |
| 1548 | ts.discard_report(args.discard_report) |
| 1549 | |
| 1550 | if VERBOSE: |
| 1551 | for i, reason in discards.iteritems(): |
| 1552 | debug("{:<25} {:<50} {}SKIPPED{}: {}".format(i.platform.name, |
| 1553 | i.test.name, COLOR_YELLOW, COLOR_NORMAL, reason)) |
| 1554 | |
| 1555 | info("%d tests selected, %d tests discarded due to filters" % |
| 1556 | (len(ts.instances), len(discards))) |
| 1557 | |
| 1558 | if args.dry_run: |
| 1559 | return |
| 1560 | |
| 1561 | if VERBOSE or not TERMINAL: |
| 1562 | goals = ts.execute(chatty_test_cb, ts.instances, args.build_only) |
| 1563 | else: |
| 1564 | goals = ts.execute(terse_test_cb, ts.instances, args.build_only) |
| 1565 | print |
| 1566 | |
| 1567 | deltas = ts.compare_metrics(LAST_SANITY if args.last_metrics |
| 1568 | else RELEASE_DATA) |
| 1569 | warnings = 0 |
| 1570 | if deltas: |
Andrew Boie | ea7928f | 2015-08-14 14:27:38 -0700 | [diff] [blame] | 1571 | for i, metric, value, delta, lower_better in deltas: |
| 1572 | if not args.all_deltas and ((delta < 0 and lower_better) or |
| 1573 | (delta > 0 and not lower_better)): |
Andrew Boie | 6acbe63 | 2015-07-17 12:03:52 -0700 | [diff] [blame] | 1574 | continue |
| 1575 | |
Andrew Boie | ea7928f | 2015-08-14 14:27:38 -0700 | [diff] [blame] | 1576 | percentage = (float(delta) / float(value - delta)) |
| 1577 | if not args.all_deltas and (percentage < |
| 1578 | (args.footprint_threshold / 100.0)): |
| 1579 | continue |
| 1580 | |
| 1581 | info("{:<25} {:<50} {}{}{}: {} is now {} {:+.2%}".format( |
| 1582 | i.platform.name, i.test.name, COLOR_YELLOW, |
| 1583 | "INFO" if args.all_deltas else "WARNING", COLOR_NORMAL, |
Andrew Boie | 6acbe63 | 2015-07-17 12:03:52 -0700 | [diff] [blame] | 1584 | metric, value, percentage)) |
| 1585 | warnings += 1 |
| 1586 | |
| 1587 | if warnings: |
| 1588 | info("Deltas based on metrics from last %s" % |
| 1589 | ("release" if not args.last_metrics else "run")) |
| 1590 | |
| 1591 | failed = 0 |
| 1592 | for name, goal in goals.iteritems(): |
| 1593 | if goal.failed: |
| 1594 | failed += 1 |
Andrew Boie | 73b4ee6 | 2015-10-07 11:33:22 -0700 | [diff] [blame] | 1595 | elif goal.metrics["unrecognized"]: |
| 1596 | info("%sFAILED%s: %s has unrecognized binary sections: %s" % |
| 1597 | (COLOR_RED, COLOR_NORMAL, goal.name, |
| 1598 | str(goal.metrics["unrecognized"]))) |
| 1599 | failed += 1 |
Andrew Boie | 9882dcd | 2015-10-07 14:25:51 -0700 | [diff] [blame] | 1600 | elif goal.metrics["mismatched"]: |
| 1601 | info("%sFAILED%s: %s has mismatched section offsets for: %s" % |
| 1602 | (COLOR_RED, COLOR_NORMAL, goal.name, |
| 1603 | str(goal.metrics["mismatched"]))) |
| 1604 | failed += 1 |
Andrew Boie | 6acbe63 | 2015-07-17 12:03:52 -0700 | [diff] [blame] | 1605 | |
Andrew Boie | 4b18247 | 2015-07-31 12:25:22 -0700 | [diff] [blame] | 1606 | info("%s%d of %d%s tests passed with %s%d%s warnings in %d seconds" % |
Andrew Boie | 6acbe63 | 2015-07-17 12:03:52 -0700 | [diff] [blame] | 1607 | (COLOR_RED if failed else COLOR_GREEN, len(goals) - failed, |
| 1608 | len(goals), COLOR_NORMAL, COLOR_YELLOW if warnings else COLOR_NORMAL, |
Andrew Boie | 4b18247 | 2015-07-31 12:25:22 -0700 | [diff] [blame] | 1609 | warnings, COLOR_NORMAL, time.time() - start_time)) |
Andrew Boie | 6acbe63 | 2015-07-17 12:03:52 -0700 | [diff] [blame] | 1610 | |
| 1611 | if args.testcase_report: |
| 1612 | ts.testcase_report(args.testcase_report) |
| 1613 | if not args.no_update: |
| 1614 | ts.testcase_report(LAST_SANITY) |
| 1615 | if args.release: |
| 1616 | ts.testcase_report(RELEASE_DATA) |
| 1617 | |
| 1618 | if failed or (warnings and args.warnings_as_errors): |
| 1619 | sys.exit(1) |
| 1620 | |
# Script entry point: run the sanity check suite when invoked directly.
if __name__ == "__main__":
    main()
| 1623 | |