twister: add --report-summary switch
Add a switch that shows a failed/error report from the last run.
By default it shows all items found; the output can be limited to a
given number of items (e.g. --report-summary 15).
It also works with the --outdir switch.
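
For example, assuming twister.json from a previous run is present in the
given output directory:

  ./scripts/twister --report-summary --outdir twister-out
  ./scripts/twister --report-summary 15 --outdir twister-out
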
Signed-off-by: Kamil Paszkiet <kamilx.paszkiet@intel.com>
diff --git a/scripts/pylib/twister/twisterlib/environment.py b/scripts/pylib/twister/twisterlib/environment.py
index 09ddaa9..ea1eb18 100644
--- a/scripts/pylib/twister/twisterlib/environment.py
+++ b/scripts/pylib/twister/twisterlib/environment.py
@@ -603,6 +603,12 @@
""")
parser.add_argument(
+ "--report-summary", action="store", nargs='?', type=int, const=0,
+ help="Show failed/error report from latest run. Default shows all items found. "
+ "However, you can specify the number of items (e.g. --report-summary 15). "
+ "It also works well with the --outdir switch.")
+
+ parser.add_argument(
"--report-suffix",
help="""Add a suffix to all generated file names, for example to add a
version or a commit ID.
diff --git a/scripts/pylib/twister/twisterlib/reports.py b/scripts/pylib/twister/twisterlib/reports.py
index 254f249..c02bda7 100644
--- a/scripts/pylib/twister/twisterlib/reports.py
+++ b/scripts/pylib/twister/twisterlib/reports.py
@@ -26,6 +26,7 @@
self.env = env
self.timestamp = datetime.now().isoformat()
self.outdir = os.path.abspath(env.options.outdir)
+ self.instance_fail_count = plan.instance_fail_count
@staticmethod
def process_log(log_file):
@@ -432,20 +433,36 @@
(report if not last_metrics else "the last twister run.")))
def synopsis(self):
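+ # Decide how many failed/error items to print: --report-summary with no
+ # value (const=0) means all of them, a positive value caps the list, and
+ # omitting --report-summary keeps the previous default of 10.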
+ if self.env.options.report_summary == 0:
+ count = self.instance_fail_count
+ log_txt = f"The following issues were found (showing the all {count} items):"
+ elif self.env.options.report_summary:
+ count = self.env.options.report_summary
+ log_txt = f"The following issues were found "
+ if count > self.instance_fail_count:
+ log_txt += f"(presenting {self.instance_fail_count} out of the {count} items requested):"
+ else:
+ log_txt += f"(showing the {count} of {self.instance_fail_count} items):"
+ else:
+ count = 10
+ log_txt = f"The following issues were found (showing the top {count} items):"
cnt = 0
example_instance = None
detailed_test_id = self.env.options.detailed_test_id
for instance in self.instances.values():
if instance.status not in ["passed", "filtered", "skipped"]:
- cnt = cnt + 1
+ cnt += 1
if cnt == 1:
logger.info("-+" * 40)
- logger.info("The following issues were found (showing the top 10 items):")
+ logger.info(log_txt)
logger.info(f"{cnt}) {instance.testsuite.name} on {instance.platform.name} {instance.status} ({instance.reason})")
example_instance = instance
- if cnt == 10:
+ if cnt == count:
break
+ if cnt == 0 and self.env.options.report_summary is not None:
+ logger.info("-+" * 40)
+ logger.info(f"No errors/fails found")
if cnt and example_instance:
logger.info("")
diff --git a/scripts/pylib/twister/twisterlib/testplan.py b/scripts/pylib/twister/twisterlib/testplan.py
index d27e719..83c5d6b 100755
--- a/scripts/pylib/twister/twisterlib/testplan.py
+++ b/scripts/pylib/twister/twisterlib/testplan.py
@@ -17,6 +17,7 @@
import shutil
import random
import snippets
+from colorama import Fore
from pathlib import Path
from argparse import Namespace
@@ -107,6 +108,7 @@
self.default_platforms = []
self.load_errors = 0
self.instances = dict()
+ self.instance_fail_count = 0
self.warnings = 0
self.scenarios = []
@@ -217,7 +219,7 @@
else:
last_run = os.path.join(self.options.outdir, "twister.json")
- if self.options.only_failed:
+ if self.options.only_failed or self.options.report_summary is not None:
self.load_from_file(last_run)
self.selected_platforms = set(p.platform.name for p in self.instances.values())
elif self.options.load_tests:
@@ -581,72 +583,83 @@
instance.add_filter("Not under quarantine", Filters.QUARANTINE)
def load_from_file(self, file, filter_platform=[]):
- with open(file, "r") as json_test_plan:
- jtp = json.load(json_test_plan)
- instance_list = []
- for ts in jtp.get("testsuites", []):
- logger.debug(f"loading {ts['name']}...")
- testsuite = ts["name"]
+ try:
+ with open(file, "r") as json_test_plan:
+ jtp = json.load(json_test_plan)
+ instance_list = []
+ for ts in jtp.get("testsuites", []):
+ logger.debug(f"loading {ts['name']}...")
+ testsuite = ts["name"]
- platform = self.get_platform(ts["platform"])
- if filter_platform and platform.name not in filter_platform:
- continue
- instance = TestInstance(self.testsuites[testsuite], platform, self.env.outdir)
- if ts.get("run_id"):
- instance.run_id = ts.get("run_id")
+ platform = self.get_platform(ts["platform"])
+ if filter_platform and platform.name not in filter_platform:
+ continue
+ instance = TestInstance(self.testsuites[testsuite], platform, self.env.outdir)
+ if ts.get("run_id"):
+ instance.run_id = ts.get("run_id")
- if self.options.device_testing:
- tfilter = 'runnable'
- else:
- tfilter = 'buildable'
- instance.run = instance.check_runnable(
- self.options.enable_slow,
- tfilter,
- self.options.fixture,
- self.hwm
- )
+ if self.options.device_testing:
+ tfilter = 'runnable'
+ else:
+ tfilter = 'buildable'
+ instance.run = instance.check_runnable(
+ self.options.enable_slow,
+ tfilter,
+ self.options.fixture,
+ self.hwm
+ )
- instance.metrics['handler_time'] = ts.get('execution_time', 0)
- instance.metrics['used_ram'] = ts.get("used_ram", 0)
- instance.metrics['used_rom'] = ts.get("used_rom",0)
- instance.metrics['available_ram'] = ts.get('available_ram', 0)
- instance.metrics['available_rom'] = ts.get('available_rom', 0)
+ instance.metrics['handler_time'] = ts.get('execution_time', 0)
+ instance.metrics['used_ram'] = ts.get("used_ram", 0)
+ instance.metrics['used_rom'] = ts.get("used_rom",0)
+ instance.metrics['available_ram'] = ts.get('available_ram', 0)
+ instance.metrics['available_rom'] = ts.get('available_rom', 0)
- status = ts.get('status', None)
- reason = ts.get("reason", "Unknown")
- if status in ["error", "failed"]:
- instance.status = None
- instance.reason = None
- instance.retries += 1
- # test marked as passed (built only) but can run when
- # --test-only is used. Reset status to capture new results.
- elif status == 'passed' and instance.run and self.options.test_only:
- instance.status = None
- instance.reason = None
- else:
- instance.status = status
- instance.reason = reason
+ status = ts.get('status', None)
+ reason = ts.get("reason", "Unknown")
+ if status in ["error", "failed"]:
+ if self.options.report_summary is not None:
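+ # With --report-summary we only re-read and present the previous
+ # results: keep the failed/error status (uppercased, in red) and
+ # count it for the synopsis instead of resetting it for a re-run.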
+ if status == "error": status = "ERROR"
+ elif status == "failed": status = "FAILED"
+ instance.status = Fore.RED + status + Fore.RESET
+ instance.reason = reason
+ self.instance_fail_count += 1
+ else:
+ instance.status = None
+ instance.reason = None
+ instance.retries += 1
+ # test marked as passed (built only) but can run when
+ # --test-only is used. Reset status to capture new results.
+ elif status == 'passed' and instance.run and self.options.test_only:
+ instance.status = None
+ instance.reason = None
+ else:
+ instance.status = status
+ instance.reason = reason
- self.handle_quarantined_tests(instance, platform)
+ self.handle_quarantined_tests(instance, platform)
- for tc in ts.get('testcases', []):
- identifier = tc['identifier']
- tc_status = tc.get('status', None)
- tc_reason = None
- # we set reason only if status is valid, it might have been
- # reset above...
- if instance.status:
- tc_reason = tc.get('reason')
- if tc_status:
- case = instance.set_case_status_by_name(identifier, tc_status, tc_reason)
- case.duration = tc.get('execution_time', 0)
- if tc.get('log'):
- case.output = tc.get('log')
+ for tc in ts.get('testcases', []):
+ identifier = tc['identifier']
+ tc_status = tc.get('status', None)
+ tc_reason = None
+ # we set reason only if status is valid, it might have been
+ # reset above...
+ if instance.status:
+ tc_reason = tc.get('reason')
+ if tc_status:
+ case = instance.set_case_status_by_name(identifier, tc_status, tc_reason)
+ case.duration = tc.get('execution_time', 0)
+ if tc.get('log'):
+ case.output = tc.get('log')
- instance.create_overlay(platform, self.options.enable_asan, self.options.enable_ubsan, self.options.enable_coverage, self.options.coverage_platform)
- instance_list.append(instance)
- self.add_instances(instance_list)
+ instance.create_overlay(platform, self.options.enable_asan, self.options.enable_ubsan, self.options.enable_coverage, self.options.coverage_platform)
+ instance_list.append(instance)
+ self.add_instances(instance_list)
+ except FileNotFoundError as e:
+ logger.error(f"{e}")
+ return 1
def apply_filters(self, **kwargs):
diff --git a/scripts/pylib/twister/twisterlib/twister_main.py b/scripts/pylib/twister/twisterlib/twister_main.py
index 3065654..f61880c 100644
--- a/scripts/pylib/twister/twisterlib/twister_main.py
+++ b/scripts/pylib/twister/twisterlib/twister_main.py
@@ -73,7 +73,7 @@
previous_results = None
# Cleanup
- if options.no_clean or options.only_failed or options.test_only:
+ if options.no_clean or options.only_failed or options.test_only or options.report_summary is not None:
if os.path.exists(options.outdir):
print("Keeping artifacts untouched")
elif options.last_metrics:
@@ -160,6 +160,13 @@
report.json_report(options.save_tests)
return 0
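+ # With --report-summary, only print the failure synopsis from the loaded
+ # previous results and exit; negative values are rejected.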
+ if options.report_summary is not None:
+ if options.report_summary < 0:
+ logger.error("The report summary value cannot be less than 0")
+ return 1
+ report.synopsis()
+ return 0
+
if options.device_testing and not options.build_only:
print("\nDevice testing on:")
hwm.dump(filtered=tplan.selected_platforms)