pw_unit_test: Add TestRecord of Test Results

In order to view passing and failing test cases at the call site of
run_tests in pw_unit_test, return a TestRecord dataclass that contains
the passing, failing, and disabled tests.
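
For example (a sketch, assuming a connected pw_system Device object
named device):

    record = device.run_tests(timeout_s=5)
    if not record.all_tests_passed():
        for test_case in record.failing_tests:
            print('FAILED:', test_case)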

Change-Id: I2de4bcd6e0042e86582ef82ae73b71dc5cdf3951
Reviewed-on: https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/166273
Commit-Queue: William Abajian <williamabajian@google.com>
Reviewed-by: Wyatt Hepler <hepler@google.com>
diff --git a/pw_system/py/pw_system/device.py b/pw_system/py/pw_system/device.py
index 8d8f1a2..f8cfe31 100644
--- a/pw_system/py/pw_system/device.py
+++ b/pw_system/py/pw_system/device.py
@@ -33,7 +33,7 @@
 from pw_thread_protos import thread_pb2
 from pw_tokenizer import detokenize
 from pw_tokenizer.proto import decode_optionally_tokenized
-from pw_unit_test.rpc import run_tests as pw_unit_test_run_tests
+from pw_unit_test.rpc import run_tests as pw_unit_test_run_tests, TestRecord
 
 # Internal log for troubleshooting this tool (the console).
 _LOG = logging.getLogger('tools')
@@ -144,7 +144,7 @@
         """Returns an object for accessing services on the specified channel."""
         return next(iter(self.client.client.channels())).rpcs
 
-    def run_tests(self, timeout_s: Optional[float] = 5) -> bool:
+    def run_tests(self, timeout_s: Optional[float] = 5) -> TestRecord:
         """Runs the unit tests on this device."""
         return pw_unit_test_run_tests(self.rpcs, timeout_s=timeout_s)
 
diff --git a/pw_unit_test/docs.rst b/pw_unit_test/docs.rst
index 1ab6eee..5c3415a 100644
--- a/pw_unit_test/docs.rst
+++ b/pw_unit_test/docs.rst
@@ -683,7 +683,8 @@
 All tests flashed to an attached device can be run via python by calling
 ``pw_unit_test.rpc.run_tests()`` with a RPC client services object that has
 the unit testing RPC service enabled. By default, the results will output via
-logging.
+logging. This method returns a ``TestRecord`` dataclass instance containing
+the results of the test run.
 
 .. code:: python
 
@@ -699,7 +700,7 @@
 pw_unit_test.rpc
 ----------------
 .. automodule:: pw_unit_test.rpc
-   :members: EventHandler, run_tests
+   :members: EventHandler, run_tests, TestRecord
 
 ----------------------------
 Module Configuration Options
diff --git a/pw_unit_test/py/pw_unit_test/rpc.py b/pw_unit_test/py/pw_unit_test/rpc.py
index ac1c56a..459071e 100644
--- a/pw_unit_test/py/pw_unit_test/rpc.py
+++ b/pw_unit_test/py/pw_unit_test/rpc.py
@@ -17,7 +17,7 @@
 import abc
 from dataclasses import dataclass
 import logging
-from typing import Iterable
+from typing import Iterable, List, Tuple
 
 from pw_rpc.client import Services
 from pw_rpc.callback_client import OptionalTimeout, UseDefault
@@ -133,13 +133,28 @@
         log('        Actual: %s', expectation.evaluated_expression)
 
 
+@dataclass(frozen=True)
+class TestRecord:
+    """Class for recording test results."""
+
+    passing_tests: Tuple[TestCase, ...]
+    failing_tests: Tuple[TestCase, ...]
+    disabled_tests: Tuple[TestCase, ...]
+
+    def all_tests_passed(self) -> bool:
+        return not self.failing_tests
+
+    def __bool__(self) -> bool:
+        return self.all_tests_passed()
+
+
 def run_tests(
     rpcs: Services,
     report_passed_expectations: bool = False,
     test_suites: Iterable[str] = (),
     event_handlers: Iterable[EventHandler] = (LoggingEventHandler(),),
     timeout_s: OptionalTimeout = UseDefault.VALUE,
-) -> bool:
+) -> TestRecord:
     """Runs unit tests on a device over Pigweed RPC.
 
     Calls each of the provided event handlers as test events occur, and returns
@@ -174,39 +189,53 @@
     for event_handler in event_handlers:
         event_handler.run_all_tests_start()
 
-    all_tests_passed = False
+    passing_tests: List[TestCase] = []
+    failing_tests: List[TestCase] = []
+    disabled_tests: List[TestCase] = []
 
     for response in test_responses:
-        if response.HasField('test_case_start'):
-            raw_test_case = response.test_case_start
-            current_test_case = _test_case(raw_test_case)
-
-        for event_handler in event_handlers:
-            if response.HasField('test_run_start'):
+        if response.HasField('test_run_start'):
+            for event_handler in event_handlers:
                 event_handler.run_all_tests_start()
-            elif response.HasField('test_run_end'):
+        elif response.HasField('test_run_end'):
+            for event_handler in event_handlers:
                 event_handler.run_all_tests_end(
                     response.test_run_end.passed, response.test_run_end.failed
                 )
-                if response.test_run_end.failed == 0:
-                    all_tests_passed = True
-            elif response.HasField('test_case_start'):
+            assert len(passing_tests) == response.test_run_end.passed
+            assert len(failing_tests) == response.test_run_end.failed
+            test_record = TestRecord(
+                passing_tests=tuple(passing_tests),
+                failing_tests=tuple(failing_tests),
+                disabled_tests=tuple(disabled_tests),
+            )
+        elif response.HasField('test_case_start'):
+            raw_test_case = response.test_case_start
+            current_test_case = _test_case(raw_test_case)
+            for event_handler in event_handlers:
                 event_handler.test_case_start(current_test_case)
-            elif response.HasField('test_case_end'):
-                result = TestCaseResult(response.test_case_end)
+        elif response.HasField('test_case_end'):
+            result = TestCaseResult(response.test_case_end)
+            for event_handler in event_handlers:
                 event_handler.test_case_end(current_test_case, result)
-            elif response.HasField('test_case_disabled'):
-                event_handler.test_case_disabled(
-                    _test_case(response.test_case_disabled)
-                )
-            elif response.HasField('test_case_expectation'):
-                raw_expectation = response.test_case_expectation
-                expectation = TestExpectation(
-                    raw_expectation.expression,
-                    raw_expectation.evaluated_expression,
-                    raw_expectation.line_number,
-                    raw_expectation.success,
-                )
+            if result == TestCaseResult.SUCCESS:
+                passing_tests.append(current_test_case)
+            else:
+                failing_tests.append(current_test_case)
+        elif response.HasField('test_case_disabled'):
+            raw_test_case = response.test_case_disabled
+            current_test_case = _test_case(raw_test_case)
+            for event_handler in event_handlers:
+                event_handler.test_case_disabled(current_test_case)
+            disabled_tests.append(current_test_case)
+        elif response.HasField('test_case_expectation'):
+            raw_expectation = response.test_case_expectation
+            expectation = TestExpectation(
+                raw_expectation.expression,
+                raw_expectation.evaluated_expression,
+                raw_expectation.line_number,
+                raw_expectation.success,
+            )
+            for event_handler in event_handlers:
                 event_handler.test_case_expect(current_test_case, expectation)
-
-    return all_tests_passed
+    return test_record
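
Because TestRecord defines __bool__ in terms of all_tests_passed(), call
sites that previously checked the boolean return value of run_tests keep
working. A sketch of both styles, assuming an established RPC client
Services object named rpcs:

    from pw_unit_test.rpc import run_tests

    record = run_tests(rpcs)

    # Old style: truthiness still reports overall pass/fail.
    if not record:
        print('Some tests failed.')

    # New style: inspect individual results on the TestRecord.
    print('passed:', len(record.passing_tests))
    print('failed:', len(record.failing_tests))
    print('disabled:', len(record.disabled_tests))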