#!/usr/bin/env python3
# Copyright (c) 2024 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
import importlib
import json
import os
import sys

import mock
import pytest

from conftest import ZEPHYR_BASE, TEST_DATA, testsuite_filename_mock
from twisterlib.testplan import TestPlan


class TestDummy:
    # (--level value, expected number of test cases in the resulting test plan)
    TESTDATA_X = [
        ("smoke", 5),
        ("acceptance", 6),
    ]
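
    # setup_class() below loads scripts/twister under the module name
    # "__main__", so exec_module() in test_level() executes the script exactly
    # as if it had been invoked from the command line, argv parsing included.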
    @classmethod
    def setup_class(cls):
        apath = os.path.join(ZEPHYR_BASE, "scripts", "twister")
        cls.loader = importlib.machinery.SourceFileLoader("__main__", apath)
        cls.spec = importlib.util.spec_from_loader(cls.loader.name, cls.loader)
        cls.twister_module = importlib.util.module_from_spec(cls.spec)

    @classmethod
    def teardown_class(cls):
        pass

    @pytest.mark.parametrize(
        "level, expected_tests", TESTDATA_X, ids=["smoke", "acceptance"]
    )
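    # Patching TESTSUITE_FILENAME swaps the filename TestPlan scans for when
    # collecting suites; testsuite_filename_mock (from conftest) presumably
    # points discovery at the blackbox test data instead of the regular
    # in-tree testcase files.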
    @mock.patch.object(TestPlan, "TESTSUITE_FILENAME", testsuite_filename_mock)
    def test_level(self, capfd, out_path, level, expected_tests):
        # Select platforms used for the tests
        test_platforms = ["qemu_x86", "frdm_k64f"]
        # Select test root
        path = os.path.join(TEST_DATA, "tests")
        config_path = os.path.join(TEST_DATA, "test_config.yaml")
        # Set flags for our Twister command as a list of strings
        args = (
            # Flags related to the generic test setup:
            # * Control the level of detail in stdout/err
            # * Establish the output directory
            # * Select Zephyr tests to use
            # * Control whether to only build, or build and run, the selected tests
            ["-i", "--outdir", out_path, "-T", path, "-y"]
            # Flags under test
            + ["--level", level]
            # Flags required for the test
            + ["--test-config", config_path]
            # Flags related to platform selection
            + [
                val
                for pair in zip(["-p"] * len(test_platforms), test_platforms)
                for val in pair
            ]
        )
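        # For the two platforms above, the zip/flatten comprehension yields:
        #   ["-p", "qemu_x86", "-p", "frdm_k64f"]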

        # First, provide the args variable as our Twister command line arguments.
        # Then, catch the exit code in the sys_exit variable.
        with mock.patch.object(sys, "argv", [sys.argv[0]] + args), pytest.raises(
            SystemExit
        ) as sys_exit:
            # Execute the Twister call itself.
            self.loader.exec_module(self.twister_module)

        # Check whether the Twister call succeeded
        assert str(sys_exit.value) == "0"

        # Read the test plan JSON produced by the Twister run
        with open(os.path.join(out_path, "testplan.json")) as f:
            j = json.load(f)
        filtered_j = [
            (ts["platform"], ts["name"], tc["identifier"])
            for ts in j["testsuites"]
            for tc in ts["testcases"]
            if "reason" not in tc
        ]
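        # filtered_j now holds one (platform, suite name, testcase identifier)
        # tuple per test case that made it into the plan; entries with a
        # "reason" key (presumably filtered or skipped cases) are excluded.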

        # Read stdout and stderr into the out and err variables respectively
        out, err = capfd.readouterr()
        # Rewrite the captured buffers to stdout and stderr so the user can still read them
        sys.stdout.write(out)
        sys.stderr.write(err)

        # Test-relevant checks
        assert expected_tests == len(filtered_j)
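

# A minimal sketch of running this test locally (assuming the file sits in
# Zephyr's twister blackbox test directory, next to the conftest.py it
# imports from):
#   pytest <path-to-this-file> -k test_level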