scripts: tests: sanitycheck: Add testcases for TestCase class

test_testinstance.py: Add testcases that scan ztest source files
and directories for sub-testcases
test_data/testcases: Add the ztest source files used as scan input
test_testsuite_class.py: Update get_all_tests() to match the
count of sub-testcases found in the ztest files
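
For example, scanning test_ztest.c reports the suite entry
ztest_unit_test(test_unit_a) as the sub-testcase 'unit_a', and
parse_subcases() then registers it under the testcase id as
'<id>.unit_a'.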

Signed-off-by: Spoorthy Priya Yerabolu <spoorthy.priya.yerabolu@intel.com>
diff --git a/scripts/tests/sanitycheck/test_data/testcases/tests/test_a/test_ztest_error.c b/scripts/tests/sanitycheck/test_data/testcases/tests/test_a/test_ztest_error.c
new file mode 100644
index 0000000..ae0df15
--- /dev/null
+++ b/scripts/tests/sanitycheck/test_data/testcases/tests/test_a/test_ztest_error.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2020 Intel Corporation
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+static void test_1b(void)
+{
+	ztest_test_skip();
+}
+
+void test_main(void)
+{
+	#ifdef TEST_feature1
+		ztest_test_suite(feature1,
+		ztest_unit_test(1a), ztest_unit_test(test_1b),
+		ztest_unit_test(test_1c)
+	);
+	#endif
+	#ifdef TEST_feature2
+		ztest_test_suite(feature2,
+		ztest_unit_test(test_2a),
+		ztest_unit_test(test_2b)
+		);
+		ztest_run_test_suite(feature2);
+	#endif
+}
diff --git a/scripts/tests/sanitycheck/test_data/testcases/tests/test_a/test_ztest_error_1.c b/scripts/tests/sanitycheck/test_data/testcases/tests/test_a/test_ztest_error_1.c
new file mode 100644
index 0000000..a7b5515
--- /dev/null
+++ b/scripts/tests/sanitycheck/test_data/testcases/tests/test_a/test_ztest_error_1.c
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2020 Intel Corporation
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+ztest_test_suite(feature3,
+				ztest_unit_test(test_unit_1a),
+#ifdef CONFIG_WHATEVER
+				ztest_unit_test(test_unit_1b),
+#endif
+				ztest_unit_test(test_Unit_1c)
+				);
+				ztest_run_test_suite(feature3);
diff --git a/scripts/tests/sanitycheck/test_data/testcases/tests/test_ztest.c b/scripts/tests/sanitycheck/test_data/testcases/tests/test_ztest.c
new file mode 100644
index 0000000..effb2fe
--- /dev/null
+++ b/scripts/tests/sanitycheck/test_data/testcases/tests/test_ztest.c
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2020 Intel Corporation
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+	ztest_test_suite(test_api,
+			 ztest_1cpu_unit_test(test_a) /* comment! */,
+/* comment */ztest_1cpu_unit_test(test_b),
+			 ztest_1cpu_unit_test(test_c),
+			 ztest_unit_test(test_unit_a), ztest_unit_test(test_unit_b),
+			 ztest_1cpu_unit_test(
+				test_newline),
+			 ztest_1cpu_unit_test(test_test_test_aa),
+			 ztest_user_unit_test(test_user),
+			 ztest_1cpu_unit_test(test_last));
+	ztest_run_test_suite(test_api);
diff --git a/scripts/tests/sanitycheck/test_testinstance.py b/scripts/tests/sanitycheck/test_testinstance.py
index 783b05d..798d886 100644
--- a/scripts/tests/sanitycheck/test_testinstance.py
+++ b/scripts/tests/sanitycheck/test_testinstance.py
@@ -13,7 +13,7 @@
 
 ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
 sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/sanity_chk"))
-from sanitylib import TestInstance, BuildError
+from sanitylib import TestInstance, BuildError, TestCase, SanityCheckException
 
 
 TESTDATA_1 = [
@@ -62,7 +62,7 @@
 ]
 
 @pytest.mark.parametrize("enable_asan, enable_coverage, coverage_platform, platform_type, expected_content", TESTDATA_2)
-def test_create_overlay(class_testsuite, all_testcases_dict, platforms_list, test_data, enable_asan, enable_coverage, coverage_platform, platform_type, expected_content):
+def test_create_overlay(class_testsuite, all_testcases_dict, platforms_list, enable_asan, enable_coverage, coverage_platform, platform_type, expected_content):
     """Test correct content is written to testcase_extra.conf based on if conditions
     TO DO: Add extra_configs to the input list"""
     class_testsuite.testcases = all_testcases_dict
@@ -84,3 +84,67 @@
 
     with pytest.raises(BuildError):
         assert testinstance.calculate_sizes() == "Missing/multiple output ELF binary"
+
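+# Each tuple: (testcase_root, workdir, name, expected unique testcase name)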
+TESTDATA_3 = [
+    (ZEPHYR_BASE + '/scripts/tests/sanitycheck/test_data/testcases', ZEPHYR_BASE, '/scripts/tests/sanitycheck/test_data/testcases/tests/test_a/test_a.check_1', '/scripts/tests/sanitycheck/test_data/testcases/tests/test_a/test_a.check_1'),
+    (ZEPHYR_BASE, '.', 'test_a.check_1', 'test_a.check_1'),
+    (ZEPHYR_BASE, '/scripts/tests/sanitycheck/test_data/testcases/test_b', 'test_b.check_1', '/scripts/tests/sanitycheck/test_data/testcases/test_b/test_b.check_1'),
+    (os.path.join(ZEPHYR_BASE, '/scripts/tests'), '.', 'test_b.check_1', 'test_b.check_1'),
+    (os.path.join(ZEPHYR_BASE, '/scripts/tests'), '.', '.', '.'),
+    (ZEPHYR_BASE, '.', 'test_a.check_1.check_2', 'test_a.check_1.check_2'),
+]
+@pytest.mark.parametrize("testcase_root, workdir, name, expected", TESTDATA_3)
+def test_get_unique(testcase_root, workdir, name, expected):
+    '''Test that a unique testcase name is derived from the testcase root, workdir and name'''
+    unique = TestCase(testcase_root, workdir, name)
+    assert unique.name == expected
+
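+# Each tuple: (testcase_root, workdir, name, expected exception message)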
+TESTDATA_4 = [
+    (ZEPHYR_BASE, '.', 'test_c', 'Tests should reference the category and subsystem with a dot as a separator.'),
+    (os.path.join(ZEPHYR_BASE, '/scripts/tests'), '.', '', 'Tests should reference the category and subsystem with a dot as a separator.'),
+]
+@pytest.mark.parametrize("testcase_root, workdir, name, exception", TESTDATA_4)
+def test_get_unique_exception(testcase_root, workdir, name, exception):
+    '''Test that SanityCheckException is raised when the testcase name lacks a dot separator'''
+
+    with pytest.raises(SanityCheckException) as exc_info:
+        TestCase(testcase_root, workdir, name)
+    assert exception in str(exc_info.value)
+
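+# Each tuple: (ztest file under test_data, expected scan warning or None, expected sub-testcases)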
+TESTDATA_5 = [
+    ("testcases/tests/test_ztest.c", None, ['a', 'c', 'unit_a', 'newline', 'aa', 'user', 'last']),
+    ("testcases/tests/test_a/test_ztest_error.c", "Found a test that does not start with test_", ['1a', '1c', '2a', '2b']),
+    ("testcases/tests/test_a/test_ztest_error_1.c", "found invalid #ifdef, #endif in ztest_test_suite()", ['unit_1a', 'unit_1b', 'Unit_1c']),
+]
+
+@pytest.mark.parametrize("test_file, expected_warnings, expected_subcases", TESTDATA_5)
+def test_scan_file(test_data, test_file, expected_warnings, expected_subcases):
+    '''Testing scan_file method with different ztest files for warnings and results'''
+
+    testcase = TestCase("/scripts/tests/sanitycheck/test_data/testcases/tests", ".", "test_a.check_1")
+
+    results, warnings = testcase.scan_file(os.path.join(test_data, test_file))
+    assert sorted(results) == sorted(expected_subcases)
+    assert warnings == expected_warnings
+
+
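+# Each tuple: (path under test_data to scan, expected sub-testcases)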
+TESTDATA_6 = [
+    ("testcases/tests", ['a', 'c', 'unit_a', 'newline', 'aa', 'user', 'last']),
+    ("testcases/tests/test_a", ['unit_1a', 'unit_1b', 'Unit_1c', '1a', '1c', '2a', '2b']),
+]
+
+@pytest.mark.parametrize("test_path, expected_subcases", TESTDATA_6)
+def test_subcases(test_data, test_path, expected_subcases):
+    '''Testing scan_path and parse_subcases methods for expected subcases'''
+    testcase = TestCase("/scripts/tests/sanitycheck/test_data/testcases/tests", ".", "test_a.check_1")
+
+    subcases = testcase.scan_path(os.path.join(test_data, test_path))
+    assert sorted(subcases) == sorted(expected_subcases)
+
+    testcase.id = "test_id"
+    testcase.parse_subcases(os.path.join(test_data, test_path))
+    assert sorted(testcase.cases) == [testcase.id + '.' + x for x in sorted(expected_subcases)]
diff --git a/scripts/tests/sanitycheck/test_testsuite_class.py b/scripts/tests/sanitycheck/test_testsuite_class.py
index 22eb6ad..f2d6dbe 100755
--- a/scripts/tests/sanitycheck/test_testsuite_class.py
+++ b/scripts/tests/sanitycheck/test_testsuite_class.py
@@ -58,11 +58,16 @@
 def test_get_all_testcases(class_testsuite, all_testcases_dict):
     """ Testing get_all_testcases function of TestSuite class in Sanitycheck """
     class_testsuite.testcases = all_testcases_dict
-    expected_tests = ['test_b.check_1', 'test_b.check_2',
-                      'test_c.check_1', 'test_c.check_2',
-                      'test_a.check_1', 'test_a.check_2',
-                      'sample_test.app']
-    assert len(class_testsuite.get_all_tests()) == 7
+    expected_tests = ['sample_test.app',
+                      'test_a.check_1.1a', 'test_a.check_1.1c',
+                      'test_a.check_1.2a', 'test_a.check_1.2b',
+                      'test_a.check_1.Unit_1c', 'test_a.check_1.unit_1a', 'test_a.check_1.unit_1b',
+                      'test_a.check_2.1a', 'test_a.check_2.1c',
+                      'test_a.check_2.2a', 'test_a.check_2.2b',
+                      'test_a.check_2.Unit_1c', 'test_a.check_2.unit_1a', 'test_a.check_2.unit_1b',
+                      'test_b.check_1', 'test_b.check_2',
+                      'test_c.check_1', 'test_c.check_2']
+    assert len(class_testsuite.get_all_tests()) == 19
     assert sorted(class_testsuite.get_all_tests()) == sorted(expected_tests)
 
 def test_get_toolchain(class_testsuite, monkeypatch, capsys):