The kunit_mark_skipped() macro marks the current test as "skipped", with the provided reason. The kunit_skip() macro will mark the test as skipped, and abort the test.
The TAP specification supports this "SKIP directive" as a comment after the "ok" / "not ok" for a test. See the "Directives" section of the TAP spec for details: https://testanything.org/tap-specification.html#directives
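In practice this means a skipped test is still reported on its "ok" line, with the reason following the directive; for example (an illustrative line, matching the format this patch emits):

  ok 1 - example_skip_test # SKIP this test should be skipped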
The 'success' field for KUnit tests is replaced with a kunit_status enum, which can be SUCCESS, FAILURE, or SKIPPED, combined with a 'status_comment' containing information on why a test was skipped.
A new 'kunit_status' test suite is added to test this.
Signed-off-by: David Gow <davidgow@google.com>
---
This change depends on the assertion typechecking fix here:
https://lore.kernel.org/linux-kselftest/20210513193204.816681-1-davidgow@goo...
Only the first two patches in the series are required.
This is the long-awaited follow-up to the skip tests RFC: https://lore.kernel.org/linux-kselftest/20200513042956.109987-1-davidgow@goo...
There are quite a few changes since that version, principally:
- A kunit_status enum is now used, with SKIPPED a distinct state.
- The kunit_mark_skipped() and kunit_skip() macros now take printf-style
  format strings.
- There is now a kunit_status test suite providing basic tests of this
  functionality.
- The kunit_tool changes have been split into a separate commit.
- The example skipped tests have been expanded and moved to their own
  suite, which is not enabled by KUNIT_ALL_TESTS.
- A number of other fixes and changes here and there.
Cheers,
-- David
 include/kunit/test.h   | 68 ++++++++++++++++++++++++++++++++++++++----
 lib/kunit/kunit-test.c | 42 +++++++++++++++++++++++++-
 lib/kunit/test.c       | 51 ++++++++++++++++++-------------
 3 files changed, 134 insertions(+), 27 deletions(-)
diff --git a/include/kunit/test.h b/include/kunit/test.h
index b68c61348121..40b536da027e 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -105,6 +105,18 @@ struct kunit;
 #define KUNIT_SUBTEST_INDENT		"    "
 #define KUNIT_SUBSUBTEST_INDENT		"        "

+/**
+ * enum kunit_status - Type of result for a test or test suite
+ * @KUNIT_SUCCESS: Denotes the test suite has not failed nor been skipped
+ * @KUNIT_FAILURE: Denotes the test has failed.
+ * @KUNIT_SKIPPED: Denotes the test has been skipped.
+ */
+enum kunit_status {
+	KUNIT_SUCCESS,
+	KUNIT_FAILURE,
+	KUNIT_SKIPPED,
+};
+
 /**
  * struct kunit_case - represents an individual test case.
  *
@@ -148,13 +160,20 @@ struct kunit_case {
 	const void* (*generate_params)(const void *prev, char *desc);

 	/* private: internal use only. */
-	bool success;
+	enum kunit_status status;
 	char *log;
 };

-static inline char *kunit_status_to_string(bool status)
+static inline char *kunit_status_to_string(enum kunit_status status)
 {
-	return status ? "ok" : "not ok";
+	switch (status) {
+	case KUNIT_SKIPPED:
+	case KUNIT_SUCCESS:
+		return "ok";
+	case KUNIT_FAILURE:
+		return "not ok";
+	}
+	return "invalid";
 }

 /**
@@ -212,6 +231,7 @@ struct kunit_suite {
 	struct kunit_case *test_cases;

 	/* private: internal use only */
+	char status_comment[256];
 	struct dentry *debugfs;
 	char *log;
 };
@@ -245,19 +265,21 @@ struct kunit {
 	 * be read after the test case finishes once all threads associated
 	 * with the test case have terminated.
 	 */
-	bool success; /* Read only after test_case finishes! */
 	spinlock_t lock; /* Guards all mutable test state. */
+	enum kunit_status status; /* Read only after test_case finishes! */
 	/*
 	 * Because resources is a list that may be updated multiple times (with
 	 * new resources) from any thread associated with a test case, we must
 	 * protect it with some type of lock.
 	 */
 	struct list_head resources; /* Protected by lock. */
+
+	char status_comment[256];
 };

 static inline void kunit_set_failure(struct kunit *test)
 {
-	WRITE_ONCE(test->success, false);
+	WRITE_ONCE(test->status, KUNIT_FAILURE);
 }

 void kunit_init_test(struct kunit *test, const char *name, char *log);
@@ -348,7 +370,7 @@ static inline int kunit_run_all_tests(void)
 #define kunit_suite_for_each_test_case(suite, test_case)		\
 	for (test_case = suite->test_cases; test_case->run_case; test_case++)

-bool kunit_suite_has_succeeded(struct kunit_suite *suite);
+enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite);

 /*
  * Like kunit_alloc_resource() below, but returns the struct kunit_resource
@@ -612,6 +634,40 @@ void kunit_cleanup(struct kunit *test);

 void kunit_log_append(char *log, const char *fmt, ...);

+/**
+ * kunit_mark_skipped() - Marks @test_or_suite as skipped
+ *
+ * @test_or_suite: The test context object.
+ * @fmt: A printk() style format string.
+ *
+ * Marks the test as skipped. @fmt is given output as the test status
+ * comment, typically the reason the test was skipped.
+ *
+ * Test execution continues after kunit_mark_skipped() is called.
+ */
+#define kunit_mark_skipped(test_or_suite, fmt, ...)			\
+	do {								\
+		WRITE_ONCE((test_or_suite)->status, KUNIT_SKIPPED);	\
+		scnprintf((test_or_suite)->status_comment, 256, fmt, ##__VA_ARGS__); \
+	} while (0)
+
+/**
+ * kunit_skip() - Marks @test_or_suite as skipped
+ *
+ * @test_or_suite: The test context object.
+ * @fmt: A printk() style format string.
+ *
+ * Skips the test. @fmt is given output as the test status
+ * comment, typically the reason the test was skipped.
+ *
+ * Test execution is halted after kunit_skip() is called.
+ */
+#define kunit_skip(test_or_suite, fmt, ...)				\
+	do {								\
+		kunit_mark_skipped((test_or_suite), fmt, ##__VA_ARGS__);\
+		kunit_try_catch_throw(&((test_or_suite)->try_catch));	\
+	} while (0)
+
 /*
  * printk and log to per-test or per-suite log buffer. Logging only done
  * if CONFIG_KUNIT_DEBUGFS is 'y'; if it is 'n', no log is allocated/used.
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index 69f902440a0e..d69efcbed624 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -437,7 +437,47 @@ static void kunit_log_test(struct kunit *test)
 #endif
 }

+static void kunit_status_set_failure_test(struct kunit *test)
+{
+	struct kunit fake;
+
+	kunit_init_test(&fake, "fake test", NULL);
+
+	KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_SUCCESS);
+	kunit_set_failure(&fake);
+	KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_FAILURE);
+}
+
+static void kunit_status_mark_skipped_test(struct kunit *test)
+{
+	struct kunit fake;
+
+	kunit_init_test(&fake, "fake test", NULL);
+
+	/* Before: Should be SUCCESS with no comment. */
+	KUNIT_EXPECT_EQ(test, fake.status, KUNIT_SUCCESS);
+	KUNIT_EXPECT_STREQ(test, fake.status_comment, "");
+
+	/* Mark the test as skipped. */
+	kunit_mark_skipped(&fake, "Accepts format string: %s", "YES");
+
+	/* After: Should be SKIPPED with our comment. */
+	KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_SKIPPED);
+	KUNIT_EXPECT_STREQ(test, fake.status_comment, "Accepts format string: YES");
+}
+
+static struct kunit_case kunit_status_test_cases[] = {
+	KUNIT_CASE(kunit_status_set_failure_test),
+	KUNIT_CASE(kunit_status_mark_skipped_test),
+	{}
+};
+
+static struct kunit_suite kunit_status_test_suite = {
+	.name = "kunit_status",
+	.test_cases = kunit_status_test_cases,
+};
+
 kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite,
-		  &kunit_log_test_suite);
+		  &kunit_log_test_suite, &kunit_status_test_suite);

 MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 2f6cc0123232..0ee07705d2b0 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -98,12 +98,14 @@ static void kunit_print_subtest_start(struct kunit_suite *suite)

 static void kunit_print_ok_not_ok(void *test_or_suite,
 				  bool is_test,
-				  bool is_ok,
+				  enum kunit_status status,
 				  size_t test_number,
-				  const char *description)
+				  const char *description,
+				  const char *directive)
 {
 	struct kunit_suite *suite = is_test ? NULL : test_or_suite;
 	struct kunit *test = is_test ? test_or_suite : NULL;
+	const char *directive_header = (status == KUNIT_SKIPPED) ? " # SKIP " : "";

 	/*
 	 * We do not log the test suite results as doing so would
@@ -114,25 +116,31 @@ static void kunit_print_ok_not_ok(void *test_or_suite,
 	 * representation.
 	 */
 	if (suite)
-		pr_info("%s %zd - %s\n",
-			kunit_status_to_string(is_ok),
-			test_number, description);
+		pr_info("%s %zd - %s%s%s\n",
+			kunit_status_to_string(status),
+			test_number, description,
+			directive_header, directive ? directive : "");
 	else
-		kunit_log(KERN_INFO, test, KUNIT_SUBTEST_INDENT "%s %zd - %s",
-			  kunit_status_to_string(is_ok),
-			  test_number, description);
+		kunit_log(KERN_INFO, test,
+			  KUNIT_SUBTEST_INDENT "%s %zd - %s%s%s",
+			  kunit_status_to_string(status),
+			  test_number, description,
+			  directive_header, directive ? directive : "");
 }

-bool kunit_suite_has_succeeded(struct kunit_suite *suite)
+enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite)
 {
 	const struct kunit_case *test_case;
+	enum kunit_status status = KUNIT_SKIPPED;

 	kunit_suite_for_each_test_case(suite, test_case) {
-		if (!test_case->success)
-			return false;
+		if (test_case->status == KUNIT_FAILURE)
+			return KUNIT_FAILURE;
+		else if (test_case->status == KUNIT_SUCCESS)
+			status = KUNIT_SUCCESS;
 	}

-	return true;
+	return status;
 }
 EXPORT_SYMBOL_GPL(kunit_suite_has_succeeded);

@@ -143,7 +151,8 @@ static void kunit_print_subtest_end(struct kunit_suite *suite)
 	kunit_print_ok_not_ok((void *)suite, false,
 			      kunit_suite_has_succeeded(suite),
 			      kunit_suite_counter++,
-			      suite->name);
+			      suite->name,
+			      suite->status_comment);
 }

 unsigned int kunit_test_case_num(struct kunit_suite *suite,
@@ -252,7 +261,8 @@ void kunit_init_test(struct kunit *test, const char *name, char *log)
 	test->log = log;
 	if (test->log)
 		test->log[0] = '\0';
-	test->success = true;
+	test->status = KUNIT_SUCCESS;
+	test->status_comment[0] = '\0';
 }
 EXPORT_SYMBOL_GPL(kunit_init_test);

@@ -376,7 +386,8 @@ static void kunit_run_case_catch_errors(struct kunit_suite *suite,
 	context.test_case = test_case;
 	kunit_try_catch_run(try_catch, &context);

-	test_case->success = test->success;
+	test_case->status = test->status;
+
 }

 int kunit_run_tests(struct kunit_suite *suite)
@@ -388,7 +399,6 @@ int kunit_run_tests(struct kunit_suite *suite)

 	kunit_suite_for_each_test_case(suite, test_case) {
 		struct kunit test = { .param_value = NULL, .param_index = 0 };
-		bool test_success = true;

 		if (test_case->generate_params) {
 			/* Get initial param. */
@@ -398,7 +408,6 @@ int kunit_run_tests(struct kunit_suite *suite)

 		do {
 			kunit_run_case_catch_errors(suite, test_case, &test);
-			test_success &= test_case->success;

 			if (test_case->generate_params) {
 				if (param_desc[0] == '\0') {
@@ -410,7 +419,7 @@ int kunit_run_tests(struct kunit_suite *suite)
 					  KUNIT_SUBTEST_INDENT
 					  "# %s: %s %d - %s",
 					  test_case->name,
-					  kunit_status_to_string(test.success),
+					  kunit_status_to_string(test.status),
 					  test.param_index + 1, param_desc);

 				/* Get next param. */
@@ -420,9 +429,10 @@ int kunit_run_tests(struct kunit_suite *suite)
 			}
 		} while (test.param_value);

-		kunit_print_ok_not_ok(&test, true, test_success,
+		kunit_print_ok_not_ok(&test, true, test_case->status,
 				      kunit_test_case_num(suite, test_case),
-				      test_case->name);
+				      test_case->name,
+				      test.status_comment);
 	}

 	kunit_print_subtest_end(suite);
@@ -434,6 +444,7 @@ EXPORT_SYMBOL_GPL(kunit_run_tests);
 static void kunit_init_suite(struct kunit_suite *suite)
 {
 	kunit_debugfs_create_suite(suite);
+	suite->status_comment[0] = '\0';
 }

 int __kunit_test_suites_init(struct kunit_suite * const * const suites)
Add support for the SKIP directive to kunit_tool's TAP parser.
Skipped tests now show up as such in the printed summary. The number of skipped tests is counted, and if all tests in a suite are skipped, the suite is also marked as skipped. Otherwise, skipped tests do not affect the suite result.
Example output:
[00:22:34] ======== [SKIPPED] example_skip ========
[00:22:34] [SKIPPED] example_skip_test # SKIP this test should be skipped
[00:22:34] [SKIPPED] example_mark_skipped_test # SKIP this test should be skipped
[00:22:34] ============================================================
[00:22:34] Testing complete. 2 tests run. 0 failed. 0 crashed. 2 skipped.
Signed-off-by: David Gow <davidgow@google.com>
---
 tools/testing/kunit/kunit_parser.py    | 47 +++++++++++++++++++-------
 tools/testing/kunit/kunit_tool_test.py | 22 ++++++++++++
 2 files changed, 57 insertions(+), 12 deletions(-)
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index e8bcc139702e..6b5dd26b479d 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -43,6 +43,7 @@ class TestCase(object):
 class TestStatus(Enum):
 	SUCCESS = auto()
 	FAILURE = auto()
+	SKIPPED = auto()
 	TEST_CRASHED = auto()
 	NO_TESTS = auto()
 	FAILURE_TO_PARSE_TESTS = auto()
@@ -108,6 +109,8 @@ def save_non_diagnostic(lines: List[str], test_case: TestCase) -> None:

 OkNotOkResult = namedtuple('OkNotOkResult', ['is_ok','description', 'text'])

+OK_NOT_OK_SKIP = re.compile(r'^[\s]*(ok|not ok) [0-9]+ - (.*) # SKIP(.*)$')
+
 OK_NOT_OK_SUBTEST = re.compile(r'^[\s]+(ok|not ok) [0-9]+ - (.*)$')

 OK_NOT_OK_MODULE = re.compile(r'^(ok|not ok) ([0-9]+) - (.*)$')
@@ -125,6 +128,10 @@ def parse_ok_not_ok_test_case(lines: List[str], test_case: TestCase) -> bool:
 	if match:
 		test_case.log.append(lines.pop(0))
 		test_case.name = match.group(2)
+		skip_match = OK_NOT_OK_SKIP.match(line)
+		if skip_match:
+			test_case.status = TestStatus.SKIPPED
+			return True
 		if test_case.status == TestStatus.TEST_CRASHED:
 			return True
 		if match.group(1) == 'ok':
@@ -188,16 +195,16 @@ def parse_subtest_plan(lines: List[str]) -> Optional[int]:
 	return None

 def max_status(left: TestStatus, right: TestStatus) -> TestStatus:
-	if left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED:
+	if left == right:
+		return left
+	elif left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED:
 		return TestStatus.TEST_CRASHED
 	elif left == TestStatus.FAILURE or right == TestStatus.FAILURE:
 		return TestStatus.FAILURE
-	elif left != TestStatus.SUCCESS:
-		return left
-	elif right != TestStatus.SUCCESS:
+	elif left == TestStatus.SKIPPED:
 		return right
 	else:
-		return TestStatus.SUCCESS
+		return left

 def parse_ok_not_ok_test_suite(lines: List[str],
 			       test_suite: TestSuite,
@@ -214,6 +221,9 @@ def parse_ok_not_ok_test_suite(lines: List[str],
 			test_suite.status = TestStatus.SUCCESS
 		else:
 			test_suite.status = TestStatus.FAILURE
+		skip_match = OK_NOT_OK_SKIP.match(line)
+		if skip_match:
+			test_suite.status = TestStatus.SKIPPED
 		suite_index = int(match.group(2))
 		if suite_index != expected_suite_index:
 			print_with_timestamp(
@@ -224,8 +234,8 @@ def parse_ok_not_ok_test_suite(lines: List[str],
 	else:
 		return False

-def bubble_up_errors(statuses: Iterable[TestStatus]) -> TestStatus:
-	return reduce(max_status, statuses, TestStatus.SUCCESS)
+def bubble_up_errors(status_list: Iterable[TestStatus]) -> TestStatus:
+	return reduce(max_status, status_list, TestStatus.SKIPPED)

 def bubble_up_test_case_errors(test_suite: TestSuite) -> TestStatus:
 	max_test_case_status = bubble_up_errors(x.status for x in test_suite.cases)
@@ -315,9 +325,12 @@ def print_and_count_results(test_result: TestResult) -> Tuple[int, int, int]:
 	total_tests = 0
 	failed_tests = 0
 	crashed_tests = 0
+	skipped_tests = 0
 	for test_suite in test_result.suites:
 		if test_suite.status == TestStatus.SUCCESS:
 			print_suite_divider(green('[PASSED] ') + test_suite.name)
+		elif test_suite.status == TestStatus.SKIPPED:
+			print_suite_divider(yellow('[SKIPPED] ') + test_suite.name)
 		elif test_suite.status == TestStatus.TEST_CRASHED:
 			print_suite_divider(red('[CRASHED] ' + test_suite.name))
 		else:
@@ -326,6 +339,9 @@ def print_and_count_results(test_result: TestResult) -> Tuple[int, int, int]:
 			total_tests += 1
 			if test_case.status == TestStatus.SUCCESS:
 				print_with_timestamp(green('[PASSED] ') + test_case.name)
+			elif test_case.status == TestStatus.SKIPPED:
+				skipped_tests += 1
+				print_with_timestamp(yellow('[SKIPPED] ') + test_case.name)
 			elif test_case.status == TestStatus.TEST_CRASHED:
 				crashed_tests += 1
 				print_with_timestamp(red('[CRASHED] ' + test_case.name))
@@ -336,12 +352,13 @@ def print_and_count_results(test_result: TestResult) -> Tuple[int, int, int]:
 				print_with_timestamp(red('[FAILED] ') + test_case.name)
 				print_log(map(yellow, test_case.log))
 				print_with_timestamp('')
-	return total_tests, failed_tests, crashed_tests
+	return total_tests, failed_tests, crashed_tests, skipped_tests

 def parse_run_tests(kernel_output) -> TestResult:
 	total_tests = 0
 	failed_tests = 0
 	crashed_tests = 0
+	skipped_tests = 0
 	test_result = parse_test_result(list(isolate_kunit_output(kernel_output)))
 	if test_result.status == TestStatus.NO_TESTS:
 		print(red('[ERROR] ') + yellow('no tests run!'))
@@ -350,10 +367,16 @@ def parse_run_tests(kernel_output) -> TestResult:
 	else:
 		(total_tests,
 		 failed_tests,
-		 crashed_tests) = print_and_count_results(test_result)
+		 crashed_tests,
+		 skipped_tests) = print_and_count_results(test_result)
 	print_with_timestamp(DIVIDER)
-	fmt = green if test_result.status == TestStatus.SUCCESS else red
+	if test_result.status == TestStatus.SUCCESS:
+		fmt = green
+	elif test_result.status == TestStatus.SKIPPED:
+		fmt = yellow
+	else:
+		fmt = red
 	print_with_timestamp(
-		fmt('Testing complete. %d tests run. %d failed. %d crashed.' %
-		    (total_tests, failed_tests, crashed_tests)))
+		fmt('Testing complete. %d tests run. %d failed. %d crashed. %d skipped.' %
+		    (total_tests, failed_tests, crashed_tests, skipped_tests)))
 	return test_result
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index 2e809dd956a7..a51e70cafcc1 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -183,6 +183,28 @@ class KUnitParserTest(unittest.TestCase):
 			kunit_parser.TestStatus.TEST_CRASHED,
 			result.status)

+	def test_skipped_test(self):
+		skipped_log = test_data_path('test_skip_tests.log')
+		file = open(skipped_log)
+		result = kunit_parser.parse_run_tests(file.readlines())
+
+		# A skipped test does not fail the whole suite.
+		self.assertEqual(
+			kunit_parser.TestStatus.SUCCESS,
+			result.status)
+		file.close()
+
+	def test_skipped_all_tests(self):
+		skipped_log = test_data_path('test_skip_all_tests.log')
+		file = open(skipped_log)
+		result = kunit_parser.parse_run_tests(file.readlines())
+
+		self.assertEqual(
+			kunit_parser.TestStatus.SKIPPED,
+			result.status)
+		file.close()
+
 	def test_ignores_prefix_printk_time(self):
 		prefix_log = test_data_path('test_config_printk_time.log')
 		with open(prefix_log) as file:
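To make the new parser semantics concrete, here is a small self-contained sketch (not part of the patch): the OK_NOT_OK_SKIP regex and max_status() are copied from the diff above, and the asserts show that folding statuses from a SKIPPED seed marks an all-skipped suite as skipped, while a single pass or failure dominates:

import re
from enum import Enum, auto
from functools import reduce

# Mirrors kunit_parser.TestStatus (SKIPPED is the new member).
class TestStatus(Enum):
	SUCCESS = auto()
	FAILURE = auto()
	SKIPPED = auto()
	TEST_CRASHED = auto()

OK_NOT_OK_SKIP = re.compile(r'^[\s]*(ok|not ok) [0-9]+ - (.*) # SKIP(.*)$')

# Copied from the patch: SKIPPED loses to every other status.
def max_status(left: TestStatus, right: TestStatus) -> TestStatus:
	if left == right:
		return left
	elif left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED:
		return TestStatus.TEST_CRASHED
	elif left == TestStatus.FAILURE or right == TestStatus.FAILURE:
		return TestStatus.FAILURE
	elif left == TestStatus.SKIPPED:
		return right
	else:
		return left

# The SKIP directive is recognised on an otherwise ordinary result line.
assert OK_NOT_OK_SKIP.match('ok 1 - example_skip_test # SKIP this test should be skipped')

def suite_status(statuses):
	# Seeding the fold with SKIPPED is what makes an all-skipped suite
	# come out as SKIPPED (cf. bubble_up_errors() above).
	return reduce(max_status, statuses, TestStatus.SKIPPED)

assert suite_status([TestStatus.SKIPPED, TestStatus.SKIPPED]) == TestStatus.SKIPPED
assert suite_status([TestStatus.SKIPPED, TestStatus.SUCCESS]) == TestStatus.SUCCESS
assert suite_status([TestStatus.SKIPPED, TestStatus.FAILURE]) == TestStatus.FAILURE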
On Wed, May 26, 2021 at 1:11 AM 'David Gow' via KUnit Development
<kunit-dev@googlegroups.com> wrote:
> [...]
> tools/testing/kunit/kunit_parser.py    | 47 +++++++++++++++++++-------
> tools/testing/kunit/kunit_tool_test.py | 22 ++++++++++++

This seems to be missing the added test files.

> 2 files changed, 57 insertions(+), 12 deletions(-)
> [...]
> @@ -315,9 +325,12 @@ def print_and_count_results(test_result: TestResult) -> Tuple[int, int, int]:

Btw, this type annotation is out of date. But I think an ever-growing Tuple is too cumbersome; how about this?
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index 6b5dd26b479d..055ee1e4d19d 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -6,6 +6,7 @@
 # Author: Felix Guo <felixguoxiuping@gmail.com>
 # Author: Brendan Higgins <brendanhiggins@google.com>

+from dataclasses import dataclass
 import re

 from collections import namedtuple
@@ -321,11 +322,19 @@ def parse_test_result(lines: List[str]) -> TestResult:
 	else:
 		return TestResult(TestStatus.NO_TESTS, [], lines)

-def print_and_count_results(test_result: TestResult) -> Tuple[int, int, int]:
-	total_tests = 0
-	failed_tests = 0
-	crashed_tests = 0
-	skipped_tests = 0
+# Note: This would require Python 3.7. We currently only require 3.6
+# (enum.auto). We can do it by hand to avoid that, if we want.
+@dataclass
+class TestCounts:
+	passed: int = 0
+	failed: int = 0
+	skipped: int = 0
+	crashed: int = 0
+
+	def total(self) -> int:
+		return self.passed + self.failed + self.skipped + self.crashed
+
+def print_and_count_results(test_result: TestResult) -> TestCounts:
+	counts = TestCounts()
 	for test_suite in test_result.suites:
 		if test_suite.status == TestStatus.SUCCESS:
 			print_suite_divider(green('[PASSED] ') + test_suite.name)
@@ -336,39 +345,33 @@ def print_and_count_results(test_result: TestResult) -> Tuple[int, int, int]:
 		else:
 			print_suite_divider(red('[FAILED] ') + test_suite.name)
 		for test_case in test_suite.cases:
-			total_tests += 1
 			if test_case.status == TestStatus.SUCCESS:
+				counts.passed += 1
 				print_with_timestamp(green('[PASSED] ') + test_case.name)
 			elif test_case.status == TestStatus.SKIPPED:
-				skipped_tests += 1
+				counts.skipped += 1
 				print_with_timestamp(yellow('[SKIPPED] ') + test_case.name)
 			elif test_case.status == TestStatus.TEST_CRASHED:
-				crashed_tests += 1
+				counts.crashed += 1
 				print_with_timestamp(red('[CRASHED] ' + test_case.name))
 				print_log(map(yellow, test_case.log))
 				print_with_timestamp('')
 			else:
-				failed_tests += 1
+				counts.failed += 1
 				print_with_timestamp(red('[FAILED] ') + test_case.name)
 				print_log(map(yellow, test_case.log))
 				print_with_timestamp('')
-	return total_tests, failed_tests, crashed_tests, skipped_tests
+	return counts

 def parse_run_tests(kernel_output) -> TestResult:
-	total_tests = 0
-	failed_tests = 0
-	crashed_tests = 0
-	skipped_tests = 0
+	counts = TestCounts()
 	test_result = parse_test_result(list(isolate_kunit_output(kernel_output)))
 	if test_result.status == TestStatus.NO_TESTS:
 		print(red('[ERROR] ') + yellow('no tests run!'))
 	elif test_result.status == TestStatus.FAILURE_TO_PARSE_TESTS:
 		print(red('[ERROR] ') + yellow('could not parse test results!'))
 	else:
-		(total_tests,
-		 failed_tests,
-		 crashed_tests,
-		 skipped_tests) = print_and_count_results(test_result)
+		counts = print_and_count_results(test_result)
 	print_with_timestamp(DIVIDER)
 	if test_result.status == TestStatus.SUCCESS:
 		fmt = green
@@ -378,5 +381,5 @@ def parse_run_tests(kernel_output) -> TestResult:
 		fmt = red
 	print_with_timestamp(
 		fmt('Testing complete. %d tests run. %d failed. %d crashed. %d skipped.' %
-		    (total_tests, failed_tests, crashed_tests, skipped_tests)))
+		    (counts.total(), counts.failed, counts.crashed, counts.skipped)))
 	return test_result
On Thu, May 27, 2021 at 3:10 AM Daniel Latypov <dlatypov@google.com> wrote:
> On Wed, May 26, 2021 at 1:11 AM 'David Gow' via KUnit Development
> <kunit-dev@googlegroups.com> wrote:
> > [...]
>
> This seems to be missing the added test files.

Whoops, yes: I'll add these back in v2.
> [...]
> Btw, this type annotation is out of date.

Oops: will fix and/or replace with the below.
> But I think an ever-growing Tuple is too cumbersome; how about this?

Yeah, this does seem cleaner: I'll put this or something like it in v2.
> [...]
> +# Note: This would require Python 3.7. We currently only require 3.6
> +# (enum.auto). We can do it by hand to avoid that, if we want.

Hmm... I'm generally loath to increase the version requirement for something this simple, so might look into doing a version of this without the dataclass.
On Thu, May 27, 2021 at 1:22 AM David Gow <davidgow@google.com> wrote:
> > +# Note: This would require Python 3.7. We currently only require 3.6
> > +# (enum.auto). We can do it by hand to avoid that, if we want.
>
> Hmm... I'm generally loath to increase the version requirement for
> something this simple, so might look into doing a version of this
> without the dataclass.

I think the same argument applies to enum.auto when we can just manually assign values :P
But yes, I'd suggest not using it. You'd just need to manually write the __init__() in that case (you can't use namedtuple since we need to modify the fields, but also I prefer having type annotations on my fields).
I only used @dataclass to make my example easier to write since I'm lazy.
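For reference, a hand-rolled variant along those lines might look like the sketch below (hypothetical code, not from this thread's patches): the type annotations stay on the class, and __init__() is written out manually so Python 3.6 suffices:

class TestCounts:
	passed: int
	failed: int
	skipped: int
	crashed: int

	def __init__(self) -> None:
		# All counters start at zero, as the dataclass defaults did.
		self.passed = 0
		self.failed = 0
		self.skipped = 0
		self.crashed = 0

	def total(self) -> int:
		return self.passed + self.failed + self.skipped + self.crashed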
Add a new KUnit test suite containing tests which are always skipped, as an example of how to write skipped tests and to demonstrate the difference between kunit_skip() and kunit_mark_skipped().
Because these tests do not pass (they're skipped), they are not enabled by default, or by the KUNIT_ALL_TESTS config option: they must be enabled explicitly by setting CONFIG_KUNIT_EXAMPLE_SKIP_TEST=y in either a .config or .kunitconfig file.
Signed-off-by: David Gow <davidgow@google.com>
---
 lib/kunit/Kconfig                   | 15 +++++++++
 lib/kunit/Makefile                  |  2 ++
 lib/kunit/kunit-example-skip-test.c | 52 +++++++++++++++++++++++++++++
 3 files changed, 69 insertions(+)
 create mode 100644 lib/kunit/kunit-example-skip-test.c
diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig
index 0b5dfb001bac..399fe5f789f7 100644
--- a/lib/kunit/Kconfig
+++ b/lib/kunit/Kconfig
@@ -45,6 +45,21 @@ config KUNIT_EXAMPLE_TEST
 	  is intended for curious hackers who would like to understand how to
 	  use KUnit for kernel development.

+config KUNIT_EXAMPLE_SKIP_TEST
+	tristate "Skipped test example for KUnit"
+	default n
+	help
+	  Enables an example unit test that is always skipped.
+
+	  This test only exists to help new users understand what KUnit is and
+	  how it is used. Please refer to the example test itself,
+	  lib/kunit/example-test.c, for more information. This option is
+	  intended for curious hackers who would like to understand how to use
+	  KUnit for kernel development.
+
+	  Because this test does not pass, it is not enabled by
+	  CONFIG_KUNIT_ALL_TESTS.
+
 config KUNIT_ALL_TESTS
 	tristate "All KUnit tests with satisfied dependencies"
 	help
diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile
index c49f4ffb6273..8a99ff2f83bd 100644
--- a/lib/kunit/Makefile
+++ b/lib/kunit/Makefile
@@ -18,3 +18,5 @@ obj-$(CONFIG_KUNIT_TEST) +=		string-stream-test.o
 endif

 obj-$(CONFIG_KUNIT_EXAMPLE_TEST) +=	kunit-example-test.o
+
+obj-$(CONFIG_KUNIT_EXAMPLE_SKIP_TEST) += kunit-example-skip-test.o
diff --git a/lib/kunit/kunit-example-skip-test.c b/lib/kunit/kunit-example-skip-test.c
new file mode 100644
index 000000000000..5395ee0be485
--- /dev/null
+++ b/lib/kunit/kunit-example-skip-test.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Example KUnit test which is always skipped.
+ *
+ * Copyright (C) 2021, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+
+#include <kunit/test.h>
+
+/*
+ * This test should always be skipped.
+ */
+static void example_skip_test(struct kunit *test)
+{
+	/* This line should run */
+	kunit_log(KERN_INFO, test, "You should not see a line below.");
+
+	/* Skip (and abort) the test */
+	kunit_skip(test, "this test should be skipped");
+
+	/* This line should not execute */
+	kunit_log(KERN_INFO, test, "You should not see this line.");
+}
+
+static void example_mark_skipped_test(struct kunit *test)
+{
+	/* This line should run */
+	kunit_log(KERN_INFO, test, "You should see a line below.");
+
+	/* Skip (but do not abort) the test */
+	kunit_mark_skipped(test, "this test should be skipped");
+
+	/* This line should run */
+	kunit_log(KERN_INFO, test, "You should see this line.");
+}
+
+static struct kunit_case example_skip_test_cases[] = {
+	KUNIT_CASE(example_skip_test),
+	KUNIT_CASE(example_mark_skipped_test),
+	{}
+};
+
+static struct kunit_suite example_skip_test_suite = {
+	.name = "example_skip",
+	.test_cases = example_skip_test_cases,
+};
+
+kunit_test_suites(&example_skip_test_suite);
+
+MODULE_LICENSE("GPL v2");
On Wed, May 26, 2021 at 01:11AM -0700, David Gow wrote:
> Add a new KUnit test suite containing tests which are always skipped,
> as an example of how to write skipped tests and to demonstrate the
> difference between kunit_skip() and kunit_mark_skipped(). [...]

I don't know if this test is useful for a user of KUnit. Given it's not testing KUnit functionality (I see you added tests that the feature works in patch 1/3) but is rather a demonstration, it's effectively dead code. I don't think the feature is difficult to understand from the API doc text.
Instead, would it be more helpful to add something to Documentation/dev-tools/kunit? Or perhaps just add something to lib/kunit/kunit-example-test.c? It'd avoid introducing more Kconfig options at least.
Thanks,
-- Marco
On Wed, May 26, 2021 at 1:56 AM 'Marco Elver' via KUnit Development
<kunit-dev@googlegroups.com> wrote:
On Wed, May 26, 2021 at 01:11AM -0700, David Gow wrote:
Add a new KUnit test suite which contains tests which are always skipped. This is used as an example for how to write tests which are skipped, and to demonstrate the difference between kunit_skip() and kunit_mark_skipped().
Because these tests do not pass (they're skipped), they are not enabled by default, or by the KUNIT_ALL_TESTS config option: they must be enabled explicitly by setting CONFIG_KUNIT_EXAMPLE_SKIP_TEST=y in either a .config or .kunitconfig file.
Signed-off-by: David Gow davidgow@google.com
 lib/kunit/Kconfig                   | 15 +++++++++
 lib/kunit/Makefile                  |  2 ++
 lib/kunit/kunit-example-skip-test.c | 52 +++++++++++++++++++++++++++++
 3 files changed, 69 insertions(+)
 create mode 100644 lib/kunit/kunit-example-skip-test.c
I don't know if this test is useful for a user of KUnit. It's not testing KUnit functionality (I see you added tests that the feature works in patch 1/3), but is rather a demonstration, and therefore dead code. I don't think the feature is difficult to understand from the API doc text.
Instead, would it be more helpful to add something to Documentation/dev-tools/kunit? Or perhaps just add something to lib/kunit/kunit-example-test.c? It'd avoid introducing more Kconfig
I'm in favor of putting it in kunit-example-test.c as well.
But I hear there was pushback to have a non-passing test in the example? I guess the fear is that someone will see something that doesn't say "passed" in the example output and think something has gone wrong?
Hence this more conservative change. But I hope that in the absence of any replies in opposition, we can just keep one example-test.c
options at least.
Thanks, -- Marco
On Wed, May 26, 2021 at 11:29AM -0700, Daniel Latypov wrote:
On Wed, May 26, 2021 at 1:56 AM 'Marco Elver' via KUnit Development kunit-dev@googlegroups.com wrote:
On Wed, May 26, 2021 at 01:11AM -0700, David Gow wrote:
Add a new KUnit test suite which contains tests which are always skipped. This is used as an example for how to write tests which are skipped, and to demonstrate the difference between kunit_skip() and kunit_mark_skipped().
Because these tests do not pass (they're skipped), they are not enabled by default, or by the KUNIT_ALL_TESTS config option: they must be enabled explicitly by setting CONFIG_KUNIT_EXAMPLE_SKIP_TEST=y in either a .config or .kunitconfig file.
Signed-off-by: David Gow davidgow@google.com
 lib/kunit/Kconfig                   | 15 +++++++++
 lib/kunit/Makefile                  |  2 ++
 lib/kunit/kunit-example-skip-test.c | 52 +++++++++++++++++++++++++++++
 3 files changed, 69 insertions(+)
 create mode 100644 lib/kunit/kunit-example-skip-test.c
I don't know if this test is useful for a user of KUnit. It's not testing KUnit functionality (I see you added tests that the feature works in patch 1/3), but is rather a demonstration, and therefore dead code. I don't think the feature is difficult to understand from the API doc text.
Instead, would it be more helpful to add something to Documentation/dev-tools/kunit? Or perhaps just add something to lib/kunit/kunit-example-test.c? It'd avoid introducing more Kconfig
I'm in favor of putting it in kunit-example-test.c as well.
But I hear there was pushback to have a non-passing test in the example? I guess the fear is that someone will see something that doesn't say "passed" in the example output and think something has gone wrong?
Hence this more conservative change. But I hope that in the absence of any replies in opposition, we can just keep one example-test.c
Maybe I misunderstood, but kunit_skip*() isn't supposed to change the test ok/fail state, right?
That's the behaviour I'd expect at least.
So if the test case deliberately doesn't change the state, but just skips, it should be fine in example-test.c.
Thanks, -- Marco
On Thu, May 27, 2021 at 2:29 AM Daniel Latypov dlatypov@google.com wrote:
On Wed, May 26, 2021 at 1:56 AM 'Marco Elver' via KUnit Development kunit-dev@googlegroups.com wrote:
On Wed, May 26, 2021 at 01:11AM -0700, David Gow wrote:
Add a new KUnit test suite which contains tests which are always skipped. This is used as an example for how to write tests which are skipped, and to demonstrate the difference between kunit_skip() and kunit_mark_skipped().
Because these tests do not pass (they're skipped), they are not enabled by default, or by the KUNIT_ALL_TESTS config option: they must be enabled explicitly by setting CONFIG_KUNIT_EXAMPLE_SKIP_TEST=y in either a .config or .kunitconfig file.
Signed-off-by: David Gow davidgow@google.com
 lib/kunit/Kconfig                   | 15 +++++++++
 lib/kunit/Makefile                  |  2 ++
 lib/kunit/kunit-example-skip-test.c | 52 +++++++++++++++++++++++++++++
 3 files changed, 69 insertions(+)
 create mode 100644 lib/kunit/kunit-example-skip-test.c
I don't know if this test is useful for a user of KUnit. It's not testing KUnit functionality (I see you added tests that the feature works in patch 1/3), but is rather a demonstration, and therefore dead code. I don't think the feature is difficult to understand from the API doc text.
Instead, would it be more helpful to add something to Documentation/dev-tools/kunit? Or perhaps just add something to lib/kunit/kunit-example-test.c? It'd avoid introducing more Kconfig
I'm in favor of putting it in kunit-example-test.c as well.
But I hear there was pushback to have a non-passing test in the example? I guess the fear is that someone will see something that doesn't say "passed" in the example output and think something has gone wrong?
Yeah, (a simpler version of) this was in kunit-example-test.c before. Brendan brought up the question of whether a test which skips all the time is useful in [1], but that was more in the context of this being a test of the functionality than an example. The other part of this grew out of the discussion of whether skipped tests should be treated as 'ok' or 'not ok' in the KTAP spec (somewhere in [2], probably), and whether or not a test being skipped was indicative of something going wrong.
Ultimately, I think there's some value in having an example test that's skipped, and if people are okay with it being in the example suite (and therefore enabled by default), I'm okay with the default KUnit run having a splotch of yellow amongst the green in the kunit_tool output.
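For reference, a test skipped via this series still reports as "ok" in the (K)TAP output, with the reason carried in a SKIP directive, roughly:

    ok 1 - example_skip_test # SKIP this test should be skipped

so parsers which only look at ok/not ok still see a passing run; only tools that understand directives (such as kunit_tool with the companion patch in this series) surface it as skipped.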
Hence this more conservative change. But I hope that in the absence of any replies in opposition, we can just keep one example-test.c
Agreed, I'll put this back in the existing example suite for v2: if there's any great opposition, I can always move it back out.
options at least.
Thanks, -- Marco
Cheers, -- David
[1]: https://lore.kernel.org/linux-kselftest/CAFd5g47auKoQPhCeMHSTMtE_9+fZ6eOHZko... [2]: https://lore.kernel.org/linux-kselftest/CY4PR13MB1175B804E31E502221BC8163FD8...
On Wed, May 26, 2021 at 1:11 AM 'David Gow' via KUnit Development kunit-dev@googlegroups.com wrote:
Add a new KUnit test suite which contains tests which are always skipped. This is used as an example for how to write tests which are skipped, and to demonstrate the difference between kunit_skip() and kunit_mark_skipped().
Because these tests do not pass (they're skipped), they are not enabled by default, or by the KUNIT_ALL_TESTS config option: they must be enabled explicitly by setting CONFIG_KUNIT_EXAMPLE_SKIP_TEST=y in either a .config or .kunitconfig file.
Signed-off-by: David Gow davidgow@google.com
Reviewed-by: Daniel Latypov dlatypov@google.com
LGTM, two minor nits, and as noted elsewhere, I'd like it if we could have this be in the normal example test. But even if that doesn't happen, this seems fine to me.
 lib/kunit/Kconfig                   | 15 +++++++++
 lib/kunit/Makefile                  |  2 ++
 lib/kunit/kunit-example-skip-test.c | 52 +++++++++++++++++++++++++++++
 3 files changed, 69 insertions(+)
 create mode 100644 lib/kunit/kunit-example-skip-test.c
diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig
index 0b5dfb001bac..399fe5f789f7 100644
--- a/lib/kunit/Kconfig
+++ b/lib/kunit/Kconfig
@@ -45,6 +45,21 @@ config KUNIT_EXAMPLE_TEST
 	  is intended for curious hackers who would like to understand how to use
 	  KUnit for kernel development.

+config KUNIT_EXAMPLE_SKIP_TEST
+	tristate "Skipped test example for KUnit"
+	default n
+	help
+	  Enables an example unit test that is always skipped.
+
+	  This test only exists to help new users understand what KUnit is and
+	  how it is used. Please refer to the example test itself,
+	  lib/kunit/example-test.c, for more information. This option is
+	  intended for curious hackers who would like to understand how to use
+	  KUnit for kernel development.
+
+	  Because this test does not pass, it is not enabled by
+	  CONFIG_KUNIT_ALL_TESTS
+
 config KUNIT_ALL_TESTS
 	tristate "All KUnit tests with satisfied dependencies"
 	help

diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile
index c49f4ffb6273..8a99ff2f83bd 100644
--- a/lib/kunit/Makefile
+++ b/lib/kunit/Makefile
@@ -18,3 +18,5 @@ obj-$(CONFIG_KUNIT_TEST) += string-stream-test.o
 endif

 obj-$(CONFIG_KUNIT_EXAMPLE_TEST) += kunit-example-test.o
+
+obj-$(CONFIG_KUNIT_EXAMPLE_SKIP_TEST) += kunit-example-skip-test.o

diff --git a/lib/kunit/kunit-example-skip-test.c b/lib/kunit/kunit-example-skip-test.c
new file mode 100644
index 000000000000..5395ee0be485
--- /dev/null
+++ b/lib/kunit/kunit-example-skip-test.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Example KUnit test which is always skipped.
+ *
+ * Copyright (C) 2021, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+
+#include <kunit/test.h>
+
+/*
+ * This test should always be skipped.
+ */
+
+static void example_skip_test(struct kunit *test)
+{
+	/* This line should run */
+	kunit_log(KERN_INFO, test, "You should not see a line below.");

Btw, why not "kunit_info(test, ...)"?

+
+	/* Skip (and abort) the test */
+	kunit_skip(test, "this test should be skipped");
+
+	/* This line should not execute */
+	kunit_log(KERN_INFO, test, "You should not see this line.");

Would it be more or less confusing to have

	KUNIT_FAIL(test, "We should not get to this line")

maybe in addition or instead of this log?

+}
+
+static void example_mark_skipped_test(struct kunit *test)
+{
+	/* This line should run */
+	kunit_log(KERN_INFO, test, "You should see a line below.");
+
+	/* Skip (but do not abort) the test */
+	kunit_mark_skipped(test, "this test should be skipped");
+
+	/* This line should run */
+	kunit_log(KERN_INFO, test, "You should see this line.");
+}
+
+static struct kunit_case example_skip_test_cases[] = {
+	KUNIT_CASE(example_skip_test),
+	KUNIT_CASE(example_mark_skipped_test),
+	{}
+};
+
+static struct kunit_suite example_skip_test_suite = {
+	.name = "example_skip",
+	.test_cases = example_skip_test_cases,
+};
+
+kunit_test_suites(&example_skip_test_suite);
+
+MODULE_LICENSE("GPL v2");
2.31.1.818.g46aad6cb9e-goog
On Wed, May 26, 2021 at 01:11AM -0700, David Gow wrote:
The kunit_mark_skipped() macro marks the current test as "skipped", with the provided reason. The kunit_skip() macro will mark the test as skipped, and abort the test.
The TAP specification supports this "SKIP directive" as a comment after the "ok" / "not ok" for a test. See the "Directives" section of the TAP spec for details: https://testanything.org/tap-specification.html#directives
The 'success' field for KUnit tests is replaced with a kunit_status enum, which can be SUCCESS, FAILURE, or SKIPPED, combined with a 'status_comment' containing information on why a test was skipped.
A new 'kunit_status' test suite is added to test this.
Signed-off-by: David Gow davidgow@google.com
[...]
 include/kunit/test.h   | 68 ++++++++++++++++++++++++++++++++++++++----
 lib/kunit/kunit-test.c | 42 +++++++++++++++++++++++++-
 lib/kunit/test.c       | 51 ++++++++++++++++++-------------
 3 files changed, 134 insertions(+), 27 deletions(-)
Very nice, thank you.
Tested-by: Marco Elver elver@google.com
, with the below changes to test_kasan.c. If you would like an immediate user of kunit_skip(), please feel free to add the below patch to your series.
Thanks, -- Marco
------ >8 ------
From: Marco Elver <elver@google.com>
Date: Wed, 26 May 2021 10:43:12 +0200
Subject: [PATCH] kasan: test: make use of kunit_skip()
Make use of the recently added kunit_skip() to skip tests, as it permits TAP parsers to recognize if a test was deliberately skipped.
Signed-off-by: Marco Elver <elver@google.com>
---
 lib/test_kasan.c | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index cacbbbdef768..0a2029d14c91 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -111,17 +111,13 @@ static void kasan_test_exit(struct kunit *test)
 } while (0)

 #define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
-	if (!IS_ENABLED(config)) {					\
-		kunit_info((test), "skipping, " #config " required");	\
-		return;							\
-	}								\
+	if (!IS_ENABLED(config))					\
+		kunit_skip((test), "Test requires " #config "=y");	\
 } while (0)

 #define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do {			\
-	if (IS_ENABLED(config)) {					\
-		kunit_info((test), "skipping, " #config " enabled");	\
-		return;							\
-	}								\
+	if (IS_ENABLED(config))						\
+		kunit_skip((test), "Test requires " #config "=n");	\
 } while (0)

 static void kmalloc_oob_right(struct kunit *test)
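To make the effect concrete, a typical caller looks like the sketch below, modelled on the CONFIG_SLUB-gated cases in test_kasan.c (the test body is elided):

static void kmalloc_pagealloc_oob_right(struct kunit *test)
{
	/*
	 * With the change above, this now skips (and aborts) the test
	 * unless CONFIG_SLUB=y, instead of silently returning early.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	/* ... the actual out-of-bounds accesses follow in the real test ... */
}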
On Wed, May 26, 2021 at 5:03 PM Marco Elver elver@google.com wrote:
On Wed, May 26, 2021 at 01:11AM -0700, David Gow wrote:
The kunit_mark_skipped() macro marks the current test as "skipped", with the provided reason. The kunit_skip() macro will mark the test as skipped, and abort the test.
The TAP specification supports this "SKIP directive" as a comment after the "ok" / "not ok" for a test. See the "Directives" section of the TAP spec for details: https://testanything.org/tap-specification.html#directives
The 'success' field for KUnit tests is replaced with a kunit_status enum, which can be SUCCESS, FAILURE, or SKIPPED, combined with a 'status_comment' containing information on why a test was skipped.
A new 'kunit_status' test suite is added to test this.
Signed-off-by: David Gow davidgow@google.com
[...]
 include/kunit/test.h   | 68 ++++++++++++++++++++++++++++++++++++++----
 lib/kunit/kunit-test.c | 42 +++++++++++++++++++++++++-
 lib/kunit/test.c       | 51 ++++++++++++++++++-------------
 3 files changed, 134 insertions(+), 27 deletions(-)
Very nice, thank you.
Tested-by: Marco Elver <elver@google.com>
, with the below changes to test_kasan.c. If you would like an immediate user of kunit_skip(), please feel free to add the below patch to your series.
Thanks, -- Marco
Thanks! I'll add this to the next version.
Cheers, -- David
Hi David,
I love your patch! Perhaps something to improve:
[auto build test WARNING on linus/master]
[also build test WARNING on v5.13-rc3 next-20210526]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]
url:    https://github.com/0day-ci/linux/commits/David-Gow/kunit-Support-skipped-tes...
base:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git ad9f25d338605d26acedcaf3ba5fab5ca26f1c10
config: x86_64-randconfig-r025-20210526 (attached as .config)
compiler: clang version 13.0.0 (https://github.com/llvm/llvm-project 99155e913e9bad5f7f8a247f8bb3a3ff3da74af1)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # install x86_64 cross compiling tool for clang build
        # apt-get install binutils-x86-64-linux-gnu
        # https://github.com/0day-ci/linux/commit/83c919857a4ca319ed69d6feaf3d5b5325db...
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review David-Gow/kunit-Support-skipped-tests/20210526-161324
        git checkout 83c919857a4ca319ed69d6feaf3d5b5325dbdc29
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=x86_64
If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
All warnings (new ones prefixed by >>):
lib/kunit/kunit-test.c:458:2: warning: comparison of distinct pointer types ('typeof (__left) *' (aka 'enum kunit_status *') and 'typeof (__right) *' (aka 'int *')) [-Wcompare-distinct-pointer-types]
           KUNIT_EXPECT_EQ(test, fake.status, KUNIT_SUCCESS);
           ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   include/kunit/test.h:1320:2: note: expanded from macro 'KUNIT_EXPECT_EQ'
           KUNIT_BINARY_EQ_ASSERTION(test, KUNIT_EXPECTATION, left, right)
           ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   include/kunit/test.h:957:2: note: expanded from macro 'KUNIT_BINARY_EQ_ASSERTION'
           KUNIT_BINARY_EQ_MSG_ASSERTION(test, \
           ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   include/kunit/test.h:947:2: note: expanded from macro 'KUNIT_BINARY_EQ_MSG_ASSERTION'
           KUNIT_BASE_EQ_MSG_ASSERTION(test, \
           ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   include/kunit/test.h:858:2: note: expanded from macro 'KUNIT_BASE_EQ_MSG_ASSERTION'
           KUNIT_BASE_BINARY_ASSERTION(test, \
           ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   include/kunit/test.h:834:9: note: expanded from macro 'KUNIT_BASE_BINARY_ASSERTION'
           ((void)__typecheck(__left, __right)); \
                  ^~~~~~~~~~~~~~~~~~~~~~~~~~~~
   include/linux/minmax.h:20:28: note: expanded from macro '__typecheck'
           (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
               ~~~~~~~~~~~~~~ ^  ~~~~~~~~~~~~~~
   lib/kunit/kunit-test.c:459:2: error: array initializer must be an initializer list or string literal
           KUNIT_EXPECT_STREQ(test, fake.status_comment, "");
           ^
   include/kunit/test.h:1502:2: note: expanded from macro 'KUNIT_EXPECT_STREQ'
           KUNIT_BINARY_STR_EQ_ASSERTION(test, KUNIT_EXPECTATION, left, right)
           ^
   include/kunit/test.h:1218:2: note: expanded from macro 'KUNIT_BINARY_STR_EQ_ASSERTION'
           KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \
           ^
   include/kunit/test.h:1211:2: note: expanded from macro 'KUNIT_BINARY_STR_EQ_MSG_ASSERTION'
           KUNIT_BINARY_STR_ASSERTION(test, \
           ^
   include/kunit/test.h:1188:15: note: expanded from macro 'KUNIT_BINARY_STR_ASSERTION'
           typeof(left) __left = (left); \
                        ^
   lib/kunit/kunit-test.c:466:2: error: array initializer must be an initializer list or string literal
           KUNIT_EXPECT_STREQ(test, fake.status_comment, "Accepts format string: YES");
           ^
   include/kunit/test.h:1502:2: note: expanded from macro 'KUNIT_EXPECT_STREQ'
           KUNIT_BINARY_STR_EQ_ASSERTION(test, KUNIT_EXPECTATION, left, right)
           ^
   include/kunit/test.h:1218:2: note: expanded from macro 'KUNIT_BINARY_STR_EQ_ASSERTION'
           KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \
           ^
   include/kunit/test.h:1211:2: note: expanded from macro 'KUNIT_BINARY_STR_EQ_MSG_ASSERTION'
           KUNIT_BINARY_STR_ASSERTION(test, \
           ^
   include/kunit/test.h:1188:15: note: expanded from macro 'KUNIT_BINARY_STR_ASSERTION'
           typeof(left) __left = (left); \
                        ^
   1 warning and 2 errors generated.
vim +458 lib/kunit/kunit-test.c
   450	
   451	static void kunit_status_mark_skipped_test(struct kunit *test)
   452	{
   453		struct kunit fake;
   454	
   455		kunit_init_test(&fake, "fake test", NULL);
   456	
   457		/* Before: Should be SUCCESS with no comment. */
 > 458		KUNIT_EXPECT_EQ(test, fake.status, KUNIT_SUCCESS);
 > 459		KUNIT_EXPECT_STREQ(test, fake.status_comment, "");
   460	
   461		/* Mark the test as skipped. */
   462		kunit_mark_skipped(&fake, "Accepts format string: %s", "YES");
   463	
   464		/* After: Should be SKIPPED with our comment. */
   465		KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_SKIPPED);
   466		KUNIT_EXPECT_STREQ(test, fake.status_comment, "Accepts format string: YES");
   467	}
   468	
--- 0-DAY CI Kernel Test Service, Intel Corporation https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
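For anyone puzzling over the warning: a bare enumerator such as KUNIT_SUCCESS has type 'int' in C, while fake.status is 'enum kunit_status', and __typecheck() compares the two operand types as pointer types. A standalone sketch reproducing the complaint outside the kernel (the -Wcompare-distinct-pointer-types flag is the one clang names in the log above):

#include <stdio.h>

enum kunit_status { KUNIT_SUCCESS, KUNIT_FAILURE, KUNIT_SKIPPED };

/*
 * Mimics include/linux/minmax.h's __typecheck(): the pointer-type
 * comparison below is 'enum kunit_status *' vs 'int *', which is
 * exactly what clang flags in the log above.
 */
#define typecheck_ok(x, y) \
	(!!(sizeof((__typeof__(x) *)1 == (__typeof__(y) *)1)))

int main(void)
{
	enum kunit_status status = KUNIT_SUCCESS;

	printf("%d\n", typecheck_ok(status, KUNIT_SUCCESS));
	return 0;
}

The KUNIT_EXPECT_STREQ errors are a separate issue (typeof(left) is a char array there), which is presumably why the cover letter notes the dependency on the assertion typechecking fixes.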
On Wed, May 26, 2021 at 1:11 AM 'David Gow' via KUnit Development kunit-dev@googlegroups.com wrote:
The kunit_mark_skipped() macro marks the current test as "skipped", with the provided reason. The kunit_skip() macro will mark the test as skipped, and abort the test.
The TAP specification supports this "SKIP directive" as a comment after the "ok" / "not ok" for a test. See the "Directives" section of the TAP spec for details: https://testanything.org/tap-specification.html#directives
The 'success' field for KUnit tests is replaced with a kunit_status enum, which can be SUCCESS, FAILURE, or SKIPPED, combined with a 'status_comment' containing information on why a test was skipped.
A new 'kunit_status' test suite is added to test this.
Signed-off-by: David Gow davidgow@google.com
Reviewed-by: Daniel Latypov dlatypov@google.com
This is pretty exciting to see. Some minor nits below.
This change depends on the assertion typechecking fix here: https://lore.kernel.org/linux-kselftest/20210513193204.816681-1-davidgow@goo... Only the first two patches in the series are required.
This is the long-awaited follow-up to the skip tests RFC: https://lore.kernel.org/linux-kselftest/20200513042956.109987-1-davidgow@goo...
There are quite a few changes since that version, principally:
- A kunit_status enum is now used, with SKIPPED a distinct state
- The kunit_mark_skipped() and kunit_skip() macros now take printf-style format strings.
- There is now a kunit_status test suite providing basic tests of this functionality.
- The kunit_tool changes have been split into a separate commit.
- The example skipped tests have been expanded and moved to their own suite, which is not enabled by KUNIT_ALL_TESTS.
- A number of other fixes and changes here and there.
Cheers, -- David
 include/kunit/test.h   | 68 ++++++++++++++++++++++++++++++++++++++----
 lib/kunit/kunit-test.c | 42 +++++++++++++++++++++++++-
 lib/kunit/test.c       | 51 ++++++++++++++++++-------------
 3 files changed, 134 insertions(+), 27 deletions(-)
diff --git a/include/kunit/test.h b/include/kunit/test.h
index b68c61348121..40b536da027e 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -105,6 +105,18 @@ struct kunit;
 #define KUNIT_SUBTEST_INDENT "    "
 #define KUNIT_SUBSUBTEST_INDENT "        "

+/**
+ * enum kunit_status - Type of result for a test or test suite
+ * @KUNIT_SUCCESS: Denotes the test suite has not failed nor been skipped
+ * @KUNIT_FAILURE: Denotes the test has failed.
+ * @KUNIT_SKIPPED: Denotes the test has been skipped.
+ */
+enum kunit_status {
+	KUNIT_SUCCESS,
+	KUNIT_FAILURE,
+	KUNIT_SKIPPED,
+};
+
 /**
  * struct kunit_case - represents an individual test case.
  *
@@ -148,13 +160,20 @@ struct kunit_case {
 	const void* (*generate_params)(const void *prev, char *desc);

 	/* private: internal use only. */
-	bool success;
+	enum kunit_status status;
 	char *log;
 };

-static inline char *kunit_status_to_string(bool status)
+static inline char *kunit_status_to_string(enum kunit_status status)
nit: I personally would think this maps SKIPPED => "SKIPPED", etc. (If I didn't know all that logic lived in kunit tool).
I don't have any replacement names to suggest that I'm fully happy with, however. kunit_status_to_tap_str(), kunit_status_to_ok_notok(), eh.
 {
-	return status ? "ok" : "not ok";
+	switch (status) {
+	case KUNIT_SKIPPED:
+	case KUNIT_SUCCESS:
+		return "ok";
+	case KUNIT_FAILURE:
+		return "not ok";
+	}
+	return "invalid";
 }

 /**
@@ -212,6 +231,7 @@ struct kunit_suite {
 	struct kunit_case *test_cases;

 	/* private: internal use only */
+	char status_comment[256];
 	struct dentry *debugfs;
 	char *log;
 };
@@ -245,19 +265,21 @@ struct kunit {
 	 * be read after the test case finishes once all threads associated
 	 * with the test case have terminated.
 	 */
-	bool success; /* Read only after test_case finishes! */
 	spinlock_t lock; /* Guards all mutable test state. */
+	enum kunit_status status; /* Read only after test_case finishes! */
 	/*
 	 * Because resources is a list that may be updated multiple times (with
 	 * new resources) from any thread associated with a test case, we must
 	 * protect it with some type of lock.
 	 */
 	struct list_head resources; /* Protected by lock. */
+
+	char status_comment[256];
 };

 static inline void kunit_set_failure(struct kunit *test)
 {
-	WRITE_ONCE(test->success, false);
+	WRITE_ONCE(test->status, KUNIT_FAILURE);
 }

 void kunit_init_test(struct kunit *test, const char *name, char *log);
@@ -348,7 +370,7 @@ static inline int kunit_run_all_tests(void)
 #define kunit_suite_for_each_test_case(suite, test_case)		\
 	for (test_case = suite->test_cases; test_case->run_case; test_case++)

-bool kunit_suite_has_succeeded(struct kunit_suite *suite);
+enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite);

 /*
  * Like kunit_alloc_resource() below, but returns the struct kunit_resource
@@ -612,6 +634,40 @@ void kunit_cleanup(struct kunit *test);

 void kunit_log_append(char *log, const char *fmt, ...);

+/**
+ * kunit_mark_skipped() - Marks @test_or_suite as skipped
+ *
+ * @test_or_suite: The test context object.
+ * @fmt: A printk() style format string.
+ *
+ * Marks the test as skipped. @fmt is given output as the test status
+ * comment, typically the reason the test was skipped.
+ *
+ * Test execution continues after kunit_mark_skipped() is called.
+ */
+#define kunit_mark_skipped(test_or_suite, fmt, ...)			\
+	do {								\
+		WRITE_ONCE((test_or_suite)->status, KUNIT_SKIPPED);	\
+		scnprintf((test_or_suite)->status_comment, 256, fmt, ##__VA_ARGS__); \
+	} while (0)
+
+/**
+ * kunit_skip() - Marks @test_or_suite as skipped
+ *
+ * @test_or_suite: The test context object.
+ * @fmt: A printk() style format string.
+ *
+ * Skips the test. @fmt is given output as the test status
+ * comment, typically the reason the test was skipped.
+ *
+ * Test execution is halted after kunit_skip() is called.
+ */
+#define kunit_skip(test_or_suite, fmt, ...)				\
+	do {								\
+		kunit_mark_skipped((test_or_suite), fmt, ##__VA_ARGS__);\
+		kunit_try_catch_throw(&((test_or_suite)->try_catch));	\
+	} while (0)
+
 /*
  * printk and log to per-test or per-suite log buffer.  Logging only done
  * if CONFIG_KUNIT_DEBUGFS is 'y'; if it is 'n', no log is allocated/used.
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index 69f902440a0e..d69efcbed624 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -437,7 +437,47 @@ static void kunit_log_test(struct kunit *test)
 #endif
 }

+static void kunit_status_set_failure_test(struct kunit *test)
+{
+	struct kunit fake;
+
+	kunit_init_test(&fake, "fake test", NULL);
+
+	KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_SUCCESS);
+	kunit_set_failure(&fake);
+	KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_FAILURE);
+}
+
+static void kunit_status_mark_skipped_test(struct kunit *test)
+{
+	struct kunit fake;
+
+	kunit_init_test(&fake, "fake test", NULL);
+
+	/* Before: Should be SUCCESS with no comment. */
+	KUNIT_EXPECT_EQ(test, fake.status, KUNIT_SUCCESS);
+	KUNIT_EXPECT_STREQ(test, fake.status_comment, "");
+
+	/* Mark the test as skipped. */
+	kunit_mark_skipped(&fake, "Accepts format string: %s", "YES");
+
+	/* After: Should be SKIPPED with our comment. */
+	KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_SKIPPED);
+	KUNIT_EXPECT_STREQ(test, fake.status_comment, "Accepts format string: YES");
+}
+
+static struct kunit_case kunit_status_test_cases[] = {
+	KUNIT_CASE(kunit_status_set_failure_test),
+	KUNIT_CASE(kunit_status_mark_skipped_test),
+	{}
+};
+
+static struct kunit_suite kunit_status_test_suite = {
+	.name = "kunit_status",
+	.test_cases = kunit_status_test_cases,
+};
+
 kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite,
-		  &kunit_log_test_suite);
+		  &kunit_log_test_suite, &kunit_status_test_suite);

 MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 2f6cc0123232..0ee07705d2b0 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -98,12 +98,14 @@ static void kunit_print_subtest_start(struct kunit_suite *suite)

 static void kunit_print_ok_not_ok(void *test_or_suite,
 				  bool is_test,
-				  bool is_ok,
+				  enum kunit_status status,
 				  size_t test_number,
-				  const char *description)
+				  const char *description,
+				  const char *directive)
 {
 	struct kunit_suite *suite = is_test ? NULL : test_or_suite;
 	struct kunit *test = is_test ? test_or_suite : NULL;
+	const char *directive_header = (status == KUNIT_SKIPPED) ? " # SKIP " : "";

 	/*
 	 * We do not log the test suite results as doing so would
@@ -114,25 +116,31 @@ static void kunit_print_ok_not_ok(void *test_or_suite,
 	 * representation.
 	 */
 	if (suite)
-		pr_info("%s %zd - %s\n",
-			kunit_status_to_string(is_ok),
-			test_number, description);
+		pr_info("%s %zd - %s%s%s\n",
+			kunit_status_to_string(status),
+			test_number, description,
+			directive_header, directive ? directive : "");
 	else
-		kunit_log(KERN_INFO, test, KUNIT_SUBTEST_INDENT "%s %zd - %s",
Hmm, why not kunit_info(test, ...)?
-			  kunit_status_to_string(is_ok),
-			  test_number, description);
+		kunit_log(KERN_INFO, test,
+			  KUNIT_SUBTEST_INDENT "%s %zd - %s%s%s",
+			  kunit_status_to_string(status),
+			  test_number, description,
+			  directive_header, directive ? directive : "");
 }

-bool kunit_suite_has_succeeded(struct kunit_suite *suite)
+enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite)
 {
 	const struct kunit_case *test_case;
+	enum kunit_status status = KUNIT_SKIPPED;

 	kunit_suite_for_each_test_case(suite, test_case) {
-		if (!test_case->success)
-			return false;
+		if (test_case->status == KUNIT_FAILURE)
+			return KUNIT_FAILURE;
+		else if (test_case->status == KUNIT_SUCCESS)
+			status = KUNIT_SUCCESS;
 	}

-	return true;
+	return status;
 }
 EXPORT_SYMBOL_GPL(kunit_suite_has_succeeded);

@@ -143,7 +151,8 @@ static void kunit_print_subtest_end(struct kunit_suite *suite)
 	kunit_print_ok_not_ok((void *)suite, false,
 			      kunit_suite_has_succeeded(suite),
 			      kunit_suite_counter++,
-			      suite->name);
+			      suite->name,
+			      suite->status_comment);
 }

 unsigned int kunit_test_case_num(struct kunit_suite *suite,
@@ -252,7 +261,8 @@ void kunit_init_test(struct kunit *test, const char *name, char *log)
 	test->log = log;
 	if (test->log)
 		test->log[0] = '\0';
-	test->success = true;
+	test->status = KUNIT_SUCCESS;
+	test->status_comment[0] = '\0';
 }
 EXPORT_SYMBOL_GPL(kunit_init_test);

@@ -376,7 +386,8 @@ static void kunit_run_case_catch_errors(struct kunit_suite *suite,
 	context.test_case = test_case;
 	kunit_try_catch_run(try_catch, &context);

-	test_case->success = test->success;
+	test_case->status = test->status;
 }

 int kunit_run_tests(struct kunit_suite *suite)
@@ -388,7 +399,6 @@ int kunit_run_tests(struct kunit_suite *suite)

 	kunit_suite_for_each_test_case(suite, test_case) {
 		struct kunit test = { .param_value = NULL, .param_index = 0 };
-		bool test_success = true;

 		if (test_case->generate_params) {
 			/* Get initial param. */
@@ -398,7 +408,6 @@ int kunit_run_tests(struct kunit_suite *suite)

 		do {
 			kunit_run_case_catch_errors(suite, test_case, &test);
-			test_success &= test_case->success;

 			if (test_case->generate_params) {
 				if (param_desc[0] == '\0') {
@@ -410,7 +419,7 @@ int kunit_run_tests(struct kunit_suite *suite)
 					  KUNIT_SUBTEST_INDENT
 					  "# %s: %s %d - %s",
 					  test_case->name,
-					  kunit_status_to_string(test.success),
+					  kunit_status_to_string(test.status),
 					  test.param_index + 1, param_desc);

 				/* Get next param. */
@@ -420,9 +429,10 @@ int kunit_run_tests(struct kunit_suite *suite)
 			}
 		} while (test.param_value);

-		kunit_print_ok_not_ok(&test, true, test_success,
+		kunit_print_ok_not_ok(&test, true, test_case->status,
 				      kunit_test_case_num(suite, test_case),
-				      test_case->name);
+				      test_case->name,
+				      test.status_comment);
 	}

 	kunit_print_subtest_end(suite);
@@ -434,6 +444,7 @@ EXPORT_SYMBOL_GPL(kunit_run_tests);
 static void kunit_init_suite(struct kunit_suite *suite)
 {
 	kunit_debugfs_create_suite(suite);
+	suite->status_comment[0] = '\0';
 }

 int __kunit_test_suites_init(struct kunit_suite * const * const suites)
2.31.1.818.g46aad6cb9e-goog
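Beyond the in-tree tests, a typical consumer of the new API gates on runtime state rather than Kconfig; a minimal sketch, where my_device_find() and my_device_selftest() are hypothetical stand-ins rather than anything in this series:

static void my_device_test(struct kunit *test)
{
	struct my_device *dev = my_device_find();	/* hypothetical helper */

	/*
	 * kunit_skip() marks the result SKIPPED and aborts the test, so
	 * none of the assertions below run when the device is absent.
	 */
	if (!dev)
		kunit_skip(test, "device not present");

	KUNIT_EXPECT_EQ(test, 0, my_device_selftest(dev));
}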
On Thu, May 27, 2021 at 4:49 AM Daniel Latypov dlatypov@google.com wrote:
On Wed, May 26, 2021 at 1:11 AM 'David Gow' via KUnit Development kunit-dev@googlegroups.com wrote:
[...]
Reviewed-by: Daniel Latypov dlatypov@google.com
This is pretty exciting to see. Some minor nits below.
Thanks: I'll take these suggestions on board for v2.
[...]
-static inline char *kunit_status_to_string(bool status)
+static inline char *kunit_status_to_string(enum kunit_status status)
nit: I personally would think this maps SKIPPED => "SKIPPED", etc. (If I didn't know all that logic lived in kunit tool).
I don't have any replacement names to suggest that I'm fully happy with, however. kunit_status_to_tap_str(), kunit_status_to_ok_notok(), eh.
Yeah: I kept the existing names for these functions, which which worked well when it was just a bool.
The TAP spec seems to just call this "ok/not ok", and given we already have kunit_print_okay_not_ok(), kunit_status_to_ok_not_ok() seems the best of those options.
[...]
-		kunit_log(KERN_INFO, test, KUNIT_SUBTEST_INDENT "%s %zd - %s",
Hmm, why not kunit_info(test, ...)?
No reason: it was kunit_log() originally, and I didn't change it. I can replace this for v2 if you prefer.
[...]