This logic depends on the kernel logging a message containing 'kunit test case crashed', but there is no corresponding logic to do so.
This is likely a relic of the revision process KUnit initially went through when being upstreamed.
Delete it given
- it's been missing for years and likely won't get implemented
- the parser has been moving to be a more general KTAP parser, kunit-only magic like this isn't how we'd want to implement it.
Signed-off-by: Daniel Latypov dlatypov@google.com
---
 tools/testing/kunit/kunit_parser.py           | 21 ------
 tools/testing/kunit/kunit_tool_test.py        | 17 ++---
 .../test_data/test_is_test_passed-crash.log   | 70 -------------------
 3 files changed, 4 insertions(+), 104 deletions(-)
 delete mode 100644 tools/testing/kunit/test_data/test_is_test_passed-crash.log
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index 807ed2bd6832..7a0faf527a98 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -475,26 +475,6 @@ def parse_diagnostic(lines: LineStream) -> List[str]:
 		log.append(lines.pop())
 	return log
 
-DIAGNOSTIC_CRASH_MESSAGE = re.compile(r'^# .*?: kunit test case crashed!$')
-
-def parse_crash_in_log(test: Test) -> bool:
-	"""
-	Iterate through the lines of the log to parse for crash message.
-	If crash message found, set status to crashed and return True.
-	Otherwise return False.
-
-	Parameters:
-	test - Test object for current test being parsed
-
-	Return:
-	True if crash message found in log
-	"""
-	for line in test.log:
-		if DIAGNOSTIC_CRASH_MESSAGE.match(line):
-			test.status = TestStatus.TEST_CRASHED
-			return True
-	return False
-
 # Printing helper methods:
@@ -682,7 +662,6 @@ def bubble_up_test_results(test: Test) -> None:
 	Parameters:
 	test - Test object for current test being parsed
 	"""
-	parse_crash_in_log(test)
 	subtests = test.subtests
 	counts = test.counts
 	status = test.status
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index 210df0f443e6..1200e451c418 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -230,15 +230,6 @@ class KUnitParserTest(unittest.TestCase):
 		print_mock.stop()
 		self.assertEqual(0, len(result.subtests))
 
-	def test_crashed_test(self):
-		crashed_log = test_data_path('test_is_test_passed-crash.log')
-		with open(crashed_log) as file:
-			result = kunit_parser.parse_run_tests(
-				file.readlines())
-		self.assertEqual(
-			kunit_parser.TestStatus.TEST_CRASHED,
-			result.status)
-
 	def test_skipped_test(self):
 		skipped_log = test_data_path('test_skip_tests.log')
 		with open(skipped_log) as file:
@@ -478,10 +469,10 @@ class KUnitJsonTest(unittest.TestCase):
 			result["sub_groups"][1]["test_cases"][0])
 
 	def test_crashed_test_json(self):
-		result = self._json_for('test_is_test_passed-crash.log')
+		result = self._json_for('test_kernel_panic_interrupt.log')
 		self.assertEqual(
-			{'name': 'example_simple_test', 'status': 'ERROR'},
-			result["sub_groups"][1]["test_cases"][0])
+			{'name': '', 'status': 'ERROR'},
+			result["sub_groups"][2]["test_cases"][1])
 
 	def test_skipped_test_json(self):
 		result = self._json_for('test_skip_tests.log')
@@ -562,7 +553,7 @@ class KUnitMainTest(unittest.TestCase):
 	def test_exec_no_tests(self):
 		self.linux_source_mock.run_kernel = mock.Mock(return_value=['TAP version 14', '1..0'])
 		with self.assertRaises(SystemExit) as e:
-		  kunit.main(['run'], self.linux_source_mock)
+			kunit.main(['run'], self.linux_source_mock)
 		self.linux_source_mock.run_kernel.assert_called_once_with(
 			args=None, build_dir='.kunit', filter_glob='', timeout=300)
 		self.print_mock.assert_any_call(StrContains(' 0 tests run!'))
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-crash.log b/tools/testing/kunit/test_data/test_is_test_passed-crash.log
deleted file mode 100644
index 4d97f6708c4a..000000000000
--- a/tools/testing/kunit/test_data/test_is_test_passed-crash.log
+++ /dev/null
@@ -1,70 +0,0 @@
-printk: console [tty0] enabled
-printk: console [mc-1] enabled
-TAP version 14
-1..2
- # Subtest: sysctl_test
- 1..8
- # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed
- ok 1 - sysctl_test_dointvec_null_tbl_data
- # sysctl_test_dointvec_table_maxlen_unset: sysctl_test_dointvec_table_maxlen_unset passed
- ok 2 - sysctl_test_dointvec_table_maxlen_unset
- # sysctl_test_dointvec_table_len_is_zero: sysctl_test_dointvec_table_len_is_zero passed
- ok 3 - sysctl_test_dointvec_table_len_is_zero
- # sysctl_test_dointvec_table_read_but_position_set: sysctl_test_dointvec_table_read_but_position_set passed
- ok 4 - sysctl_test_dointvec_table_read_but_position_set
- # sysctl_test_dointvec_happy_single_positive: sysctl_test_dointvec_happy_single_positive passed
- ok 5 - sysctl_test_dointvec_happy_single_positive
- # sysctl_test_dointvec_happy_single_negative: sysctl_test_dointvec_happy_single_negative passed
- ok 6 - sysctl_test_dointvec_happy_single_negative
- # sysctl_test_dointvec_single_less_int_min: sysctl_test_dointvec_single_less_int_min passed
- ok 7 - sysctl_test_dointvec_single_less_int_min
- # sysctl_test_dointvec_single_greater_int_max: sysctl_test_dointvec_single_greater_int_max passed
- ok 8 - sysctl_test_dointvec_single_greater_int_max
-kunit sysctl_test: all tests passed
-ok 1 - sysctl_test
- # Subtest: example
- 1..2
-init_suite
- # example_simple_test: initializing
-Stack:
- 6016f7db 6f81bd30 6f81bdd0 60021450
- 6024b0e8 60021440 60018bbe 16f81bdc0
- 00000001 6f81bd30 6f81bd20 6f81bdd0
-Call Trace:
- [<6016f7db>] ? kunit_try_run_case+0xab/0xf0
- [<60021450>] ? set_signals+0x0/0x60
- [<60021440>] ? get_signals+0x0/0x10
- [<60018bbe>] ? kunit_um_run_try_catch+0x5e/0xc0
- [<60021450>] ? set_signals+0x0/0x60
- [<60021440>] ? get_signals+0x0/0x10
- [<60018bb3>] ? kunit_um_run_try_catch+0x53/0xc0
- [<6016f321>] ? kunit_run_case_catch_errors+0x121/0x1a0
- [<60018b60>] ? kunit_um_run_try_catch+0x0/0xc0
- [<600189e0>] ? kunit_um_throw+0x0/0x180
- [<6016f730>] ? kunit_try_run_case+0x0/0xf0
- [<6016f600>] ? kunit_catch_run_case+0x0/0x130
- [<6016edd0>] ? kunit_vprintk+0x0/0x30
- [<6016ece0>] ? kunit_fail+0x0/0x40
- [<6016eca0>] ? kunit_abort+0x0/0x40
- [<6016ed20>] ? kunit_printk_emit+0x0/0xb0
- [<6016f200>] ? kunit_run_case_catch_errors+0x0/0x1a0
- [<6016f46e>] ? kunit_run_tests+0xce/0x260
- [<6005b390>] ? unregister_console+0x0/0x190
- [<60175b70>] ? suite_kunit_initexample_test_suite+0x0/0x20
- [<60001cbb>] ? do_one_initcall+0x0/0x197
- [<60001d47>] ? do_one_initcall+0x8c/0x197
- [<6005cd20>] ? irq_to_desc+0x0/0x30
- [<60002005>] ? kernel_init_freeable+0x1b3/0x272
- [<6005c5ec>] ? printk+0x0/0x9b
- [<601c0086>] ? kernel_init+0x26/0x160
- [<60014442>] ? new_thread_handler+0x82/0xc0
-
- # example_simple_test: kunit test case crashed!
- # example_simple_test: example_simple_test failed
- not ok 1 - example_simple_test
- # example_mock_test: initializing
- # example_mock_test: example_mock_test passed
- ok 2 - example_mock_test
-kunit example: one or more tests failed
-not ok 2 - example
-List of all partitions:
base-commit: 59729170afcd4900e08997a482467ffda8d88c7f
Consider this invocation:

$ ./tools/testing/kunit/kunit.py parse <<EOF
TAP version 14
1..2
ok 1 - suite
# Subtest: no_tests_suite
# catastrophic error!
not ok 1 - no_tests_suite
EOF
It will have a 0 exit code even though there's a "not ok".
Consider this one:

$ ./tools/testing/kunit/kunit.py parse <<EOF
TAP version 14
1..2
ok 1 - suite
not ok 1 - no_tests_suite
EOF
It will have a non-zero exit code.
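(A quick way to confirm this, using plain shell rather than anything kunit-specific:

$ ./tools/testing/kunit/kunit.py parse <<EOF
...same input as above...
EOF
$ echo $?

echo $? prints the exit status of the previous command, so it shows 0 for the first input and non-zero for the second.)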
Why? We have this line in kunit_parser.py:
parent_test = parse_test_header(lines, test)
where we have special handling when we see "# Subtest", and we ignore the explicitly reported "not ok 1" status!
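To make the flow concrete, here is a condensed, paraphrased sketch of the pre-patch parse_test() logic (not the verbatim code):

  parent_test = parse_test_header(lines, test)  # matches "# Subtest: ..."
  ...
  # later, near the end of parse_test():
  if parent_test and len(subtests) == 0:
      test.status = TestStatus.NO_TESTS  # clobbers whatever the "not ok 1" line reported
      test.add_error('0 tests run!')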
Also, at the moment, NO_TESTS at the suite level only results in a non-zero exit code when there's just one suite.
This change is the minimal one to make sure we don't overwrite it.
Signed-off-by: Daniel Latypov dlatypov@google.com
---
 tools/testing/kunit/kunit_parser.py                     | 7 +++++--
 .../test_data/test_is_test_passed-no_tests_no_plan.log  | 2 +-
 2 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index 7a0faf527a98..45c2c5837281 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -775,8 +775,11 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
 
 	# Check for there being no tests
 	if parent_test and len(subtests) == 0:
-		test.status = TestStatus.NO_TESTS
-		test.add_error('0 tests run!')
+		# Don't override a bad status if this test had one reported.
+		# Assumption: no subtests means CRASHED is from Test.__init__()
+		if test.status in (TestStatus.TEST_CRASHED, TestStatus.SUCCESS):
+			test.status = TestStatus.NO_TESTS
+			test.add_error('0 tests run!')
 
 	# Add statuses to TestCounts attribute in Test object
 	bubble_up_test_results(test)
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log
index dd873c981108..4f81876ee6f1 100644
--- a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log
+++ b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log
@@ -3,5 +3,5 @@ TAP version 14
   # Subtest: suite
   1..1
     # Subtest: case
-    ok 1 - case # SKIP
+    ok 1 - case
   ok 1 - suite
On Wed, Apr 27, 2022 at 1:33 AM Daniel Latypov dlatypov@google.com wrote:
Consider this invocation:

$ ./tools/testing/kunit/kunit.py parse <<EOF
TAP version 14
1..2
ok 1 - suite
# Subtest: no_tests_suite
# catastrophic error!
not ok 1 - no_tests_suite
EOF
It will have a 0 exit code even though there's a "not ok".
Consider this one:

$ ./tools/testing/kunit/kunit.py parse <<EOF
TAP version 14
1..2
ok 1 - suite
not ok 1 - no_tests_suite
EOF
It will have a non-zero exit code.
Why? We have this line in kunit_parser.py:
parent_test = parse_test_header(lines, test)
where we have special handling when we see "# Subtest", and we ignore the explicitly reported "not ok 1" status!
Also, at the moment, NO_TESTS at the suite level only results in a non-zero exit code when there's just one suite.
This change is the minimal one to make sure we don't overwrite it.
Signed-off-by: Daniel Latypov dlatypov@google.com
This seems sensible to me, though it doesn't change a lot in practice given that the in-kernel KUnit will mark empty suites as skipped: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/lib/...
(And I think that is probably the correct thing for it to do, as "no tests run" seems closer to skipped than to passed or failed, which are the other options KTAP gives us.)
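(For illustration only, not lifted from the kernel sources: with that in-kernel behavior, an empty suite's result line comes back with a SKIP directive, something like

ok 1 - no_tests_suite # SKIP

so kunit_tool sees an explicit skip instead of having to infer NO_TESTS on its own.)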
That being said, it's definitely true that we don't want to override a "not ok" needlessly in kunit_tool, so this patch is a definite improvement.
Reviewed-by: David Gow davidgow@google.com
-- David
On Tue, Apr 26, 2022 at 1:33 PM Daniel Latypov dlatypov@google.com wrote:
Consider this invocation:

$ ./tools/testing/kunit/kunit.py parse <<EOF
TAP version 14
1..2
ok 1 - suite
# Subtest: no_tests_suite
# catastrophic error!
not ok 1 - no_tests_suite
EOF
It will have a 0 exit code even though there's a "not ok".
Consider this one:

$ ./tools/testing/kunit/kunit.py parse <<EOF
TAP version 14
1..2
ok 1 - suite
not ok 1 - no_tests_suite
EOF
It will have a non-zero exit code.
Why? We have this line in kunit_parser.py:
parent_test = parse_test_header(lines, test)
where we have special handling when we see "# Subtest", and we ignore the explicitly reported "not ok 1" status!
Also, at the moment, NO_TESTS at the suite level only results in a non-zero exit code when there's just one suite.
This change is the minimal one to make sure we don't overwrite it.
Signed-off-by: Daniel Latypov dlatypov@google.com
Reviewed-by: Brendan Higgins brendanhiggins@google.com
There should be no behavioral changes from this patch.
This patch removes redundant comment text, inlines a function used in only one place, and makes other such minor tweaks.
Signed-off-by: Daniel Latypov dlatypov@google.com
---
 tools/testing/kunit/kunit_parser.py | 71 +++++++----------------------
 1 file changed, 17 insertions(+), 54 deletions(-)
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index 45c2c5837281..d56d530fab24 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -46,10 +46,8 @@ class Test(object):
 
 	def __str__(self) -> str:
 		"""Returns string representation of a Test class object."""
-		return ('Test(' + str(self.status) + ', ' + self.name +
-			', ' + str(self.expected_count) + ', ' +
-			str(self.subtests) + ', ' + str(self.log) + ', ' +
-			str(self.counts) + ')')
+		return (f'Test({self.status}, {self.name}, {self.expected_count}, '
+			f'{self.subtests}, {self.log}, {self.counts})')
 
 	def __repr__(self) -> str:
 		"""Returns string representation of a Test class object."""
@@ -58,7 +56,7 @@ class Test(object):
 	def add_error(self, error_message: str) -> None:
 		"""Records an error that occurred while parsing this test."""
 		self.counts.errors += 1
-		print_error('Test ' + self.name + ': ' + error_message)
+		print_with_timestamp(red('[ERROR]') + f' Test: {self.name}: {error_message}')
 
 class TestStatus(Enum):
 	"""An enumeration class to represent the status of a test."""
@@ -92,8 +90,7 @@ class TestCounts:
 		self.errors = 0
 
 	def __str__(self) -> str:
-		"""Returns the string representation of a TestCounts object.
-		"""
+		"""Returns the string representation of a TestCounts object."""
 		return ('Passed: ' + str(self.passed) +
 			', Failed: ' + str(self.failed) +
 			', Crashed: ' + str(self.crashed) +
@@ -130,30 +127,19 @@ class TestCounts:
 		if self.total() == 0:
 			return TestStatus.NO_TESTS
 		elif self.crashed:
-			# If one of the subtests crash, the expected status
-			# of the Test is crashed.
+			# Crashes should take priority.
 			return TestStatus.TEST_CRASHED
 		elif self.failed:
-			# Otherwise if one of the subtests fail, the
-			# expected status of the Test is failed.
 			return TestStatus.FAILURE
 		elif self.passed:
-			# Otherwise if one of the subtests pass, the
-			# expected status of the Test is passed.
+			# No failures or crashes, looks good!
 			return TestStatus.SUCCESS
 		else:
-			# Finally, if none of the subtests have failed,
-			# crashed, or passed, the expected status of the
-			# Test is skipped.
+			# We have only skipped tests.
 			return TestStatus.SKIPPED
 
 	def add_status(self, status: TestStatus) -> None:
-		"""
-		Increments count of inputted status.
-
-		Parameters:
-		status - status to be added to the TestCounts object
-		"""
+		"""Increments the count for `status`."""
 		if status == TestStatus.SUCCESS:
 			self.passed += 1
 		elif status == TestStatus.FAILURE:
@@ -283,11 +269,9 @@ def check_version(version_num: int, accepted_versions: List[int],
 	test - Test object for current test being parsed
 	"""
 	if version_num < min(accepted_versions):
-		test.add_error(version_type +
-			' version lower than expected!')
+		test.add_error(f'{version_type} version lower than expected!')
 	elif version_num > max(accepted_versions):
-		test.add_error(
-			version_type + ' version higher than expected!')
+		test.add_error(f'{version_type} version higer than expected!')
 
 def parse_ktap_header(lines: LineStream, test: Test) -> bool:
 	"""
@@ -440,8 +424,7 @@ def parse_test_result(lines: LineStream, test: Test,
 	# Check test num
 	num = int(match.group(2))
 	if num != expected_num:
-		test.add_error('Expected test number ' +
-			str(expected_num) + ' but found ' + str(num))
+		test.add_error(f'Expected test number {expected_num} but found {num}')
 
 	# Set status of test object
 	status = match.group(1)
@@ -529,7 +512,7 @@ def format_test_divider(message: str, len_message: int) -> str:
 	# calculate number of dashes for each side of the divider
 	len_1 = int(difference / 2)
 	len_2 = difference - len_1
-	return ('=' * len_1) + ' ' + message + ' ' + ('=' * len_2)
+	return ('=' * len_1) + f' {message} ' + ('=' * len_2)
 
 def print_test_header(test: Test) -> None:
 	"""
@@ -545,20 +528,13 @@ def print_test_header(test: Test) -> None:
 	message = test.name
 	if test.expected_count:
 		if test.expected_count == 1:
-			message += (' (' + str(test.expected_count) +
-				' subtest)')
+			message += ' (1 subtest)'
 		else:
-			message += (' (' + str(test.expected_count) +
-				' subtests)')
+			message += f' ({test.expected_count} subtests)'
 	print_with_timestamp(format_test_divider(message, len(message)))
 
 def print_log(log: Iterable[str]) -> None:
-	"""
-	Prints all strings in saved log for test in yellow.
-
-	Parameters:
-	log - Iterable object with all strings saved in log for test
-	"""
+	"""Prints all strings in saved log for test in yellow."""
 	for m in log:
 		print_with_timestamp(yellow(m))
 
@@ -635,20 +611,7 @@ def print_summary_line(test: Test) -> None:
 		color = yellow
 	else:
 		color = red
-	counts = test.counts
-	print_with_timestamp(color('Testing complete. ' + str(counts)))
-
-def print_error(error_message: str) -> None:
-	"""
-	Prints error message with error format.
-
-	Example:
-	"[ERROR] Test example: missing test plan!"
-
-	Parameters:
-	error_message - message describing error
-	"""
-	print_with_timestamp(red('[ERROR] ') + error_message)
+	print_with_timestamp(color(f'Testing complete. {test.counts}'))
 
 # Other methods:
 
@@ -794,7 +757,7 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
 def parse_run_tests(kernel_output: Iterable[str]) -> Test:
 	"""
 	Using kernel output, extract KTAP lines, parse the lines for test
-	results and print condensed test results and summary line .
+	results and print condensed test results and summary line.
 
 	Parameters:
 	kernel_output - Iterable object contains lines of kernel output
On Wed, Apr 27, 2022 at 1:33 AM Daniel Latypov dlatypov@google.com wrote:
There should be no behavioral changes from this patch.
This patch removes redundant comment text, inlines a function used in only one place, and makes other such minor tweaks.
Signed-off-by: Daniel Latypov dlatypov@google.com
These all look pretty sensible to me.
Reviewed-by: David Gow davidgow@google.com
-- David
On Tue, Apr 26, 2022 at 1:33 PM Daniel Latypov dlatypov@google.com wrote:
There should be no behavioral changes from this patch.
This patch removes redundant comment text, inlines a function used in only one place, and makes other such minor tweaks.
Signed-off-by: Daniel Latypov dlatypov@google.com
Reviewed-by: Brendan Higgins brendanhiggins@google.com
On Wed, Apr 27, 2022 at 1:33 AM Daniel Latypov dlatypov@google.com wrote:
This logic depends on the kernel logging a message containing 'kunit test case crashed', but there is no corresponding logic to do so.
This is likely a relic of the revision process KUnit initially went through when being upstreamed.
Delete it given
- it's been missing for years and likely won't get implemented
- the parser has been moving to be a more general KTAP parser, kunit-only magic like this isn't how we'd want to implement it.
Signed-off-by: Daniel Latypov dlatypov@google.com
Makes sense, thanks!
Reviewed-by: David Gow davidgow@google.com
-- David
@@ -562,7 +553,7 @@ class KUnitMainTest(unittest.TestCase):
 	def test_exec_no_tests(self):
 		self.linux_source_mock.run_kernel = mock.Mock(return_value=['TAP version 14', '1..0'])
 		with self.assertRaises(SystemExit) as e:
-		  kunit.main(['run'], self.linux_source_mock)
+			kunit.main(['run'], self.linux_source_mock)
Noting that this is just an indentation fix.
 		self.linux_source_mock.run_kernel.assert_called_once_with(
 			args=None, build_dir='.kunit', filter_glob='', timeout=300)
 		self.print_mock.assert_any_call(StrContains(' 0 tests run!'))
On Tue, Apr 26, 2022 at 1:33 PM Daniel Latypov dlatypov@google.com wrote:
This logic depends on the kernel logging a message containing 'kunit test case crashed', but there is no corresponding logic to do so.
This is likely a relic of the revision process KUnit initially went through when being upstreamed.
Delete it given
- it's been missing for years and likely won't get implemented
- the parser has been moving to be a more general KTAP parser, kunit-only magic like this isn't how we'd want to implement it.
Signed-off-by: Daniel Latypov dlatypov@google.com
Reviewed-by: Brendan Higgins brendanhiggins@google.com