From: Oliver Upton <oupton(a)google.com>
[ Upstream commit 01f91acb55be7aac3950b89c458bcea9ef6e4f49 ]
The SMC64 calling convention passes a function identifier in w0 and its
parameters in x1-x17. Given this, there are two deviations in the
SMC64 call performed by the steal_time test: the function identifier is
assigned to a 64-bit register and the parameter is only 32 bits wide.
Align the call with the SMCCC by using a 32-bit register to handle the
function identifier and increasing the parameter width to 64 bits.
Suggested-by: Andrew Jones <drjones(a)redhat.com>
Signed-off-by: Oliver Upton <oupton(a)google.com>
Reviewed-by: Andrew Jones <drjones(a)redhat.com>
Message-Id: <20210921171121.2148982-3-oupton(a)google.com>
Signed-off-by: Paolo Bonzini <pbonzini(a)redhat.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
tools/testing/selftests/kvm/steal_time.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c
index ecec30865a74..aafaa8e38b7c 100644
--- a/tools/testing/selftests/kvm/steal_time.c
+++ b/tools/testing/selftests/kvm/steal_time.c
@@ -118,12 +118,12 @@ struct st_time {
uint64_t st_time;
};
-static int64_t smccc(uint32_t func, uint32_t arg)
+static int64_t smccc(uint32_t func, uint64_t arg)
{
unsigned long ret;
asm volatile(
- "mov x0, %1\n"
+ "mov w0, %w1\n"
"mov x1, %2\n"
"hvc #0\n"
"mov %0, x0\n"
--
2.33.0
[root@iaas-rpma gpio]# make
gcc gpio-mockup-cdev.c -o /home/lizhijian/linux/tools/testing/selftests/gpio/gpio-mockup-cdev
gpio-mockup-cdev.c: In function ‘request_line_v2’:
gpio-mockup-cdev.c:24:30: error: storage size of ‘req’ isn’t known
24 | struct gpio_v2_line_request req;
| ^~~
gpio-mockup-cdev.c:32:14: error: ‘GPIO_V2_LINE_FLAG_OUTPUT’ undeclared (first use in this function); did you mean ‘GPIOLINE_FLAG_IS_OUT’?
32 | if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
| ^~~~~~~~~~~~~~~~~~~~~~~~
gpio-mockup-cdev.c includes <linux/gpio.h>, which is normally provided by
the distribution's kernel-headers package and is expected to declare
GPIO_V2_LINE_FLAG_OUTPUT. However, the installed kernel headers will not
always match the kernel tree being compiled. So tell the compiler to also
search the headers exported from the Linux tree (usr/include, as populated
by 'make headers_install'), just as other selftests such as sched already do.
CC: Philip Li <philip.li(a)intel.com>
Reported-by: kernel test robot <lkp(a)intel.com>
Signed-off-by: Li Zhijian <lizhijian(a)cn.fujitsu.com>
---
V2: add more details about the fix
---
tools/testing/selftests/gpio/Makefile | 1 +
1 file changed, 1 insertion(+)
diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile
index 39f2bbe8dd3d..42ea7d2aa844 100644
--- a/tools/testing/selftests/gpio/Makefile
+++ b/tools/testing/selftests/gpio/Makefile
@@ -3,5 +3,6 @@
TEST_PROGS := gpio-mockup.sh
TEST_FILES := gpio-mockup-sysfs.sh
TEST_GEN_PROGS_EXTENDED := gpio-mockup-cdev
+CFLAGS += -I../../../../usr/include
include ../lib.mk
--
2.31.1
Consider this attempt to run KUnit in QEMU:
$ ./tools/testing/kunit/kunit.py run --arch=x86
Before you'd get this error message:
kunit_kernel.ConfigError: x86 is not a valid arch
After:
kunit_kernel.ConfigError: x86 is not a valid arch, options are ['alpha', 'arm', 'arm64', 'i386', 'powerpc', 'riscv', 's390', 'sparc', 'x86_64']
This should make it a bit easier for people to notice when they make
typos and the like. Currently, one would have to dive into the Python code
to figure out what the valid set is.
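As a rough sketch of the idea (the real change is in the kunit_kernel.py
hunk below; QEMU_CONFIGS_DIR and the '.py' handling mirror that code, while
the hard-coded path and helper name here are only for illustration), the
valid arches are simply derived from the qemu config modules shipped with
kunit_tool:

  import os

  # Path assumed for illustration; kunit_kernel.py computes it relative
  # to the tool's own directory.
  QEMU_CONFIGS_DIR = 'tools/testing/kunit/qemu_configs'

  def valid_arches():
      # One qemu config module per supported --arch value.
      return sorted(f[:-3] for f in os.listdir(QEMU_CONFIGS_DIR) if f.endswith('.py'))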
Signed-off-by: Daniel Latypov <dlatypov(a)google.com>
---
tools/testing/kunit/kunit_kernel.py | 5 +++--
tools/testing/kunit/kunit_tool_test.py | 4 ++++
2 files changed, 7 insertions(+), 2 deletions(-)
diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
index 1870e75ff153..a6b3cee3f0d0 100644
--- a/tools/testing/kunit/kunit_kernel.py
+++ b/tools/testing/kunit/kunit_kernel.py
@@ -198,8 +198,9 @@ def get_source_tree_ops(arch: str, cross_compile: Optional[str]) -> LinuxSourceT
return LinuxSourceTreeOperationsUml(cross_compile=cross_compile)
elif os.path.isfile(config_path):
return get_source_tree_ops_from_qemu_config(config_path, cross_compile)[1]
- else:
- raise ConfigError(arch + ' is not a valid arch')
+
+ options = [f[:-3] for f in os.listdir(QEMU_CONFIGS_DIR) if f.endswith('.py')]
+ raise ConfigError(arch + ' is not a valid arch, options are ' + str(sorted(options)))
def get_source_tree_ops_from_qemu_config(config_path: str,
cross_compile: Optional[str]) -> Tuple[
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index cad37a98e599..2ae72f04cbe0 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -289,6 +289,10 @@ class LinuxSourceTreeTest(unittest.TestCase):
pass
kunit_kernel.LinuxSourceTree('', kunitconfig_path=dir)
+ def test_invalid_arch(self):
+ with self.assertRaisesRegex(kunit_kernel.ConfigError, 'not a valid arch, options are.*x86_64'):
+ kunit_kernel.LinuxSourceTree('', arch='invalid')
+
# TODO: add more test cases.
base-commit: 865a0a8025ee0b54d1cc74834c57197d184a441e
--
2.33.0.685.g46640cef36-goog
Fix documentation build warnings in <kunit/test.h>:
../include/kunit/test.h:616: warning: Function parameter or member 'flags' not described in 'kunit_kmalloc_array'
../include/kunit/test.h:616: warning: Excess function parameter 'gfp' description in 'kunit_kmalloc_array'
../include/kunit/test.h:661: warning: Function parameter or member 'flags' not described in 'kunit_kcalloc'
../include/kunit/test.h:661: warning: Excess function parameter 'gfp' description in 'kunit_kcalloc'
Fixes: 0a756853586c ("kunit: test: add test resource management API")
Fixes: 7122debb4367 ("kunit: introduce kunit_kmalloc_array/kunit_kcalloc() helpers")
Signed-off-by: Randy Dunlap <rdunlap(a)infradead.org>
Cc: Brendan Higgins <brendanhiggins(a)google.com>
Cc: linux-kselftest(a)vger.kernel.org
Cc: kunit-dev(a)googlegroups.com
Cc: Daniel Latypov <dlatypov(a)google.com>
Cc: Shuah Khan <skhan(a)linuxfoundation.org>
---
include/kunit/test.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
--- lnx-515-rc4.orig/include/kunit/test.h
+++ lnx-515-rc4/include/kunit/test.h
@@ -607,7 +607,7 @@ void kunit_remove_resource(struct kunit
* @test: The test context object.
* @n: number of elements.
* @size: The size in bytes of the desired memory.
- * @gfp: flags passed to underlying kmalloc().
+ * @flags: gfp flags passed to underlying kmalloc().
*
* Just like `kmalloc_array(...)`, except the allocation is managed by the test case
* and is automatically cleaned up after the test case concludes. See &struct
@@ -653,7 +653,7 @@ static inline void *kunit_kzalloc(struct
* @test: The test context object.
* @n: number of elements.
* @size: The size in bytes of the desired memory.
- * @gfp: flags passed to underlying kmalloc().
+ * @flags: gfp flags passed to underlying kmalloc().
*
* See kcalloc() and kunit_kmalloc_array() for more information.
*/
From: Rae Moar <rmoar(a)google.com>
Update kunit_parser to improve compatibility with the KTAP
specification, including arbitrarily nested tests. The patch makes three
major changes:
- Use a general Test object to represent all tests rather than TestCase
and TestSuite objects. This allows for easier implementation of arbitrary
levels of nested tests and promotes the idea that both test suites and test
cases are tests.
- Print errors incrementally rather than all at once after the parsing
finishes, to maximize the information given to the user when the parser is
handed invalid input and to make the timestamps printed along the way more
helpful. Note that kunit.py parse does not print incrementally yet;
however, this change brings us closer to that feature.
- Increase compatibility with different input formats. Arbitrary levels
of nested tests are supported, and test cases and test suites may now
appear at the same level of testing.
This patch now implements the KTAP specification as described here:
https://lore.kernel.org/linux-kselftest/CA+GJov6tdjvY9x12JsJT14qn6c7NViJxqa….
This patch adjusts the kunit_tool_test.py file to check for
the correct outputs from the new parser and adds a new test that checks
parsing of a correctly formatted KTAP log with multiple nested subtests
(test_is_test_passed-all_passed_nested.log).
This patch also alters the kunit_json.py file to allow for arbitrarily
nested tests.
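For illustration, a minimal nested KTAP log of the kind the updated parser
accepts (the test names are made up; the layout follows the formats
documented in parse_test() below) looks roughly like:

  KTAP version 1
  1..1
    # Subtest: example_suite
    1..2
    ok 1 - test_one
    ok 2 - test_two
  ok 1 - example_suite

The parser stores this as one main Test object with a single subtest
(example_suite), which in turn holds two subtests of its own.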
Signed-off-by: Rae Moar <rmoar(a)google.com>
Reviewed-by: Brendan Higgins <brendanhiggins(a)google.com>
Signed-off-by: Daniel Latypov <dlatypov(a)google.com>
---
NOTE: this patch is now applied on top of
https://lore.kernel.org/linux-kselftest/20210930222048.1692635-5-dlatypov@g…
to resolve a conflict.
Change log from v3:
https://lore.kernel.org/linux-kselftest/20210901190623.315736-1-rmoar@googl…
- Resolve conflict with hermetic testing patches
- Fix pytype error (str -> Optional[str] in kunit_json.py)
Change log from v2:
https://lore.kernel.org/linux-kselftest/20210826195505.3066755-1-rmoar@goog…
- Fixes a type disagreement for build_dir in kunit_json.py
- Removes raw_output()
- Changes docstrings in kunit_parser.py (class docstring, LineStream
docstrings, add_error(), total(), get_status(), all parsing methods)
- Fixes a bug where the diagnostic log was not printed when the input
ran out of lines
- Sets default status of all tests to TEST_CRASHED
- Adds and prints empty tests with crashed status in case of missing
tests
- Prints 'subtest' instead of 'subtests' when there is exactly 1 subtest
- Includes checking for the 'BUG:' message when searching the log for
crash messages (note that the parse_crash_in_log method could be removed,
but that would require deleting the tests in kunit_tool_test.py that
still contain the now-unused crash message. If removed, the parser would
still print the log on test crash or failure, which would now include
missing subtests)
- Fixes a bug where directives (other than SKIP) were included in the
test name when matching the name in the result line for subtests
Change log from v1:
https://lore.kernel.org/linux-kselftest/20210820200032.2178134-1-rmoar@goog…
- Rebase onto kselftest/kunit branch
- Add tests to kunit_tool_test.py to check that the parser correctly
strips the hyphen, produces correct json objects with nested tests,
handles kselftest TAP output, and deals with a missing test plan.
- Fix a bug so the test name is correctly matched when the test plan is
missing.
- Fix a bug in kunit_tool_test.py pointed out by Daniel where it was not
correctly checking for a proper match to the '0 tests run!' error
message. Reverts the changes back to the original.
- A few minor changes to commit message using Daniel's comments.
- Trim docstrings based on Daniel's comments:
- Shorten some docstrings to a single line, or to just a description,
when they are self-explanatory.
- Remove explicit respecification of parameter and return types, since
these are already specified in the function annotations. However, some
descriptions of the parameters and returns remain, and some contain the
type for context. Additionally, the types of public attributes of
classes remain.
- Remove any documentation of 'Return: None'
- Remove docstrings of helper methods within other methods
---
tools/testing/kunit/kunit.py | 6 +-
tools/testing/kunit/kunit_json.py | 55 +-
tools/testing/kunit/kunit_parser.py | 1056 ++++++++++++-----
tools/testing/kunit/kunit_tool_test.py | 134 ++-
.../test_is_test_passed-all_passed_nested.log | 34 +
.../test_is_test_passed-kselftest.log | 14 +
.../test_is_test_passed-missing_plan.log | 31 +
.../kunit/test_data/test_strip_hyphen.log | 16 +
8 files changed, 954 insertions(+), 392 deletions(-)
create mode 100644 tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log
create mode 100644 tools/testing/kunit/test_data/test_is_test_passed-kselftest.log
create mode 100644 tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log
create mode 100644 tools/testing/kunit/test_data/test_strip_hyphen.log
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index 8c7e8c7b2c97..9e2a96e45e3b 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -136,7 +136,7 @@ def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest,
test_glob = request.filter_glob.split('.', maxsplit=2)[1]
filter_globs = [g + '.'+ test_glob for g in filter_globs]
- overall_status = kunit_parser.TestStatus.SUCCESS
+ test_counts = kunit_parser.TestCounts()
exec_time = 0.0
for i, filter_glob in enumerate(filter_globs):
kunit_parser.print_with_timestamp('Starting KUnit Kernel ({}/{})...'.format(i+1, len(filter_globs)))
@@ -155,9 +155,9 @@ def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest,
test_end = time.time()
exec_time += test_end - test_start
- overall_status = kunit_parser.max_status(overall_status, result.status)
+ test_counts.add_subtest_counts(result.result.test.counts)
- return KunitResult(status=result.status, result=result.result, elapsed_time=exec_time)
+ return KunitResult(status=test_counts.get_status(), result=result.result, elapsed_time=exec_time)
def parse_tests(request: KunitParseRequest, input_data: Iterable[str]) -> KunitResult:
parse_start = time.time()
diff --git a/tools/testing/kunit/kunit_json.py b/tools/testing/kunit/kunit_json.py
index f5cca5c38cac..d01581d068ac 100644
--- a/tools/testing/kunit/kunit_json.py
+++ b/tools/testing/kunit/kunit_json.py
@@ -11,47 +11,46 @@ import os
import kunit_parser
-from kunit_parser import TestStatus
-
-def get_json_result(test_result, def_config, build_dir, json_path) -> str:
- sub_groups = []
-
- # Each test suite is mapped to a KernelCI sub_group
- for test_suite in test_result.suites:
- sub_group = {
- "name": test_suite.name,
- "arch": "UM",
- "defconfig": def_config,
- "build_environment": build_dir,
- "test_cases": [],
- "lab_name": None,
- "kernel": None,
- "job": None,
- "git_branch": "kselftest",
- }
- test_cases = []
- # TODO: Add attachments attribute in test_case with detailed
- # failure message, see https://api.kernelci.org/schema-test-case.html#get
- for case in test_suite.cases:
- test_case = {"name": case.name, "status": "FAIL"}
- if case.status == TestStatus.SUCCESS:
+from kunit_parser import Test, TestResult, TestStatus
+from typing import Any, Dict, Optional
+
+JsonObj = Dict[str, Any]
+
+def _get_group_json(test: Test, def_config: str, build_dir: Optional[str]) -> JsonObj:
+ sub_groups = [] # List[JsonObj]
+ test_cases = [] # List[JsonObj]
+
+ for subtest in test.subtests:
+ if len(subtest.subtests):
+ sub_group = _get_group_json(subtest, def_config,
+ build_dir)
+ sub_groups.append(sub_group)
+ else:
+ test_case = {"name": subtest.name, "status": "FAIL"}
+ if subtest.status == TestStatus.SUCCESS:
test_case["status"] = "PASS"
- elif case.status == TestStatus.TEST_CRASHED:
+ elif subtest.status == TestStatus.TEST_CRASHED:
test_case["status"] = "ERROR"
test_cases.append(test_case)
- sub_group["test_cases"] = test_cases
- sub_groups.append(sub_group)
+
test_group = {
- "name": "KUnit Test Group",
+ "name": test.name,
"arch": "UM",
"defconfig": def_config,
"build_environment": build_dir,
"sub_groups": sub_groups,
+ "test_cases": test_cases,
"lab_name": None,
"kernel": None,
"job": None,
"git_branch": "kselftest",
}
+ return test_group
+
+def get_json_result(test_result: TestResult, def_config: str, build_dir: Optional[str],
+ json_path: str) -> str:
+ test_group = _get_group_json(test_result.test, def_config, build_dir)
+ test_group["name"] = "KUnit Test Group"
json_obj = json.dumps(test_group, indent=4)
if json_path != 'stdout':
with open(json_path, 'w') as result_path:
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index 6310a641b151..4b6086159c7f 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -1,11 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
#
-# Parses test results from a kernel dmesg log.
+# Parses KTAP test results from a kernel dmesg log and incrementally prints
+# results with reader-friendly format. Stores and returns test results in a
+# Test object.
#
# Copyright (C) 2019, Google LLC.
# Author: Felix Guo <felixguoxiuping(a)gmail.com>
# Author: Brendan Higgins <brendanhiggins(a)google.com>
+# Author: Rae Moar <rmoar(a)google.com>
+from __future__ import annotations
import re
from collections import namedtuple
@@ -14,33 +18,55 @@ from enum import Enum, auto
from functools import reduce
from typing import Iterable, Iterator, List, Optional, Tuple
-TestResult = namedtuple('TestResult', ['status','suites','log'])
-
-class TestSuite(object):
- def __init__(self) -> None:
- self.status = TestStatus.SUCCESS
- self.name = ''
- self.cases = [] # type: List[TestCase]
-
- def __str__(self) -> str:
- return 'TestSuite(' + str(self.status) + ',' + self.name + ',' + str(self.cases) + ')'
-
- def __repr__(self) -> str:
- return str(self)
-
-class TestCase(object):
+TestResult = namedtuple('TestResult', ['status','test','log'])
+
+class Test(object):
+ """
+ A class to represent a test parsed from KTAP results. All KTAP
+ results within a test log are stored in a main Test object as
+ subtests.
+
+ Attributes:
+ status : TestStatus - status of the test
+ name : str - name of the test
+ expected_count : int - expected number of subtests (0 if single
+ test case and None if unknown expected number of subtests)
+ subtests : List[Test] - list of subtests
+ log : List[str] - log of KTAP lines that correspond to the test
+ counts : TestCounts - counts of the test statuses and errors of
+ subtests or of the test itself if the test is a single
+ test case.
+ """
def __init__(self) -> None:
+ """Constructs the default attributes of a Test class object.
+ """
self.status = TestStatus.SUCCESS
self.name = ''
+ self.expected_count = 0 # type: Optional[int]
+ self.subtests = [] # type: List[Test]
self.log = [] # type: List[str]
+ self.counts = TestCounts()
def __str__(self) -> str:
- return 'TestCase(' + str(self.status) + ',' + self.name + ',' + str(self.log) + ')'
+ """Returns string representation of a Test class object."""
+ return ('Test(' + str(self.status) + ', ' + self.name +
+ ', ' + str(self.expected_count) + ', ' +
+ str(self.subtests) + ', ' + str(self.log) + ', ' +
+ str(self.counts) + ')')
def __repr__(self) -> str:
+ """Returns string representation of a Test class object."""
return str(self)
+ def add_error(self, error_message: str) -> None:
+ """Adds error to test object by incrementing the error count
+ and printing the error message.
+ """
+ self.counts.errors += 1
+ print_error('Test ' + self.name + ': ' + error_message)
+
class TestStatus(Enum):
+ """An enumeration class to represent the status of a test."""
SUCCESS = auto()
FAILURE = auto()
SKIPPED = auto()
@@ -48,381 +74,769 @@ class TestStatus(Enum):
NO_TESTS = auto()
FAILURE_TO_PARSE_TESTS = auto()
+class TestCounts:
+ """
+ A class to represent the counts of statuses and test errors of
+ subtests or of the test itself if the test is a single test case with
+ no subtests. Note that the sum of the counts of passed, failed,
+ crashed, and skipped should sum to the total number of subtests for
+ the test.
+
+ Attributes:
+ passed : int - the number of tests that have passed
+ failed : int - the number of tests that have failed
+ crashed : int - the number of tests that have crashed
+ skipped : int - the number of tests that have skipped
+ errors : int - the number of errors in the test and subtests
+ """
+ def __init__(self):
+ """Constructs the default attributes of a TestCounts class
+ object. Sets the counts of all test statuses and test
+ errors to be 0.
+ """
+ self.passed = 0
+ self.failed = 0
+ self.crashed = 0
+ self.skipped = 0
+ self.errors = 0
+
+ def __str__(self) -> str:
+ """Returns the string representation of a TestCounts object.
+ """
+ return ('Passed: ' + str(self.passed) +
+ ', Failed: ' + str(self.failed) +
+ ', Crashed: ' + str(self.crashed) +
+ ', Skipped: ' + str(self.skipped) +
+ ', Errors: ' + str(self.errors))
+
+ def total(self) -> int:
+ """Returns total number of subtests or 1 if the test object
+ has no subtests to represent the test itself. This number is
+ calculated by the sum of the passed, failed, crashed, and
+ skipped subtests.
+ """
+ return (self.passed + self.failed + self.crashed +
+ self.skipped)
+
+ def add_subtest_counts(self, counts: TestCounts) -> None:
+ """
+ Adds the counts of another TestCounts object to the current
+ TestCounts object. Used to add the counts of a subtest to the
+ parent test.
+
+ Parameters:
+ counts - a different TestCounts object whose counts
+ will be added to the counts of the TestCounts object
+ """
+ self.passed += counts.passed
+ self.failed += counts.failed
+ self.crashed += counts.crashed
+ self.skipped += counts.skipped
+ self.errors += counts.errors
+
+ def get_status(self) -> TestStatus:
+ """Returns the expected status of a Test using test counts."""
+ if self.crashed:
+ # If one of the subtests crash, the expected status
+ # of the Test is crashed.
+ return TestStatus.TEST_CRASHED
+ elif self.failed:
+ # Otherwise if one of the subtests fail, the
+ # expected status of the Test is failed.
+ return TestStatus.FAILURE
+ elif self.passed:
+ # Otherwise if one of the subtests pass, the
+ # expected status of the Test is passed.
+ return TestStatus.SUCCESS
+ else:
+ # Finally, if none of the subtests have failed,
+ # crashed, or passed, the expected status of the
+ # Test is skipped.
+ return TestStatus.SKIPPED
+
+ def add_status(self, status: TestStatus) -> None:
+ """
+ Given inputted status, increments corresponding attribute of
+ TestCounts object.
+
+ Parameters:
+ status - status to be added to the TestCounts object
+ """
+ if status == TestStatus.SUCCESS or \
+ status == TestStatus.NO_TESTS:
+ # if status is NO_TESTS the most appropriate
+ # attribute to increment is passed because
+ # the test did not fail, crash or get skipped.
+ self.passed += 1
+ elif status == TestStatus.FAILURE:
+ self.failed += 1
+ elif status == TestStatus.SKIPPED:
+ self.skipped += 1
+ else:
+ self.crashed += 1
+
class LineStream:
- """Provides a peek()/pop() interface over an iterator of (line#, text)."""
+ """
+ A class to represent the lines of kernel output.
+ Provides a peek()/pop() interface over an iterator of
+ (line#, text).
+ """
_lines: Iterator[Tuple[int, str]]
_next: Tuple[int, str]
_done: bool
def __init__(self, lines: Iterator[Tuple[int, str]]):
+ """Set defaults for LineStream object and sets _lines
+ attribute to lines parameter.
+ """
self._lines = lines
self._done = False
self._next = (0, '')
self._get_next()
def _get_next(self) -> None:
+ """Advances the LineStream to the next line or sets the _done
+ attribute if the LineStream has reached the end of the lines.
+ """
try:
self._next = next(self._lines)
except StopIteration:
self._done = True
def peek(self) -> str:
+ """Returns the next line in the LineStream without advancing
+ the LineStream.
+ """
return self._next[1]
def pop(self) -> str:
+ """Returns the next line in the LineStream and advances the
+ LineStream to the next line.
+ """
n = self._next
self._get_next()
return n[1]
def __bool__(self) -> bool:
+ """Returns whether the LineStream has reached the end of the
+ lines.
+ """
return not self._done
# Only used by kunit_tool_test.py.
def __iter__(self) -> Iterator[str]:
+ """Empties all lines stored in LineStream object into
+ Iterator object and returns the Iterator object.
+ """
while bool(self):
yield self.pop()
def line_number(self) -> int:
+ """Returns the line number of the next line in the
+ LineStream.
+ """
return self._next[0]
-kunit_start_re = re.compile(r'TAP version [0-9]+$')
-kunit_end_re = re.compile('(List of all partitions:|'
- 'Kernel panic - not syncing: VFS:|reboot: System halted)')
+# Parsing helper methods:
+
+KTAP_START = re.compile(r'KTAP version ([0-9]+)$')
+TAP_START = re.compile(r'TAP version ([0-9]+)$')
+KTAP_END = re.compile('(List of all partitions:|'
+ 'Kernel panic - not syncing: VFS:|reboot: System halted)')
def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream:
- def isolate_kunit_output(kernel_output: Iterable[str]) -> Iterator[Tuple[int, str]]:
+ """Extracts KTAP lines from inputted kernel output in LineStream
+ object."""
+ def isolate_ktap_output(kernel_output: Iterable[str]) \
+ -> Iterator[Tuple[int, str]]:
line_num = 0
started = False
for line in kernel_output:
line_num += 1
- line = line.rstrip() # line always has a trailing \n
- if kunit_start_re.search(line):
+ line = line.rstrip() # remove trailing \n
+ if not started and KTAP_START.search(line):
+ # start extracting KTAP lines and set prefix
+ # to number of characters before version line
+ prefix_len = len(
+ line.split('KTAP version')[0])
+ started = True
+ yield line_num, line[prefix_len:]
+ elif not started and TAP_START.search(line):
+ # start extracting KTAP lines and set prefix
+ # to number of characters before version line
prefix_len = len(line.split('TAP version')[0])
started = True
yield line_num, line[prefix_len:]
- elif kunit_end_re.search(line):
+ elif started and KTAP_END.search(line):
+ # stop extracting KTAP lines
break
elif started:
- yield line_num, line[prefix_len:]
- return LineStream(lines=isolate_kunit_output(kernel_output))
-
-DIVIDER = '=' * 60
-
-RESET = '\033[0;0m'
-
-def red(text) -> str:
- return '\033[1;31m' + text + RESET
-
-def yellow(text) -> str:
- return '\033[1;33m' + text + RESET
-
-def green(text) -> str:
- return '\033[1;32m' + text + RESET
-
-def print_with_timestamp(message) -> None:
- print('[%s] %s' % (datetime.now().strftime('%H:%M:%S'), message))
-
-def format_suite_divider(message) -> str:
- return '======== ' + message + ' ========'
-
-def print_suite_divider(message) -> None:
- print_with_timestamp(DIVIDER)
- print_with_timestamp(format_suite_divider(message))
-
-def print_log(log) -> None:
- for m in log:
- print_with_timestamp(m)
-
-TAP_ENTRIES = re.compile(r'^(TAP|[\s]*ok|[\s]*not ok|[\s]*[0-9]+\.\.[0-9]+|[\s]*# (Subtest:|.*: kunit test case crashed!)).*$')
-
-def consume_non_diagnostic(lines: LineStream) -> None:
- while lines and not TAP_ENTRIES.match(lines.peek()):
- lines.pop()
-
-def save_non_diagnostic(lines: LineStream, test_case: TestCase) -> None:
- while lines and not TAP_ENTRIES.match(lines.peek()):
- test_case.log.append(lines.peek())
- lines.pop()
-
-OkNotOkResult = namedtuple('OkNotOkResult', ['is_ok','description', 'text'])
-
-OK_NOT_OK_SKIP = re.compile(r'^[\s]*(ok|not ok) [0-9]+ - (.*) # SKIP(.*)$')
-
-OK_NOT_OK_SUBTEST = re.compile(r'^[\s]+(ok|not ok) [0-9]+ - (.*)$')
-
-OK_NOT_OK_MODULE = re.compile(r'^(ok|not ok) ([0-9]+) - (.*)$')
-
-def parse_ok_not_ok_test_case(lines: LineStream, test_case: TestCase) -> bool:
- save_non_diagnostic(lines, test_case)
- if not lines:
- test_case.status = TestStatus.TEST_CRASHED
- return True
- line = lines.peek()
- match = OK_NOT_OK_SUBTEST.match(line)
- while not match and lines:
- line = lines.pop()
- match = OK_NOT_OK_SUBTEST.match(line)
- if match:
- test_case.log.append(lines.pop())
- test_case.name = match.group(2)
- skip_match = OK_NOT_OK_SKIP.match(line)
- if skip_match:
- test_case.status = TestStatus.SKIPPED
- return True
- if test_case.status == TestStatus.TEST_CRASHED:
- return True
- if match.group(1) == 'ok':
- test_case.status = TestStatus.SUCCESS
- else:
- test_case.status = TestStatus.FAILURE
- return True
+ # remove prefix and any indention and yield
+ # line with line number
+ line = line[prefix_len:].lstrip()
+ yield line_num, line
+ return LineStream(lines=isolate_ktap_output(kernel_output))
+
+def raw_output(kernel_output: Iterable[str]) -> None:
+ """Prints all lines of kernel output."""
+ for line in kernel_output:
+ print(line.rstrip())
+
+KTAP_VERSIONS = [1]
+TAP_VERSIONS = [13, 14]
+
+def check_version(version_num: int, accepted_versions: List[int],
+ version_type: str, test: Test) -> None:
+ """
+ Adds error to test object if version number is too high or too
+ low.
+
+ Parameters:
+ version_num - The inputted version number from the parsed KTAP or TAP
+ header line
+ accepted_versions - List of accepted KTAP or TAP versions
+ version_type - 'KTAP' or 'TAP' depending on the type of
+ version line.
+ test - Test object for current test being parsed
+ """
+ if version_num < min(accepted_versions):
+ test.add_error(version_type +
+ ' version lower than expected!')
+ elif version_num > max(accepted_versions):
+ test.add_error(
+ version_type + ' version higher than expected!')
+
+def parse_ktap_header(lines: LineStream, test: Test) -> bool:
+ """
+ If the next line in LineStream matches the format of KTAP or TAP
+ header line, the version number is checked, the line is popped,
+ and returns True. Otherwise the method returns False.
+
+ Accepted formats:
+ - 'KTAP version [version number]'
+ - 'TAP version [version number]'
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+ test - Test object for current test being parsed
+
+ Return:
+ Boolean that represents if the next line in the LineStream was parsed
+ as the KTAP or TAP header line
+ """
+ ktap_match = KTAP_START.match(lines.peek())
+ tap_match = TAP_START.match(lines.peek())
+ if ktap_match:
+ version_num = int(ktap_match.group(1))
+ check_version(version_num, KTAP_VERSIONS, 'KTAP', test)
+ elif tap_match:
+ version_num = int(tap_match.group(1))
+ check_version(version_num, TAP_VERSIONS, 'TAP', test)
else:
return False
-
-SUBTEST_DIAGNOSTIC = re.compile(r'^[\s]+# (.*)$')
-DIAGNOSTIC_CRASH_MESSAGE = re.compile(r'^[\s]+# .*?: kunit test case crashed!$')
-
-def parse_diagnostic(lines: LineStream, test_case: TestCase) -> bool:
- save_non_diagnostic(lines, test_case)
- if not lines:
+ test.log.append(lines.pop())
+ return True
+
+TEST_HEADER = re.compile(r'^# Subtest: (.*)$')
+
+def parse_test_header(lines: LineStream, test: Test) -> bool:
+ """
+ If the next line in LineStream matches the format of a test
+ header line, the name of test is set, the line is popped,
+ and returns True. Otherwise the method returns False.
+
+ Accepted format:
+ - '# Subtest: [test name]'
+
+ Parameters:
+ lines - LineStream of ktap output to parse
+ test - Test object for current test being parsed
+
+ Return:
+ Boolean that represents if the next line in the LineStream was parsed
+ as a test header
+ """
+ match = TEST_HEADER.match(lines.peek())
+ if not match:
return False
+ test.log.append(lines.pop())
+ test.name = match.group(1)
+ return True
+
+TEST_PLAN = re.compile(r'1\.\.([0-9]+)')
+
+def parse_test_plan(lines: LineStream, test: Test) -> bool:
+ """
+ If the next line in LineStream matches the format of a test
+ plan line, the expected number of subtests is set in test object, an
+ error is thrown if there are 0 tests, the line is popped,
+ and returns True. Otherwise the method adds an error that the test
+ plan is missing to the test object and returns False.
+
+ Accepted format:
+ - '1..[number of subtests]'
+
+ Parameters:
+ lines - LineStream of ktap output to parse
+ test - Test object for current test being parsed
+
+ Return:
+ Boolean that represents if the next line in the LineStream was parsed
+ as a test plan
+ """
+ match = TEST_PLAN.match(lines.peek())
+ if not match:
+ test.expected_count = None
+ test.add_error('missing plan line!')
+ return False
+ test.log.append(lines.pop())
+ expected_count = int(match.group(1))
+ test.expected_count = expected_count
+ if expected_count == 0:
+ test.status = TestStatus.NO_TESTS
+ test.add_error('0 tests run!')
+ return True
+
+TEST_RESULT = re.compile(r'^(ok|not ok) ([0-9]+) (- )?(.*)$')
+
+TEST_RESULT_SKIP = re.compile(r'^(ok|not ok) ([0-9]+) (- )?(.*) # SKIP(.*)$')
+
+def peek_test_name_match(lines: LineStream, test: Test) -> bool:
+ """
+ If the next line in LineStream matches the format of a test
+ result line and the name of the result line matches the name of the
+ current test, the method returns True. Otherwise it returns False.
+
+ Accepted format:
+ - '[ok|not ok] [test number] [-] [test name] [optional skip
+ directive]'
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+ test - Test object for current test being parsed
+
+ Return:
+ Boolean that represents if the next line in the LineStream matched a
+ test result line and the name matched the expected test name
+ """
line = lines.peek()
- match = SUBTEST_DIAGNOSTIC.match(line)
- if match:
- test_case.log.append(lines.pop())
- crash_match = DIAGNOSTIC_CRASH_MESSAGE.match(line)
- if crash_match:
- test_case.status = TestStatus.TEST_CRASHED
- return True
- else:
+ match = TEST_RESULT.match(line)
+ if not match:
return False
+ name = match.group(4)
+ return (name == test.name)
+
+def parse_test_result(lines: LineStream, test: Test,
+ expected_num: int) -> bool:
+ """
+ If the next line in LineStream matches the format of a test
+ result line, the status in the result line is added to the test
+ object, the test number is checked to match the expected test number
+ and if not an error is added to the test object, and returns True.
+ Otherwise it returns False.
+
+ Note that the skip directive is the only
+ directive that causes a change in status and otherwise the directive
+ is included in the name of the test.
+
+ Accepted format:
+ - '[ok|not ok] [test number] [-] [test name] [optional skip
+ directive]'
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+ test - Test object for current test being parsed
+ expected_num - expected test number for current test
+
+ Return:
+ Boolean that represents if the next line in the LineStream was parsed
+ as a test result line.
+ """
+ line = lines.peek()
+ match = TEST_RESULT.match(line)
+ skip_match = TEST_RESULT_SKIP.match(line)
-def parse_test_case(lines: LineStream) -> Optional[TestCase]:
- test_case = TestCase()
- save_non_diagnostic(lines, test_case)
- while parse_diagnostic(lines, test_case):
- pass
- if parse_ok_not_ok_test_case(lines, test_case):
- return test_case
- else:
- return None
-
-SUBTEST_HEADER = re.compile(r'^[\s]+# Subtest: (.*)$')
+ # Check if line matches test result line format
+ if not match:
+ return False
+ test.log.append(lines.pop())
-def parse_subtest_header(lines: LineStream) -> Optional[str]:
- consume_non_diagnostic(lines)
- if not lines:
- return None
- match = SUBTEST_HEADER.match(lines.peek())
- if match:
- lines.pop()
- return match.group(1)
+ # Set name of test object
+ if skip_match:
+ test.name = skip_match.group(4)
else:
- return None
+ test.name = match.group(4)
-SUBTEST_PLAN = re.compile(r'[\s]+[0-9]+\.\.([0-9]+)')
+ # Check test num
+ num = int(match.group(2))
+ if num != expected_num:
+ test.add_error('Expected test number ' +
+ str(expected_num) + ' but found ' + str(num))
-def parse_subtest_plan(lines: LineStream) -> Optional[int]:
- consume_non_diagnostic(lines)
- match = SUBTEST_PLAN.match(lines.peek())
- if match:
- lines.pop()
- return int(match.group(1))
- else:
- return None
-
-def max_status(left: TestStatus, right: TestStatus) -> TestStatus:
- if left == right:
- return left
- elif left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED:
- return TestStatus.TEST_CRASHED
- elif left == TestStatus.FAILURE or right == TestStatus.FAILURE:
- return TestStatus.FAILURE
- elif left == TestStatus.SKIPPED:
- return right
- else:
- return left
-
-def parse_ok_not_ok_test_suite(lines: LineStream,
- test_suite: TestSuite,
- expected_suite_index: int) -> bool:
- consume_non_diagnostic(lines)
- if not lines:
- test_suite.status = TestStatus.TEST_CRASHED
- return False
- line = lines.peek()
- match = OK_NOT_OK_MODULE.match(line)
- if match:
- lines.pop()
- if match.group(1) == 'ok':
- test_suite.status = TestStatus.SUCCESS
- else:
- test_suite.status = TestStatus.FAILURE
- skip_match = OK_NOT_OK_SKIP.match(line)
- if skip_match:
- test_suite.status = TestStatus.SKIPPED
- suite_index = int(match.group(2))
- if suite_index != expected_suite_index:
- print_with_timestamp(
- red('[ERROR] ') + 'expected_suite_index ' +
- str(expected_suite_index) + ', but got ' +
- str(suite_index))
+ # Set status of test object
+ status = match.group(1)
+ if test.status == TestStatus.TEST_CRASHED:
return True
+ elif skip_match:
+ test.status = TestStatus.SKIPPED
+ elif status == 'ok':
+ test.status = TestStatus.SUCCESS
else:
- return False
-
-def bubble_up_errors(status_list: Iterable[TestStatus]) -> TestStatus:
- return reduce(max_status, status_list, TestStatus.SKIPPED)
+ test.status = TestStatus.FAILURE
+ return True
+
+def parse_diagnostic(lines: LineStream) -> List[str]:
+ """
+ If the next line in LineStream does not match the format of a test
+ result line or test header line, the line is popped and added to the
+ log that is returned. This continues until a test result or test
+ header line is reached.
+
+ Line formats that are not parsed:
+ - '# Subtest: [test name]'
+ - '[ok|not ok] [test number] [-] [test name] [optional skip
+ directive]'
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+
+ Return:
+ Log of diagnostic lines
+ """
+ log = [] # type: List[str]
+ while lines and not TEST_RESULT.match(lines.peek()) and not \
+ TEST_HEADER.match(lines.peek()):
+ log.append(lines.pop())
+ return log
+
+DIAGNOSTIC_CRASH_MESSAGE = re.compile(r'^# .*?: kunit test case crashed!$')
+
+def parse_crash_in_log(test: Test) -> bool:
+ """
+ Iterate through the lines of the log to parse for crash message.
+ If crash message found, set status to crashed and return True.
+ Otherwise return False.
+
+ Parameters:
+ test - Test object for current test being parsed
+
+ Return:
+ Boolean that represents if crash message found in log
+ """
+ for line in test.log:
+ if DIAGNOSTIC_CRASH_MESSAGE.match(line):
+ test.status = TestStatus.TEST_CRASHED
+ return True
+ return False
-def bubble_up_test_case_errors(test_suite: TestSuite) -> TestStatus:
- max_test_case_status = bubble_up_errors(x.status for x in test_suite.cases)
- return max_status(max_test_case_status, test_suite.status)
+# Printing helper methods:
-def parse_test_suite(lines: LineStream, expected_suite_index: int) -> Optional[TestSuite]:
- if not lines:
- return None
- consume_non_diagnostic(lines)
- test_suite = TestSuite()
- test_suite.status = TestStatus.SUCCESS
- name = parse_subtest_header(lines)
- if not name:
- return None
- test_suite.name = name
- expected_test_case_num = parse_subtest_plan(lines)
- if expected_test_case_num is None:
- return None
- while expected_test_case_num > 0:
- test_case = parse_test_case(lines)
- if not test_case:
- break
- test_suite.cases.append(test_case)
- expected_test_case_num -= 1
- if parse_ok_not_ok_test_suite(lines, test_suite, expected_suite_index):
- test_suite.status = bubble_up_test_case_errors(test_suite)
- return test_suite
- elif not lines:
- print_with_timestamp(red('[ERROR] ') + 'ran out of lines before end token')
- return test_suite
- else:
- print(f'failed to parse end of suite "{name}", at line {lines.line_number()}: {lines.peek()}')
- return None
+DIVIDER = '=' * 60
-TAP_HEADER = re.compile(r'^TAP version 14$')
+RESET = '\033[0;0m'
-def parse_tap_header(lines: LineStream) -> bool:
- consume_non_diagnostic(lines)
- if TAP_HEADER.match(lines.peek()):
- lines.pop()
- return True
- else:
- return False
+def red(text: str) -> str:
+ """Returns inputted string with red color code."""
+ return '\033[1;31m' + text + RESET
-TEST_PLAN = re.compile(r'[0-9]+\.\.([0-9]+)')
+def yellow(text: str) -> str:
+ """Returns inputted string with yellow color code."""
+ return '\033[1;33m' + text + RESET
-def parse_test_plan(lines: LineStream) -> Optional[int]:
- consume_non_diagnostic(lines)
- match = TEST_PLAN.match(lines.peek())
- if match:
- lines.pop()
- return int(match.group(1))
- else:
- return None
-
-def bubble_up_suite_errors(test_suites: Iterable[TestSuite]) -> TestStatus:
- return bubble_up_errors(x.status for x in test_suites)
-
-def parse_test_result(lines: LineStream) -> TestResult:
- consume_non_diagnostic(lines)
- if not lines or not parse_tap_header(lines):
- return TestResult(TestStatus.FAILURE_TO_PARSE_TESTS, [], lines)
- expected_test_suite_num = parse_test_plan(lines)
- if expected_test_suite_num == 0:
- return TestResult(TestStatus.NO_TESTS, [], lines)
- elif expected_test_suite_num is None:
- return TestResult(TestStatus.FAILURE_TO_PARSE_TESTS, [], lines)
- test_suites = []
- for i in range(1, expected_test_suite_num + 1):
- test_suite = parse_test_suite(lines, i)
- if test_suite:
- test_suites.append(test_suite)
- else:
- print_with_timestamp(
- red('[ERROR] ') + ' expected ' +
- str(expected_test_suite_num) +
- ' test suites, but got ' + str(i - 2))
- break
- test_suite = parse_test_suite(lines, -1)
- if test_suite:
- print_with_timestamp(red('[ERROR] ') +
- 'got unexpected test suite: ' + test_suite.name)
- if test_suites:
- return TestResult(bubble_up_suite_errors(test_suites), test_suites, lines)
- else:
- return TestResult(TestStatus.NO_TESTS, [], lines)
+def green(text: str) -> str:
+ """Returns inputted string with green color code."""
+ return '\033[1;32m' + text + RESET
-class TestCounts:
- passed: int
- failed: int
- crashed: int
- skipped: int
+ANSI_LEN = len(red(''))
- def __init__(self):
- self.passed = 0
- self.failed = 0
- self.crashed = 0
- self.skipped = 0
+def print_with_timestamp(message: str) -> None:
+ """Prints message with timestamp at beginning."""
+ print('[%s] %s' % (datetime.now().strftime('%H:%M:%S'), message))
- def total(self) -> int:
- return self.passed + self.failed + self.crashed + self.skipped
-
-def print_and_count_results(test_result: TestResult) -> TestCounts:
- counts = TestCounts()
- for test_suite in test_result.suites:
- if test_suite.status == TestStatus.SUCCESS:
- print_suite_divider(green('[PASSED] ') + test_suite.name)
- elif test_suite.status == TestStatus.SKIPPED:
- print_suite_divider(yellow('[SKIPPED] ') + test_suite.name)
- elif test_suite.status == TestStatus.TEST_CRASHED:
- print_suite_divider(red('[CRASHED] ' + test_suite.name))
+def format_test_divider(message: str, len_message: int) -> str:
+ """
+ Returns string with message centered in fixed width divider.
+
+ Example:
+ '===================== message example ====================='
+
+ Parameters:
+ message - message to be centered in divider line
+ len_message - length of the message to be printed such that
+ any characters of the color codes are not counted
+
+ Return:
+ String containing message centered in fixed width divider
+ """
+ default_count = 3 # default number of dashes
+ len_1 = default_count
+ len_2 = default_count
+ difference = len(DIVIDER) - len_message - 2 # 2 spaces added
+ if difference > 0:
+ # calculate number of dashes for each side of the divider
+ len_1 = int(difference / 2)
+ len_2 = difference - len_1
+ return ('=' * len_1) + ' ' + message + ' ' + ('=' * len_2)
+
+def print_test_header(test: Test) -> None:
+ """
+ Prints test header with test name and optionally the expected number
+ of subtests.
+
+ Example:
+ '=================== example (2 subtests) ==================='
+
+ Parameters:
+ test - Test object representing current test being printed
+ """
+ message = test.name
+ if test.expected_count:
+ message += ' (' + str(test.expected_count) + ' subtests)'
+ print_with_timestamp(format_test_divider(message, len(message)))
+
+def print_log(log: Iterable[str]) -> None:
+ """
+ Prints all strings in saved log for test in yellow.
+
+ Parameters:
+ log - Iterable object with all strings saved in log for test
+ """
+ for m in log:
+ print_with_timestamp(yellow(m))
+ print_with_timestamp('')
+
+def format_test_result(test: Test) -> str:
+ """
+ Returns string with formatted test result with colored status and test
+ name.
+
+ Example:
+ '[PASSED] example'
+
+ Parameters:
+ test - Test object representing current test being printed
+
+ Return:
+ String containing formatted test result
+ """
+ if test.status == TestStatus.SUCCESS:
+ return (green('[PASSED] ') + test.name)
+ elif test.status == TestStatus.SKIPPED:
+ return (yellow('[SKIPPED] ') + test.name)
+ elif test.status == TestStatus.TEST_CRASHED:
+ print_log(test.log)
+ return (red('[CRASHED] ') + test.name)
+ else:
+ print_log(test.log)
+ return (red('[FAILED] ') + test.name)
+
+def print_test_result(test: Test) -> None:
+ """
+ Prints result line with status of test.
+
+ Example:
+ '[PASSED] example'
+
+ Parameters:
+ test - Test object representing current test being printed
+ """
+ print_with_timestamp(format_test_result(test))
+
+def print_test_footer(test: Test) -> None:
+ """
+ Prints test footer with status of test.
+
+ Example:
+ '===================== [PASSED] example ====================='
+
+ Parameters:
+ test - Test object representing current test being printed
+ """
+ message = format_test_result(test)
+ print_with_timestamp(format_test_divider(message,
+ len(message) - ANSI_LEN))
+
+def print_summary_line(test: Test) -> None:
+ """
+ Prints summary line of test object. Color of line is dependent on
+ status of test. Color is green if test passes, yellow if test is
+ skipped, and red if the test fails or crashes. Summary line contains
+ counts of the statuses of the test's subtests, or of the test itself if it
+ has no subtests.
+
+ Example:
+ "Testing complete. Passed: 2, Failed: 0, Crashed: 0, Skipped: 0,
+ Errors: 0"
+
+ test - Test object representing current test being printed
+ """
+ if test.status == TestStatus.SUCCESS or \
+ test.status == TestStatus.NO_TESTS:
+ color = green
+ elif test.status == TestStatus.SKIPPED:
+ color = yellow
+ else:
+ color = red
+ counts = test.counts
+ print_with_timestamp(color('Testing complete. ' + str(counts)))
+
+def print_error(error_message: str) -> None:
+ """
+ Prints error message with error format.
+
+ Example:
+ "[ERROR] Test example: missing test plan!"
+
+ Parameters:
+ error_message - message describing error
+ """
+ print_with_timestamp(red('[ERROR] ') + error_message)
+
+# Other methods:
+
+def bubble_up_test_results(test: Test) -> None:
+ """
+ If the test has subtests, add the test counts of the subtests to the
+ test and check if any of the tests crashed and if so set the test
+ status to crashed. Otherwise if the test has no subtests add the
+ status of the test to the test counts.
+
+ Parameters:
+ test - Test object for current test being parsed
+ """
+ parse_crash_in_log(test)
+ subtests = test.subtests
+ counts = test.counts
+ status = test.status
+ for t in subtests:
+ counts.add_subtest_counts(t.counts)
+ if counts.total() == 0:
+ counts.add_status(status)
+ elif test.counts.get_status() == TestStatus.TEST_CRASHED:
+ test.status = TestStatus.TEST_CRASHED
+
+def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
+ """
+ Finds next test to parse in LineStream, creates new Test object,
+ parses any subtests of the test, populates Test object with all
+ information (status, name) about the test and the Test objects for
+ any subtests, and then returns the Test object. The method accepts
+ three formats of tests:
+
+ Accepted test formats:
+
+ - Main KTAP/TAP header
+
+ Example:
+
+ KTAP version 1
+ 1..4
+ [subtests]
+
+ - Subtest header line
+
+ Example:
+
+ # Subtest: name
+ 1..3
+ [subtests]
+ ok 1 name
+
+ - Test result line
+
+ Example:
+
+ ok 1 - test
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+ expected_num - expected test number for test to be parsed
+ log - list of strings containing any preceding diagnostic lines
+ corresponding to the current test
+
+ Return:
+ Test object populated with characteristics and any subtests
+ """
+ test = Test()
+ test.log.extend(log)
+ parent_test = False
+ main = parse_ktap_header(lines, test)
+ if main:
+ # If KTAP/TAP header is found, attempt to parse
+ # test plan
+ test.name = "main"
+ parse_test_plan(lines, test)
+ else:
+ # If KTAP/TAP header is not found, test must be subtest
+ # header or test result line, so attempt to parse a
+ # subtest header
+ parent_test = parse_test_header(lines, test)
+ if parent_test:
+ # If subtest header is found, attempt to parse
+ # test plan and print header
+ parse_test_plan(lines, test)
+ print_test_header(test)
+ expected_count = test.expected_count
+ subtests = []
+ test_num = 1
+ while main or expected_count is None or test_num <= expected_count:
+ # Loop to parse any subtests.
+ # If test is main test, do not break until no lines left.
+ # Otherwise, break after parsing expected number of tests or
+ # if expected number of tests is unknown break when found
+ # test result line with matching name to subtest header.
+ if not lines:
+ if expected_count and test_num <= expected_count:
+ test.add_error('missing expected subtests!')
+ break
+ sub_log = parse_diagnostic(lines)
+ if not expected_count and not main and \
+ peek_test_name_match(lines, test):
+ test.log.extend(sub_log)
+ break
+ subtests.append(parse_test(lines, test_num, sub_log))
+ test_num += 1
+ test.subtests = subtests
+ if not main:
+ # If not main test, look for test result line
+ test.log.extend(parse_diagnostic(lines))
+ if (parent_test and peek_test_name_match(lines, test)) or \
+ not parent_test:
+ parse_test_result(lines, test, expected_num)
else:
- print_suite_divider(red('[FAILED] ') + test_suite.name)
- for test_case in test_suite.cases:
- if test_case.status == TestStatus.SUCCESS:
- counts.passed += 1
- print_with_timestamp(green('[PASSED] ') + test_case.name)
- elif test_case.status == TestStatus.SKIPPED:
- counts.skipped += 1
- print_with_timestamp(yellow('[SKIPPED] ') + test_case.name)
- elif test_case.status == TestStatus.TEST_CRASHED:
- counts.crashed += 1
- print_with_timestamp(red('[CRASHED] ' + test_case.name))
- print_log(map(yellow, test_case.log))
- print_with_timestamp('')
- else:
- counts.failed += 1
- print_with_timestamp(red('[FAILED] ') + test_case.name)
- print_log(map(yellow, test_case.log))
- print_with_timestamp('')
- return counts
+ test.add_error('missing subtest result line!')
+ # Add statuses to TestCounts attribute in Test object
+ bubble_up_test_results(test)
+ if parent_test:
+ # If test has subtests and is not the main test object, print
+ # footer.
+ print_test_footer(test)
+ elif not main:
+ print_test_result(test)
+ return test
def parse_run_tests(kernel_output: Iterable[str]) -> TestResult:
- counts = TestCounts()
+ """
+ Using kernel output, extract KTAP lines, parse the lines for test
+ results and print condensed test results and a summary line.
+
+ Parameters:
+ kernel_output - Iterable object containing lines of kernel output
+
+ Return:
+ TestResult - Tuple containing status of main test object, main test
+ object with all subtests, and log of all KTAP lines.
+ """
+ print_with_timestamp(DIVIDER)
lines = extract_tap_lines(kernel_output)
- test_result = parse_test_result(lines)
- if test_result.status == TestStatus.NO_TESTS:
- print(red('[ERROR] ') + yellow('no tests run!'))
- elif test_result.status == TestStatus.FAILURE_TO_PARSE_TESTS:
- print(red('[ERROR] ') + yellow('could not parse test results!'))
+ test = Test()
+ if not lines:
+ test.add_error('invalid KTAP input!')
+ test.status = TestStatus.FAILURE_TO_PARSE_TESTS
else:
- counts = print_and_count_results(test_result)
+ test = parse_test(lines, 0, [])
+ if test.status != TestStatus.NO_TESTS:
+ test.status = test.counts.get_status()
print_with_timestamp(DIVIDER)
- if test_result.status == TestStatus.SUCCESS:
- fmt = green
- elif test_result.status == TestStatus.SKIPPED:
- fmt = yellow
- else:
- fmt =red
- print_with_timestamp(
- fmt('Testing complete. %d tests run. %d failed. %d crashed. %d skipped.' %
- (counts.total(), counts.failed, counts.crashed, counts.skipped)))
- return test_result
+ print_summary_line(test)
+ return TestResult(test.status, test, lines)
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index 3251cef25b76..50c4d3e74115 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -106,10 +106,10 @@ class KUnitParserTest(unittest.TestCase):
with open(log_path) as file:
result = kunit_parser.extract_tap_lines(file.readlines())
self.assertContains('TAP version 14', result)
- self.assertContains(' # Subtest: example', result)
- self.assertContains(' 1..2', result)
- self.assertContains(' ok 1 - example_simple_test', result)
- self.assertContains(' ok 2 - example_mock_test', result)
+ self.assertContains('# Subtest: example', result)
+ self.assertContains('1..2', result)
+ self.assertContains('ok 1 - example_simple_test', result)
+ self.assertContains('ok 2 - example_mock_test', result)
self.assertContains('ok 1 - example', result)
def test_output_with_prefix_isolated_correctly(self):
@@ -117,28 +117,28 @@ class KUnitParserTest(unittest.TestCase):
with open(log_path) as file:
result = kunit_parser.extract_tap_lines(file.readlines())
self.assertContains('TAP version 14', result)
- self.assertContains(' # Subtest: kunit-resource-test', result)
- self.assertContains(' 1..5', result)
- self.assertContains(' ok 1 - kunit_resource_test_init_resources', result)
- self.assertContains(' ok 2 - kunit_resource_test_alloc_resource', result)
- self.assertContains(' ok 3 - kunit_resource_test_destroy_resource', result)
- self.assertContains(' foo bar #', result)
- self.assertContains(' ok 4 - kunit_resource_test_cleanup_resources', result)
- self.assertContains(' ok 5 - kunit_resource_test_proper_free_ordering', result)
+ self.assertContains('# Subtest: kunit-resource-test', result)
+ self.assertContains('1..5', result)
+ self.assertContains('ok 1 - kunit_resource_test_init_resources', result)
+ self.assertContains('ok 2 - kunit_resource_test_alloc_resource', result)
+ self.assertContains('ok 3 - kunit_resource_test_destroy_resource', result)
+ self.assertContains('foo bar #', result)
+ self.assertContains('ok 4 - kunit_resource_test_cleanup_resources', result)
+ self.assertContains('ok 5 - kunit_resource_test_proper_free_ordering', result)
self.assertContains('ok 1 - kunit-resource-test', result)
- self.assertContains(' foo bar # non-kunit output', result)
- self.assertContains(' # Subtest: kunit-try-catch-test', result)
- self.assertContains(' 1..2', result)
- self.assertContains(' ok 1 - kunit_test_try_catch_successful_try_no_catch',
+ self.assertContains('foo bar # non-kunit output', result)
+ self.assertContains('# Subtest: kunit-try-catch-test', result)
+ self.assertContains('1..2', result)
+ self.assertContains('ok 1 - kunit_test_try_catch_successful_try_no_catch',
result)
- self.assertContains(' ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch',
+ self.assertContains('ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch',
result)
self.assertContains('ok 2 - kunit-try-catch-test', result)
- self.assertContains(' # Subtest: string-stream-test', result)
- self.assertContains(' 1..3', result)
- self.assertContains(' ok 1 - string_stream_test_empty_on_creation', result)
- self.assertContains(' ok 2 - string_stream_test_not_empty_after_add', result)
- self.assertContains(' ok 3 - string_stream_test_get_string', result)
+ self.assertContains('# Subtest: string-stream-test', result)
+ self.assertContains('1..3', result)
+ self.assertContains('ok 1 - string_stream_test_empty_on_creation', result)
+ self.assertContains('ok 2 - string_stream_test_not_empty_after_add', result)
+ self.assertContains('ok 3 - string_stream_test_get_string', result)
self.assertContains('ok 3 - string-stream-test', result)
def test_parse_successful_test_log(self):
@@ -148,6 +148,13 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
+ def test_parse_successful_nested_tests_log(self):
+ all_passed_log = test_data_path('test_is_test_passed-all_passed_nested.log')
+ with open(all_passed_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines())
+ self.assertEqual(
+ kunit_parser.TestStatus.SUCCESS,
+ result.status)
def test_parse_failed_test_log(self):
failed_log = test_data_path('test_is_test_passed-failure.log')
@@ -162,17 +169,31 @@ class KUnitParserTest(unittest.TestCase):
with open(empty_log) as file:
result = kunit_parser.parse_run_tests(
kunit_parser.extract_tap_lines(file.readlines()))
- self.assertEqual(0, len(result.suites))
+ self.assertEqual(0, len(result.test.subtests))
self.assertEqual(
kunit_parser.TestStatus.FAILURE_TO_PARSE_TESTS,
result.status)
+ def test_missing_test_plan(self):
+ missing_plan_log = test_data_path('test_is_test_passed-'
+ 'missing_plan.log')
+ with open(missing_plan_log) as file:
+ result = kunit_parser.parse_run_tests(
+ kunit_parser.extract_tap_lines(
+ file.readlines()))
+ self.assertEqual(2, result.test.counts.errors)
+ self.assertEqual(
+ kunit_parser.TestStatus.SUCCESS,
+ result.status)
+
def test_no_tests(self):
- empty_log = test_data_path('test_is_test_passed-no_tests_run_with_header.log')
- with open(empty_log) as file:
+ header_log = test_data_path('test_is_test_passed-'
+ 'no_tests_run_with_header.log')
+ with open(header_log) as file:
result = kunit_parser.parse_run_tests(
- kunit_parser.extract_tap_lines(file.readlines()))
- self.assertEqual(0, len(result.suites))
+ kunit_parser.extract_tap_lines(
+ file.readlines()))
+ self.assertEqual(0, len(result.test.subtests))
self.assertEqual(
kunit_parser.TestStatus.NO_TESTS,
result.status)
@@ -182,15 +203,17 @@ class KUnitParserTest(unittest.TestCase):
print_mock = mock.patch('builtins.print').start()
with open(crash_log) as file:
result = kunit_parser.parse_run_tests(
- kunit_parser.extract_tap_lines(file.readlines()))
- print_mock.assert_any_call(StrContains('could not parse test results!'))
+ kunit_parser.extract_tap_lines(
+ file.readlines()))
+ print_mock.assert_any_call(StrContains('invalid KTAP input!'))
print_mock.stop()
file.close()
def test_crashed_test(self):
crashed_log = test_data_path('test_is_test_passed-crash.log')
with open(crashed_log) as file:
- result = kunit_parser.parse_run_tests(file.readlines())
+ result = kunit_parser.parse_run_tests(
+ file.readlines())
self.assertEqual(
kunit_parser.TestStatus.TEST_CRASHED,
result.status)
@@ -216,6 +239,23 @@ class KUnitParserTest(unittest.TestCase):
result.status)
file.close()
+ def test_ignores_hyphen(self):
+ hyphen_log = test_data_path('test_strip_hyphen.log')
+ file = open(hyphen_log)
+ result = kunit_parser.parse_run_tests(file.readlines())
+
+ # A skipped test does not fail the whole suite.
+ self.assertEqual(
+ kunit_parser.TestStatus.SUCCESS,
+ result.status)
+ self.assertEqual(
+ "sysctl_test",
+ result.test.subtests[0].name)
+ self.assertEqual(
+ "example",
+ result.test.subtests[1].name)
+ file.close()
+
def test_ignores_prefix_printk_time(self):
prefix_log = test_data_path('test_config_printk_time.log')
@@ -224,7 +264,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
def test_ignores_multiple_prefixes(self):
prefix_log = test_data_path('test_multiple_prefixes.log')
@@ -233,7 +273,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
def test_prefix_mixed_kernel_output(self):
mixed_prefix_log = test_data_path('test_interrupted_tap_output.log')
@@ -242,7 +282,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
def test_prefix_poundsign(self):
pound_log = test_data_path('test_pound_sign.log')
@@ -251,16 +291,16 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
def test_kernel_panic_end(self):
panic_log = test_data_path('test_kernel_panic_interrupt.log')
with open(panic_log) as file:
result = kunit_parser.parse_run_tests(file.readlines())
self.assertEqual(
- kunit_parser.TestStatus.TEST_CRASHED,
+ kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
def test_pound_no_prefix(self):
pound_log = test_data_path('test_pound_no_prefix.log')
@@ -269,7 +309,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
class LinuxSourceTreeTest(unittest.TestCase):
@@ -291,6 +331,14 @@ class LinuxSourceTreeTest(unittest.TestCase):
pass
tree = kunit_kernel.LinuxSourceTree('', kunitconfig_path=dir)
+ def test_kselftest_nested(self):
+ kselftest_log = test_data_path('test_is_test_passed-kselftest.log')
+ with open(kselftest_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines())
+ self.assertEqual(
+ kunit_parser.TestStatus.SUCCESS,
+ result.status)
+
# TODO: add more test cases.
@@ -322,6 +370,12 @@ class KUnitJsonTest(unittest.TestCase):
result = self._json_for('test_is_test_passed-no_tests_run_with_header.log')
self.assertEqual(0, len(result['sub_groups']))
+ def test_nested_json(self):
+ result = self._json_for('test_is_test_passed-all_passed_nested.log')
+ self.assertEqual(
+ {'name': 'example_simple_test', 'status': 'PASS'},
+ result["sub_groups"][0]["sub_groups"][0]["test_cases"][0])
+
class StrContains(str):
def __eq__(self, other):
return self in other
@@ -380,7 +434,7 @@ class KUnitMainTest(unittest.TestCase):
self.assertEqual(e.exception.code, 1)
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
- self.print_mock.assert_any_call(StrContains(' 0 tests run'))
+ self.print_mock.assert_any_call(StrContains('invalid KTAP input!'))
def test_exec_raw_output(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
@@ -388,7 +442,7 @@ class KUnitMainTest(unittest.TestCase):
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
for call in self.print_mock.call_args_list:
self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
- self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))
+ self.assertNotEqual(call, mock.call(StrContains(' 0 tests run!')))
def test_run_raw_output(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
@@ -397,7 +451,7 @@ class KUnitMainTest(unittest.TestCase):
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
for call in self.print_mock.call_args_list:
self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
- self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))
+ self.assertNotEqual(call, mock.call(StrContains(' 0 tests run!')))
def test_run_raw_output_kunit(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log b/tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log
new file mode 100644
index 000000000000..9d5b04fe43a6
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log
@@ -0,0 +1,34 @@
+TAP version 14
+1..2
+ # Subtest: sysctl_test
+ 1..4
+ # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed
+ ok 1 - sysctl_test_dointvec_null_tbl_data
+ # Subtest: example
+ 1..2
+ init_suite
+ # example_simple_test: initializing
+ # example_simple_test: example_simple_test passed
+ ok 1 - example_simple_test
+ # example_mock_test: initializing
+ # example_mock_test: example_mock_test passed
+ ok 2 - example_mock_test
+ kunit example: all tests passed
+ ok 2 - example
+ # sysctl_test_dointvec_table_len_is_zero: sysctl_test_dointvec_table_len_is_zero passed
+ ok 3 - sysctl_test_dointvec_table_len_is_zero
+ # sysctl_test_dointvec_table_read_but_position_set: sysctl_test_dointvec_table_read_but_position_set passed
+ ok 4 - sysctl_test_dointvec_table_read_but_position_set
+kunit sysctl_test: all tests passed
+ok 1 - sysctl_test
+ # Subtest: example
+ 1..2
+init_suite
+ # example_simple_test: initializing
+ # example_simple_test: example_simple_test passed
+ ok 1 - example_simple_test
+ # example_mock_test: initializing
+ # example_mock_test: example_mock_test passed
+ ok 2 - example_mock_test
+kunit example: all tests passed
+ok 2 - example
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-kselftest.log b/tools/testing/kunit/test_data/test_is_test_passed-kselftest.log
new file mode 100644
index 000000000000..65d3f27feaf2
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-kselftest.log
@@ -0,0 +1,14 @@
+TAP version 13
+1..2
+# selftests: membarrier: membarrier_test_single_thread
+# TAP version 13
+# 1..2
+# ok 1 sys_membarrier available
+# ok 2 sys membarrier invalid command test: command = -1, flags = 0, errno = 22. Failed as expected
+ok 1 selftests: membarrier: membarrier_test_single_thread
+# selftests: membarrier: membarrier_test_multi_thread
+# TAP version 13
+# 1..2
+# ok 1 sys_membarrier available
+# ok 2 sys membarrier invalid command test: command = -1, flags = 0, errno = 22. Failed as expected
+ok 2 selftests: membarrier: membarrier_test_multi_thread
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log b/tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log
new file mode 100644
index 000000000000..5cd17b7f818a
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log
@@ -0,0 +1,31 @@
+KTAP version 1
+ # Subtest: sysctl_test
+ # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed
+ ok 1 - sysctl_test_dointvec_null_tbl_data
+ # sysctl_test_dointvec_table_maxlen_unset: sysctl_test_dointvec_table_maxlen_unset passed
+ ok 2 - sysctl_test_dointvec_table_maxlen_unset
+ # sysctl_test_dointvec_table_len_is_zero: sysctl_test_dointvec_table_len_is_zero passed
+ ok 3 - sysctl_test_dointvec_table_len_is_zero
+ # sysctl_test_dointvec_table_read_but_position_set: sysctl_test_dointvec_table_read_but_position_set passed
+ ok 4 - sysctl_test_dointvec_table_read_but_position_set
+ # sysctl_test_dointvec_happy_single_positive: sysctl_test_dointvec_happy_single_positive passed
+ ok 5 - sysctl_test_dointvec_happy_single_positive
+ # sysctl_test_dointvec_happy_single_negative: sysctl_test_dointvec_happy_single_negative passed
+ ok 6 - sysctl_test_dointvec_happy_single_negative
+ # sysctl_test_dointvec_single_less_int_min: sysctl_test_dointvec_single_less_int_min passed
+ ok 7 - sysctl_test_dointvec_single_less_int_min
+ # sysctl_test_dointvec_single_greater_int_max: sysctl_test_dointvec_single_greater_int_max passed
+ ok 8 - sysctl_test_dointvec_single_greater_int_max
+kunit sysctl_test: all tests passed
+ok 1 - sysctl_test
+ # Subtest: example
+ 1..2
+init_suite
+ # example_simple_test: initializing
+ # example_simple_test: example_simple_test passed
+ ok 1 - example_simple_test
+ # example_mock_test: initializing
+ # example_mock_test: example_mock_test passed
+ ok 2 - example_mock_test
+kunit example: all tests passed
+ok 2 - example
diff --git a/tools/testing/kunit/test_data/test_strip_hyphen.log b/tools/testing/kunit/test_data/test_strip_hyphen.log
new file mode 100644
index 000000000000..92ac7c24b374
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_strip_hyphen.log
@@ -0,0 +1,16 @@
+KTAP version 1
+1..2
+ # Subtest: sysctl_test
+ 1..1
+ # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed
+ ok 1 - sysctl_test_dointvec_null_tbl_data
+kunit sysctl_test: all tests passed
+ok 1 - sysctl_test
+ # Subtest: example
+ 1..1
+init_suite
+ # example_simple_test: initializing
+ # example_simple_test: example_simple_test passed
+ ok 1 example_simple_test
+kunit example: all tests passed
+ok 2 example
base-commit: 28a8b037484044d79f3b8d409a076a925ac75f8d
--
2.33.0.800.g4c38ced690-goog
Currently, `run_kernel()` dumps all the kernel output to a file
(.kunit/test.log) and then opens the file and yields it to callers.
This made it easier to respect the requested timeout, if any.
But it means that we can't yield the results in real time, either to the
parser or to stdout (if --raw_output is set).
This change spins up a background thread to enforce the timeout, which
allows us to yield the kernel output in real time, while also copying it
to the .kunit/test.log file.
It's also careful to ensure that the .kunit/test.log file is complete,
even if the kunit_parser throws an exception or otherwise doesn't
consume every line; see the new `finally` block and unit test.
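Roughly, the new flow follows the sketch below (a simplified
illustration, not the actual kunit_kernel.py code), assuming `proc` is a
subprocess.Popen started with stdout=PIPE and `log_path` is the
.kunit/test.log path:

    def tee_output(proc, log_path):
        log = open(log_path, 'w')
        try:
            for line in proc.stdout:
                log.write(line)   # copy into .kunit/test.log ...
                yield line        # ... while handing the line to the caller live
        finally:
            # Runs even if the caller stops consuming lines or raises:
            # flush whatever the kernel printed after the last consumed line.
            log.write(proc.stdout.read())
            log.close()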
For example:
$ ./tools/testing/kunit/kunit.py run --arch=x86_64 --raw_output
<configure + build steps>
...
<can now see output from QEMU in real time>
This does not currently have a visible effect when --raw_output is not
passed, as kunit_parser.py currently only outputs everything at the end.
But that could change, and this patch is a necessary step towards
showing parsed test results in real time.
Signed-off-by: Daniel Latypov <dlatypov(a)google.com>
---
tools/testing/kunit/kunit_kernel.py | 73 +++++++++++++++-----------
tools/testing/kunit/kunit_tool_test.py | 17 ++++++
2 files changed, 60 insertions(+), 30 deletions(-)
diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
index 2c6f916ccbaf..b8cba8123aa3 100644
--- a/tools/testing/kunit/kunit_kernel.py
+++ b/tools/testing/kunit/kunit_kernel.py
@@ -12,7 +12,8 @@ import subprocess
import os
import shutil
import signal
-from typing import Iterator, Optional, Tuple
+import threading
+from typing import Iterator, List, Optional, Tuple
from contextlib import ExitStack
@@ -103,8 +104,8 @@ class LinuxSourceTreeOperations(object):
if stderr: # likely only due to build warnings
print(stderr.decode())
- def run(self, params, timeout, build_dir, outfile) -> None:
- pass
+ def start(self, params: List[str], build_dir: str) -> subprocess.Popen:
+ raise RuntimeError('not implemented!')
class LinuxSourceTreeOperationsQemu(LinuxSourceTreeOperations):
@@ -123,7 +124,7 @@ class LinuxSourceTreeOperationsQemu(LinuxSourceTreeOperations):
kconfig.parse_from_string(self._kconfig)
base_kunitconfig.merge_in_entries(kconfig)
- def run(self, params, timeout, build_dir, outfile):
+ def start(self, params: List[str], build_dir: str) -> subprocess.Popen:
kernel_path = os.path.join(build_dir, self._kernel_path)
qemu_command = ['qemu-system-' + self._qemu_arch,
'-nodefaults',
@@ -134,18 +135,10 @@ class LinuxSourceTreeOperationsQemu(LinuxSourceTreeOperations):
'-nographic',
'-serial stdio'] + self._extra_qemu_params
print('Running tests with:\n$', ' '.join(qemu_command))
- with open(outfile, 'w') as output:
- process = subprocess.Popen(' '.join(qemu_command),
- stdin=subprocess.PIPE,
- stdout=output,
- stderr=subprocess.STDOUT,
- text=True, shell=True)
- try:
- process.wait(timeout=timeout)
- except Exception as e:
- print(e)
- process.terminate()
- return process
+ return subprocess.Popen(' '.join(qemu_command),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ text=True, shell=True)
class LinuxSourceTreeOperationsUml(LinuxSourceTreeOperations):
"""An abstraction over command line operations performed on a source tree."""
@@ -175,17 +168,13 @@ class LinuxSourceTreeOperationsUml(LinuxSourceTreeOperations):
kunit_parser.print_with_timestamp(
'Starting Kernel with all configs takes a few minutes...')
- def run(self, params, timeout, build_dir, outfile):
+ def start(self, params: List[str], build_dir: str) -> subprocess.Popen:
"""Runs the Linux UML binary. Must be named 'linux'."""
linux_bin = get_file_path(build_dir, 'linux')
- outfile = get_outfile_path(build_dir)
- with open(outfile, 'w') as output:
- process = subprocess.Popen([linux_bin] + params,
- stdin=subprocess.PIPE,
- stdout=output,
- stderr=subprocess.STDOUT,
- text=True)
- process.wait(timeout)
+ return subprocess.Popen([linux_bin] + params,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ text=True)
def get_kconfig_path(build_dir) -> str:
return get_file_path(build_dir, KCONFIG_PATH)
@@ -330,12 +319,36 @@ class LinuxSourceTree(object):
args.extend(['mem=1G', 'console=tty', 'kunit_shutdown=halt'])
if filter_glob:
args.append('kunit.filter_glob='+filter_glob)
- outfile = get_outfile_path(build_dir)
- self._ops.run(args, timeout, build_dir, outfile)
- subprocess.call(['stty', 'sane'])
- with open(outfile, 'r') as file:
- for line in file:
+
+ process = self._ops.start(args, build_dir)
+ assert process.stdout is not None # tell mypy it's set
+
+ # Enforce the timeout in a background thread.
+ def _wait_proc():
+ try:
+ process.wait(timeout=timeout)
+ except Exception as e:
+ print(e)
+ process.terminate()
+ process.wait()
+ waiter = threading.Thread(target=_wait_proc)
+ waiter.start()
+
+ output = open(get_outfile_path(build_dir), 'w')
+ try:
+ # Tee the output to the file and to our caller in real time.
+ for line in process.stdout:
+ output.write(line)
yield line
+ # This runs even if our caller doesn't consume every line.
+ finally:
+ # Flush any leftover output to the file
+ output.write(process.stdout.read())
+ output.close()
+ process.stdout.close()
+
+ waiter.join()
+ subprocess.call(['stty', 'sane'])
def signal_handler(self, sig, frame) -> None:
logging.error('Build interruption occurred. Cleaning console.')
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index 619c4554cbff..f9a7398a9584 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -14,6 +14,7 @@ import tempfile, shutil # Handling test_tmpdir
import itertools
import json
import signal
+import subprocess
import os
import kunit_config
@@ -291,6 +292,22 @@ class LinuxSourceTreeTest(unittest.TestCase):
pass
tree = kunit_kernel.LinuxSourceTree('', kunitconfig_path=dir)
+ def test_run_kernel_hits_exception(self):
+ def fake_start(unused_args, unused_build_dir):
+ return subprocess.Popen(['echo "hi\nbye"'], shell=True, text=True, stdout=subprocess.PIPE)
+
+ with tempfile.TemporaryDirectory('') as build_dir:
+ tree = kunit_kernel.LinuxSourceTree(build_dir, load_config=False)
+ mock.patch.object(tree._ops, 'start', side_effect=fake_start).start()
+
+ with self.assertRaises(ValueError):
+ for line in tree.run_kernel(build_dir=build_dir):
+ self.assertEqual(line, 'hi\n')
+ raise ValueError('uh oh, did not read all output')
+
+ with open(kunit_kernel.get_outfile_path(build_dir), 'rt') as outfile:
+ self.assertEqual(outfile.read(), 'hi\nbye\n', msg='Missing some output')
+
# TODO: add more test cases.
base-commit: 3b29021ddd10cfb6b2565c623595bd3b02036f33
--
2.33.0.800.g4c38ced690-goog
Drop some variables in unit tests that were unused and/or add assertions
based on them.
For ExitStack, it was imported, but the `es` variable wasn't used so it
didn't do anything, and we were leaking the file objects.
Refactor it to just use nested `with` statements to properly close them.
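A tiny standalone sketch of the difference (using a throwaway temp file,
not the kunit_kernel.py code itself): an ExitStack only closes what is
registered with it via enter_context(), so files opened inside the block
but never registered are leaked, while a plain `with open(...)` closes
them on exit.

    import tempfile
    from contextlib import ExitStack

    tmp = tempfile.NamedTemporaryFile(delete=False)
    tmp.close()

    with ExitStack() as es:
        leaked = open(tmp.name)   # never passed to es.enter_context(), so never closed
    print(leaked.closed)          # False: the file object leaked

    with open(tmp.name) as f:     # the nested-with form the patch switches to
        pass
    print(f.closed)               # True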
And drop the direct use of .close() on file objects in the kunit tool
unit test, as these can be leaked if test assertions fail.
Signed-off-by: Daniel Latypov <dlatypov(a)google.com>
---
tools/testing/kunit/kunit.py | 1 -
tools/testing/kunit/kunit_kernel.py | 12 ++++--------
tools/testing/kunit/kunit_tool_test.py | 18 ++++++++----------
3 files changed, 12 insertions(+), 19 deletions(-)
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index 66f67af97971..1b2b7f06bb8c 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -18,7 +18,6 @@ from collections import namedtuple
from enum import Enum, auto
from typing import Iterable
-import kunit_config
import kunit_json
import kunit_kernel
import kunit_parser
diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
index 2c6f916ccbaf..1870e75ff153 100644
--- a/tools/testing/kunit/kunit_kernel.py
+++ b/tools/testing/kunit/kunit_kernel.py
@@ -14,10 +14,6 @@ import shutil
import signal
from typing import Iterator, Optional, Tuple
-from contextlib import ExitStack
-
-from collections import namedtuple
-
import kunit_config
import kunit_parser
import qemu_config
@@ -168,10 +164,10 @@ class LinuxSourceTreeOperationsUml(LinuxSourceTreeOperations):
process.wait()
kunit_parser.print_with_timestamp(
'Disabling broken configs to run KUnit tests...')
- with ExitStack() as es:
- config = open(get_kconfig_path(build_dir), 'a')
- disable = open(BROKEN_ALLCONFIG_PATH, 'r').read()
- config.write(disable)
+
+ with open(get_kconfig_path(build_dir), 'a') as config:
+ with open(BROKEN_ALLCONFIG_PATH, 'r') as disable:
+ config.write(disable.read())
kunit_parser.print_with_timestamp(
'Starting Kernel with all configs takes a few minutes...')
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index 619c4554cbff..cad37a98e599 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -185,7 +185,7 @@ class KUnitParserTest(unittest.TestCase):
kunit_parser.extract_tap_lines(file.readlines()))
print_mock.assert_any_call(StrContains('could not parse test results!'))
print_mock.stop()
- file.close()
+ self.assertEqual(0, len(result.suites))
def test_crashed_test(self):
crashed_log = test_data_path('test_is_test_passed-crash.log')
@@ -197,24 +197,22 @@ class KUnitParserTest(unittest.TestCase):
def test_skipped_test(self):
skipped_log = test_data_path('test_skip_tests.log')
- file = open(skipped_log)
- result = kunit_parser.parse_run_tests(file.readlines())
+ with open(skipped_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines())
# A skipped test does not fail the whole suite.
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- file.close()
def test_skipped_all_tests(self):
skipped_log = test_data_path('test_skip_all_tests.log')
- file = open(skipped_log)
- result = kunit_parser.parse_run_tests(file.readlines())
+ with open(skipped_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines())
self.assertEqual(
kunit_parser.TestStatus.SKIPPED,
result.status)
- file.close()
def test_ignores_prefix_printk_time(self):
@@ -283,13 +281,13 @@ class LinuxSourceTreeTest(unittest.TestCase):
def test_valid_kunitconfig(self):
with tempfile.NamedTemporaryFile('wt') as kunitconfig:
- tree = kunit_kernel.LinuxSourceTree('', kunitconfig_path=kunitconfig.name)
+ kunit_kernel.LinuxSourceTree('', kunitconfig_path=kunitconfig.name)
def test_dir_kunitconfig(self):
with tempfile.TemporaryDirectory('') as dir:
- with open(os.path.join(dir, '.kunitconfig'), 'w') as f:
+ with open(os.path.join(dir, '.kunitconfig'), 'w'):
pass
- tree = kunit_kernel.LinuxSourceTree('', kunitconfig_path=dir)
+ kunit_kernel.LinuxSourceTree('', kunitconfig_path=dir)
# TODO: add more test cases.
base-commit: 3b29021ddd10cfb6b2565c623595bd3b02036f33
--
2.33.0.685.g46640cef36-goog
Hi Linus,
Please pull the following Kselftest fixes update for Linux 5.15-rc5.
This Kselftest fixes update for Linux 5.15-rc5 consists of a fix
for implicit declaration warnings in the drivers/dma-buf test.
I have been seeing this for a while and managed to fix it.
diff is attached.
thanks,
-- Shuah
----------------------------------------------------------------
The following changes since commit f5013d412a43662b63f3d5f3a804d63213acd471:
selftests: kvm: fix get_run_delay() ignoring fscanf() return warn (2021-09-16 12:57:32 -0600)
are available in the Git repository at:
git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest tags/linux-kselftest-fixes-5.15-rc5
for you to fetch changes up to 2f9602870886af74d97bac23ee6db5f5466d0a49:
selftests: drivers/dma-buf: Fix implicit declaration warns (2021-09-27 09:52:29 -0600)
----------------------------------------------------------------
linux-kselftest-fixes-5.15-rc5
This Kselftest fixes update for Linux 5.15-rc5 consists of a fix
to implicit declaration warns in drivers/dma-buf test.
----------------------------------------------------------------
Shuah Khan (1):
selftests: drivers/dma-buf: Fix implicit declaration warns
tools/testing/selftests/drivers/dma-buf/udmabuf.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------
When a user filters by a suite and not a test, e.g.
$ ./tools/testing/kunit/kunit.py run 'suite_name'
it hits this code
const int len = strlen(filter_glob);
...
parsed->suite_glob = kmalloc(len, GFP_KERNEL);
which fails to allocate space for the terminating NULL.
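A minimal userspace sketch of the off-by-one (hypothetical, using libc
calls rather than the kernel helpers): strlen() does not count the
trailing '\0', so copying into a buffer of exactly `len` bytes writes
one byte past the allocation unless `len + 1` is reserved.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        const char *filter_glob = "suite_name";
        size_t len = strlen(filter_glob);        /* 10, excludes the '\0' */
        char *suite_glob = calloc(1, len + 1);   /* 11 bytes: room for the '\0' */

        strcpy(suite_glob, filter_glob);         /* with only len bytes this would overflow */
        printf("%s\n", suite_glob);
        free(suite_glob);
        return 0;
    }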
Somehow, it seems like we can't easily reproduce this under UML, so the
existing `parse_filter_test()` didn't catch this.
Fix this by allocating `len + 1` and switching to kzalloc() just to be a
bit more defensive. We're only going to run this code once per kernel
boot, and it should never be very long.
Also update the unit tests to be a bit more cautious.
This bug showed up as a NULL pointer dereference here:
> KUNIT_EXPECT_STREQ(test, (const char *)filtered.start[0][0]->name, "suite0");
`filtered.start[0][0]` was NULL, and `name` is at offset 0 in the struct,
so `...->name` was also NULL.
Fixes: 3b29021ddd10 ("kunit: tool: allow filtering test cases via glob")
Reported-by: kernel test robot <oliver.sang(a)intel.com>
Signed-off-by: Daniel Latypov <dlatypov(a)google.com>
---
lib/kunit/executor.c | 2 +-
lib/kunit/executor_test.c | 1 +
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index bab3ab940acc..1d7fecd33261 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -33,7 +33,7 @@ static void kunit_parse_filter_glob(struct kunit_test_filter *parsed,
const char *period = strchr(filter_glob, '.');
if (!period) {
- parsed->suite_glob = kmalloc(len, GFP_KERNEL);
+ parsed->suite_glob = kzalloc(len + 1, GFP_KERNEL);
parsed->test_glob = NULL;
strcpy(parsed->suite_glob, filter_glob);
return;
diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c
index e6323f398dfa..7d2b8dc668b1 100644
--- a/lib/kunit/executor_test.c
+++ b/lib/kunit/executor_test.c
@@ -149,6 +149,7 @@ static void filter_suites_test(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start[0]);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start[0][0]);
KUNIT_EXPECT_STREQ(test, (const char *)filtered.start[0][0]->name, "suite0");
}
base-commit: 3b29021ddd10cfb6b2565c623595bd3b02036f33
--
2.33.0.800.g4c38ced690-goog
This change adds some initial kunit tests for the MCTP core. We'll
expand the coverage in a future series, and augment with a few
selftests, but this establishes a baseline set of tests for now.
Thanks to the kunit folks for the framework!
Cheers,
Jeremy
---
v2:
- fix MCTP=m, KUNIT={y,m} breakage
- fix mctp test netdev initialisation
- strict route reference count checking
---
Jeremy Kerr (5):
mctp: Add initial test structure and fragmentation test
mctp: Add test utils
mctp: Add packet rx tests
mctp: Add route input to socket tests
mctp: Add input reassembly tests
net/mctp/Kconfig | 5 +
net/mctp/Makefile | 3 +
net/mctp/route.c | 5 +
net/mctp/test/route-test.c | 544 +++++++++++++++++++++++++++++++++++++
net/mctp/test/utils.c | 67 +++++
net/mctp/test/utils.h | 20 ++
6 files changed, 644 insertions(+)
create mode 100644 net/mctp/test/route-test.c
create mode 100644 net/mctp/test/utils.c
create mode 100644 net/mctp/test/utils.h
--
2.33.0
This change adds some initial kunit tests for the MCTP core. We'll
expand the coverage in a future series, and augment with a few
selftests, but this establishes a baseline set of tests for now.
Thanks to the kunit folks for the framework!
Cheers,
Jeremy
---
Jeremy Kerr (5):
mctp: Add initial test structure and fragmentation test
mctp: Add test utils
mctp: Add packet rx tests
mctp: Add route input to socket tests
mctp: Add input reassembly tests
net/mctp/Kconfig | 5 +
net/mctp/Makefile | 3 +
net/mctp/route.c | 5 +
net/mctp/test/route-test.c | 532 +++++++++++++++++++++++++++++++++++++
net/mctp/test/utils.c | 67 +++++
net/mctp/test/utils.h | 20 ++
6 files changed, 632 insertions(+)
create mode 100644 net/mctp/test/route-test.c
create mode 100644 net/mctp/test/utils.c
create mode 100644 net/mctp/test/utils.h
--
2.33.0
Allow running each suite or each test case alone per kernel boot.
The motivation for this is to debug "test hermeticity" issues.
This new --run_isolated flag would be a good first step to try and
narrow down root causes.
Context: sometimes tests pass/fail depending on what ran before them.
Memory corruption errors in particular might only cause noticeable
issues later on. But you can also have the opposite, where "fixing" one
test causes another to start failing.
Usage:
$ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit --run_isolated=suite
$ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit --run_isolated=test
$ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit --run_isolated=test example
The last one would provide output like
======== [PASSED] example ========
[PASSED] example_simple_test
============================================================
Testing complete. 1 tests run. 0 failed. 0 crashed. 0 skipped.
Starting KUnit Kernel (2/3)...
============================================================
======== [SKIPPED] example ========
[SKIPPED] example_skip_test # SKIP this test should be skipped
============================================================
Testing complete. 1 tests run. 0 failed. 0 crashed. 1 skipped.
Starting KUnit Kernel (3/3)...
============================================================
======== [SKIPPED] example ========
[SKIPPED] example_mark_skipped_test # SKIP this test should be skipped
============================================================
Testing complete. 1 tests run. 0 failed. 0 crashed. 1 skipped.
See the last patch's description for a bit more detail.
Meta:
The first patch is from another series with just a reworded commit
message, https://lore.kernel.org/linux-kselftest/20210805235145.2528054-2-dlatypov@g…
This patch series is based on Shuah's kunit branch:
https://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git/?…
Changes:
v1 -> v2: rebase onto Shuah's kunit branch, fix missing code in patch 1.
v2 -> v3: fix mypy errors, drop test plan from output, fix pre-existing
bug where kunit was not actually tracking test execution time (new patch 3).
Daniel Latypov (4):
kunit: add 'kunit.action' param to allow listing out tests
kunit: tool: factor exec + parse steps into a function
kunit: tool: actually track how long it took to run tests
kunit: tool: support running each suite/test separately
lib/kunit/executor.c | 45 ++++++++-
tools/testing/kunit/kunit.py | 129 +++++++++++++++++--------
tools/testing/kunit/kunit_tool_test.py | 40 ++++++++
3 files changed, 169 insertions(+), 45 deletions(-)
base-commit: 3b29021ddd10cfb6b2565c623595bd3b02036f33
--
2.33.0.685.g46640cef36-goog
The structleak plugin causes the stack frame size to grow immensely when
used with KUnit; this is caused because KUnit allocates lots of
moderately sized structs on the stack as part of its assertion macro
implementation. For most tests with small to moderately sized test
cases there are never enough KUnit assertions to be an issue at all;
even when a single test case has many KUnit assertions, the compiler
should never put all these struct allocations on the stack at the same
time since the scope of the structs is so limited; however, the
structleak plugin does not seem to respect the compiler doing the right
thing and will still warn of excessive stack size in some cases.
These patches are not a permanent solution since new tests can be added
with huge test cases, but this serves as a stop gap to stop structleak
from being used on KUnit tests which will currently result in excessive
stack size.
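As a rough sketch of the mechanism (assumed spelling; the actual
definition lives in the scripts/Makefile.gcc-plugins change listed in
the diffstat below), a test Makefile opts its objects out via a
dedicated make variable along these lines:

    # Hypothetical usage of the variable added by the "makefile var" patch:
    # expands to the plugin's disable argument only when structleak is enabled.
    CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN)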
Please see the discussion thread here[1] for more context.
Changes since last revision:
- Dropped mmc: sdhci-of-aspeed patch since it was not a pure test and I
could not reproduce the stack size warning anyway.
- Removed Wframe-larger-than=10240 warning from the bitfield kunit
test.
- All other patches are the same except with updated
reviewers/contributor commit footers.
[1] https://lore.kernel.org/linux-arm-kernel/CAFd5g44udqkDiYBWh+VeDVJ=ELXeoXwun…
Arnd Bergmann (1):
bitfield: build kunit tests without structleak plugin
Brendan Higgins (4):
gcc-plugins/structleak: add makefile var for disabling structleak
iio/test-format: build kunit tests without structleak plugin
device property: build kunit tests without structleak plugin
thunderbolt: build kunit tests without structleak plugin
drivers/base/test/Makefile | 2 +-
drivers/iio/test/Makefile | 1 +
drivers/thunderbolt/Makefile | 1 +
lib/Makefile | 2 +-
scripts/Makefile.gcc-plugins | 4 ++++
5 files changed, 8 insertions(+), 2 deletions(-)
base-commit: 02d5e016800d082058b3d3b7c3ede136cdc6ddcb
--
2.33.0.685.g46640cef36-goog
The structleak plugin causes the stack frame size to grow immensely when
used with KUnit; this is caused because KUnit allocates lots of
moderately sized structs on the stack as part of its assertion macro
implementation. For most tests with small to moderately sized tests
cases there are never enough KUnit assertions to be an issue at all;
even when a single test cases has many KUnit assertions, the compiler
should never put all these struct allocations on the stack at the same
time since the scope of the structs is so limited; however, the
structleak plugin does not seem to respect the compiler doing the right
thing and will still warn of excessive stack size in some cases.
These patches are not a permanent solution since new tests can be added
with huge test cases, but this serves as a stop gap to stop structleak
from being used on KUnit tests which will currently result in excessive
stack size.
Of the following patches, I think the thunderbolt patch may be
unnecessary since Linus already fixed that test. Additionally, I was not
able to reproduce the error on the sdhci-of-aspeed test. Nevertheless, I
included these test cases for completeness. Please see my discussion
with Arnd for more context[1].
NOTE: Arnd did the legwork for most of these patches, but did not
actually share code for some of them, so I left his Signed-off-by off of
those patches as I don't want to misrepresent him. Arnd, please sign off
on those patches at your soonest convenience.
[1] https://lore.kernel.org/linux-arm-kernel/CAFd5g44udqkDiYBWh+VeDVJ=ELXeoXwun…
Arnd Bergmann (1):
bitfield: build kunit tests without structleak plugin
Brendan Higgins (5):
gcc-plugins/structleak: add makefile var for disabling structleak
iio/test-format: build kunit tests without structleak plugin
device property: build kunit tests without structleak plugin
thunderbolt: build kunit tests without structleak plugin
mmc: sdhci-of-aspeed: build kunit tests without structleak plugin
drivers/base/test/Makefile | 2 +-
drivers/iio/test/Makefile | 1 +
drivers/mmc/host/Makefile | 1 +
drivers/thunderbolt/Makefile | 1 +
lib/Makefile | 2 +-
scripts/Makefile.gcc-plugins | 4 ++++
6 files changed, 9 insertions(+), 2 deletions(-)
base-commit: 316346243be6df12799c0b64b788e06bad97c30b
--
2.33.0.464.g1972c5931b-goog
Allow running each suite or each test case alone per kernel boot.
The motivation for this is to debug "test hermeticity" issues.
This new --run_isolated flag would be a good first step to try and
narrow down root causes.
Context: sometimes tests pass/fail depending on what ran before them.
Memory corruption errors in particular might only cause noticeable
issues later on. But you can also have the opposite, where "fixing" one
test causes another to start failing.
Usage:
$ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit --run_isolated=suite
$ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit --run_isolated=test
$ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit --run_isolated=test example
The last one would provide output like
======== [PASSED] example ========
[PASSED] example_simple_test
============================================================
Testing complete. 1 tests run. 0 failed. 0 crashed. 0 skipped.
Starting KUnit Kernel (2/3)...
============================================================
======== [SKIPPED] example ========
[SKIPPED] example_skip_test # SKIP this test should be skipped
============================================================
Testing complete. 1 tests run. 0 failed. 0 crashed. 1 skipped.
Starting KUnit Kernel (3/3)...
============================================================
======== [SKIPPED] example ========
[SKIPPED] example_mark_skipped_test # SKIP this test should be skipped
============================================================
Testing complete. 1 tests run. 0 failed. 0 crashed. 1 skipped.
See the last patch's description for a bit more detail.
Meta:
The first patch is from another series with just a reworded commit
message, https://lore.kernel.org/linux-kselftest/20210805235145.2528054-2-dlatypov@g…
This patch series is based on Shuah's kunit branch:
https://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git/?…
Changes:
v1 -> v2: rebase onto Shuah's kunit branch, fix missing code in patch 1.
Daniel Latypov (3):
kunit: add 'kunit.action' param to allow listing out tests
kunit: tool: factor exec + parse steps into a function
kunit: tool: support running each suite/test separately
lib/kunit/executor.c | 45 ++++++++-
tools/testing/kunit/kunit.py | 127 +++++++++++++++++--------
tools/testing/kunit/kunit_tool_test.py | 40 ++++++++
3 files changed, 167 insertions(+), 45 deletions(-)
base-commit: 3b29021ddd10cfb6b2565c623595bd3b02036f33
--
2.33.0.685.g46640cef36-goog
nr_cpus = CPU_COUNT(&possible_mask) is the number of CPUs available in
possible_mask. As a result, "cpu = i % nr_cpus" may only ever yield CPU
indices that are not available in possible_mask.
Suppose the server has 8 CPUs. The failure below is encountered
immediately if the task is bound to CPUs 5 and 6.
==== Test Assertion Failure ====
rseq_test.c:228: i > (NR_TASK_MIGRATIONS / 2)
pid=10127 tid=10127 errno=4 - Interrupted system call
1 0x00000000004018e5: main at rseq_test.c:227
2 0x00007fcc8fc66bf6: ?? ??:0
3 0x0000000000401959: _start at ??:?
Only performed 4 KVM_RUNs, task stalled too much?
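A small userspace sketch of the mismatch (hypothetical, not the selftest
itself): with only CPUs 5 and 6 allowed, CPU_COUNT() is 2, so
"i % nr_cpus" only ever yields 0 or 1, neither of which is in the mask,
and every sched_setaffinity() call would fail.

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
        cpu_set_t possible_mask;
        int i, cpu, nr_cpus;

        CPU_ZERO(&possible_mask);
        CPU_SET(5, &possible_mask);
        CPU_SET(6, &possible_mask);

        nr_cpus = CPU_COUNT(&possible_mask);    /* 2 */
        for (i = 0; i < 4; i++) {
            cpu = i % nr_cpus;                  /* 0 or 1, never 5 or 6 */
            printf("iteration %d -> cpu %d (in possible_mask: %d)\n",
                   i, cpu, CPU_ISSET(cpu, &possible_mask) ? 1 : 0);
        }
        return 0;
    }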
Signed-off-by: Dongli Zhang <dongli.zhang(a)oracle.com>
---
tools/testing/selftests/kvm/rseq_test.c | 18 +++++++++++++++++-
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/tools/testing/selftests/kvm/rseq_test.c b/tools/testing/selftests/kvm/rseq_test.c
index c5e0dd664a7b..41df5173970c 100644
--- a/tools/testing/selftests/kvm/rseq_test.c
+++ b/tools/testing/selftests/kvm/rseq_test.c
@@ -10,6 +10,7 @@
#include <signal.h>
#include <syscall.h>
#include <sys/ioctl.h>
+#include <sys/sysinfo.h>
#include <asm/barrier.h>
#include <linux/atomic.h>
#include <linux/rseq.h>
@@ -43,6 +44,18 @@ static bool done;
static atomic_t seq_cnt;
+static int get_max_cpu_idx(void)
+{
+ int nproc = get_nprocs_conf();
+ int i, max = -ENOENT;
+
+ for (i = 0; i < nproc; i++)
+ if (CPU_ISSET(i, &possible_mask))
+ max = i;
+
+ return max;
+}
+
static void guest_code(void)
{
for (;;)
@@ -61,10 +74,13 @@ static void *migration_worker(void *ign)
{
cpu_set_t allowed_mask;
int r, i, nr_cpus, cpu;
+ int max_cpu_idx;
CPU_ZERO(&allowed_mask);
- nr_cpus = CPU_COUNT(&possible_mask);
+ max_cpu_idx = get_max_cpu_idx();
+ TEST_ASSERT(max_cpu_idx >= 0, "Invalid possible_mask");
+ nr_cpus = max_cpu_idx + 1;
for (i = 0; i < NR_TASK_MIGRATIONS; i++) {
cpu = i % nr_cpus;
--
2.17.1
This series fixes up a few issues introduced into vec-syscfg during
refactoring in the review process, then adds a new test which ensures
that the behaviour when we attempt to set a vector length which is not
supported by the current system matches what is documented in the SVE
ABI documentation.
v2:
- Fix handling of missing VLs when checking that vector length setting
works as expected.
Mark Brown (4):
selftests: arm64: Fix printf() format mismatch in vec-syscfg
selftests: arm64: Remove bogus error check on writing to files
selftests: arm64: Fix and enable test for setting current VL in
vec-syscfg
selftests: arm64: Verify that all possible vector lengths are handled
tools/testing/selftests/arm64/fp/vec-syscfg.c | 89 ++++++++++++++++---
1 file changed, 76 insertions(+), 13 deletions(-)
base-commit: 6880fa6c56601bb8ed59df6c30fd390cc5f6dd8f
--
2.20.1
This series fixes up a few issues introduced into vec-syscfg during
refactoring in the review process, then adds a new test which ensures
that the behaviour when we attempt to set a vector length which is not
supported by the current system matches what is documented in the SVE
ABI documentation.
v3:
- Rebased onto v5.14-rc3.
- Check to see if we discovered the system vector lengths before trying
to set all possible vector lengths since we need that information to
validate the results.
v2:
- Fix handling of missing VLs when checking that vector length setting
works as expected.
Mark Brown (4):
selftests: arm64: Fix printf() format mismatch in vec-syscfg
selftests: arm64: Remove bogus error check on writing to files
selftests: arm64: Fix and enable test for setting current VL in
vec-syscfg
selftests: arm64: Verify that all possible vector lengths are handled
tools/testing/selftests/arm64/fp/vec-syscfg.c | 95 ++++++++++++++++---
1 file changed, 82 insertions(+), 13 deletions(-)
base-commit: 5816b3e6577eaa676ceb00a848f0fd65fe2adc29
--
2.20.1
This series overhauls the selftests we have for the SVE ptrace interface
to make them much more comprehensive than they are currently, making the
coverage of the data read and written more complete. The new coverage
for setting data on all vector lengths showed up an issue with using
the wrong buffer size with ptrace, which was reported and fixed by:
https://lore.kernel.org/linux-arm-kernel/20210909165356.10675-1-broonie@ker…
(arm64/sve: Use correct size when reinitialising SVE state).
Mark Brown (8):
selftests: arm64: Use a define for the number of SVE ptrace tests to
be run
selftests: arm64: Don't log child creation as a test in SVE ptrace
test
selftests: arm64: Remove extraneous register setting code
selftests: arm64: Document what the SVE ptrace test is doing
selftests: arm64: Clarify output when verifying SVE register set
selftests: arm64: Verify interoperation of SVE and FPSIMD register
sets
selftests: arm64: More comprehensively test the SVE ptrace interface
selftests: arm64: Move FPSIMD in SVE ptrace test into a function
tools/testing/selftests/arm64/fp/Makefile | 2 +-
tools/testing/selftests/arm64/fp/TODO | 9 +-
.../selftests/arm64/fp/sve-ptrace-asm.S | 33 --
tools/testing/selftests/arm64/fp/sve-ptrace.c | 460 ++++++++++++------
4 files changed, 321 insertions(+), 183 deletions(-)
delete mode 100644 tools/testing/selftests/arm64/fp/sve-ptrace-asm.S
base-commit: 6880fa6c56601bb8ed59df6c30fd390cc5f6dd8f
--
2.20.1
RFC: https://lkml.org/lkml/2021/6/4/791
PATCH v1: https://lkml.org/lkml/2021/6/16/805
PATCH v2: https://lkml.org/lkml/2021/7/6/138
PATCH v3: https://lkml.org/lkml/2021/7/12/2799
PATCH v4: https://lkml.org/lkml/2021/7/16/532
PATCH v5: https://lkml.org/lkml/2021/7/19/247
PATCH v6: https://lkml.org/lkml/2021/7/20/36
PATCH v7: https://lkml.org/lkml/2021/7/23/26
Changelog v7-->v8
1. Rebased and tested against 5.15
2. Added a selftest to check if the energy and frequency attribues
exist and their files populated
Also, have implemented a POC using this interface for the powerpc-utils'
ppc64_cpu --frequency command-line tool to utilize this information
in userspace.
The POC for the new interface has been sent to the powerpc-utils mailing
list for early review: https://groups.google.com/g/powerpc-utils-devel/c/r4i7JnlyQ8s
Sample output from the powerpc-utils tool is as follows:
# ppc64_cpu --frequency
Power and Performance Mode: XXXX
Idle Power Saver Status : XXXX
Processor Folding Status : XXXX --> Printed if Idle power save status is supported
Platform reported frequencies --> Frequencies reported from the platform's H_CALL i.e PAPR interface
min : NNNN GHz
max : NNNN GHz
static : NNNN GHz
Tool Computed frequencies
min : NNNN GHz (cpu XX)
max : NNNN GHz (cpu XX)
avg : NNNN GHz
Pratik R. Sampat (2):
powerpc/pseries: Interface to represent PAPR firmware attributes
selftest/powerpc: Add PAPR sysfs attributes sniff test
.../sysfs-firmware-papr-energy-scale-info | 26 ++
arch/powerpc/include/asm/hvcall.h | 24 +-
arch/powerpc/kvm/trace_hv.h | 1 +
arch/powerpc/platforms/pseries/Makefile | 3 +-
.../pseries/papr_platform_attributes.c | 312 ++++++++++++++++++
tools/testing/selftests/powerpc/Makefile | 1 +
.../powerpc/papr_attributes/.gitignore | 2 +
.../powerpc/papr_attributes/Makefile | 7 +
.../powerpc/papr_attributes/attr_test.c | 107 ++++++
9 files changed, 481 insertions(+), 2 deletions(-)
create mode 100644 Documentation/ABI/testing/sysfs-firmware-papr-energy-scale-info
create mode 100644 arch/powerpc/platforms/pseries/papr_platform_attributes.c
create mode 100644 tools/testing/selftests/powerpc/papr_attributes/.gitignore
create mode 100644 tools/testing/selftests/powerpc/papr_attributes/Makefile
create mode 100644 tools/testing/selftests/powerpc/papr_attributes/attr_test.c
--
2.31.1
The phc.sh script in the ptp directory is still using exit 0 when
the test has been skipped due to some unmet requirements.
Use the kselftest framework skip code instead so it can help us to
distinguish the return status.
Criterion to filter out what should be fixed in ptp directory:
grep -r "exit 0" -B1 | grep -i skip
This change might cause some false-positives if people are running
these test scripts directly and only checking their return codes,
which will change from 0 to 4. However I think the impact should be
small as most of our scripts here are already using this skip code.
And there will be no such issue if running them with the kselftest
framework.
Note that there are some SKIP messages that exit with 1; I leave those
unchanged.
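The distinction only matters to callers that inspect the return code; a
minimal sketch of such a wrapper (hypothetical, not part of this patch):

    #!/bin/bash
    # With KSFT_SKIP=4, a skip can be told apart from a pass, which a
    # bare "exit 0" made impossible.
    ./phc.sh "$1"
    case $? in
        0) echo "phc.sh: pass" ;;
        4) echo "phc.sh: skip" ;;
        *) echo "phc.sh: fail" ;;
    esac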
Signed-off-by: Po-Hsu Lin <po-hsu.lin(a)canonical.com>
---
tools/testing/selftests/ptp/phc.sh | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/tools/testing/selftests/ptp/phc.sh b/tools/testing/selftests/ptp/phc.sh
index ac6e5a6..0820544 100755
--- a/tools/testing/selftests/ptp/phc.sh
+++ b/tools/testing/selftests/ptp/phc.sh
@@ -1,6 +1,9 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+# Kselftest framework requirement - SKIP code is 4.
+readonly KSFT_SKIP=4
+
ALL_TESTS="
settime
adjtime
@@ -13,12 +16,12 @@ DEV=$1
if [[ "$(id -u)" -ne 0 ]]; then
echo "SKIP: need root privileges"
- exit 0
+ exit $KSFT_SKIP
fi
if [[ "$DEV" == "" ]]; then
echo "SKIP: PTP device not provided"
- exit 0
+ exit $KSFT_SKIP
fi
require_command()
--
2.7.4
There are several test cases in the bpf directory that are still using
exit 0 when they need to be skipped. Use the kselftest framework skip
code instead so it can help us to distinguish the return status.
Criterion to filter out what should be fixed in bpf directory:
grep -r "exit 0" -B1 | grep -i skip
This change might cause some false-positives if people are running
these test scripts directly and only checking their return codes,
which will change from 0 to 4. However I think the impact should be
small as most of our scripts here are already using this skip code.
And there will be no such issue if running them with the kselftest
framework.
Signed-off-by: Po-Hsu Lin <po-hsu.lin(a)canonical.com>
---
tools/testing/selftests/bpf/test_bpftool_build.sh | 5 ++++-
tools/testing/selftests/bpf/test_xdp_meta.sh | 5 ++++-
tools/testing/selftests/bpf/test_xdp_vlan.sh | 7 +++++--
3 files changed, 13 insertions(+), 4 deletions(-)
diff --git a/tools/testing/selftests/bpf/test_bpftool_build.sh b/tools/testing/selftests/bpf/test_bpftool_build.sh
index ac349a5..b6fab1e 100755
--- a/tools/testing/selftests/bpf/test_bpftool_build.sh
+++ b/tools/testing/selftests/bpf/test_bpftool_build.sh
@@ -1,6 +1,9 @@
#!/bin/bash
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
case $1 in
-h|--help)
echo -e "$0 [-j <n>]"
@@ -22,7 +25,7 @@ KDIR_ROOT_DIR=$(realpath $PWD/$SCRIPT_REL_DIR/../../../../)
cd $KDIR_ROOT_DIR
if [ ! -e tools/bpf/bpftool/Makefile ]; then
echo -e "skip: bpftool files not found!\n"
- exit 0
+ exit $ksft_skip
fi
ERROR=0
diff --git a/tools/testing/selftests/bpf/test_xdp_meta.sh b/tools/testing/selftests/bpf/test_xdp_meta.sh
index 637fcf4..fd3f218 100755
--- a/tools/testing/selftests/bpf/test_xdp_meta.sh
+++ b/tools/testing/selftests/bpf/test_xdp_meta.sh
@@ -1,5 +1,8 @@
#!/bin/sh
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
cleanup()
{
if [ "$?" = "0" ]; then
@@ -17,7 +20,7 @@ cleanup()
ip link set dev lo xdp off 2>/dev/null > /dev/null
if [ $? -ne 0 ];then
echo "selftests: [SKIP] Could not run test without the ip xdp support"
- exit 0
+ exit $ksft_skip
fi
set -e
diff --git a/tools/testing/selftests/bpf/test_xdp_vlan.sh b/tools/testing/selftests/bpf/test_xdp_vlan.sh
index bb8b0da..1aa7404 100755
--- a/tools/testing/selftests/bpf/test_xdp_vlan.sh
+++ b/tools/testing/selftests/bpf/test_xdp_vlan.sh
@@ -2,6 +2,9 @@
# SPDX-License-Identifier: GPL-2.0
# Author: Jesper Dangaard Brouer <hawk(a)kernel.org>
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
# Allow wrapper scripts to name test
if [ -z "$TESTNAME" ]; then
TESTNAME=xdp_vlan
@@ -94,7 +97,7 @@ while true; do
-h | --help )
usage;
echo "selftests: $TESTNAME [SKIP] usage help info requested"
- exit 0
+ exit $ksft_skip
;;
* )
shift
@@ -117,7 +120,7 @@ fi
ip link set dev lo xdpgeneric off 2>/dev/null > /dev/null
if [ $? -ne 0 ]; then
echo "selftests: $TESTNAME [SKIP] need ip xdp support"
- exit 0
+ exit $ksft_skip
fi
# Interactive mode likely require us to cleanup netns
--
2.7.4
There are several test cases in the bpf directory that are still using
exit 0 when they need to be skipped. Use the kselftest framework skip
code instead so it can help us to distinguish the return status.
Criterion to filter out what should be fixed in bpf directory:
grep -r "exit 0" -B1 | grep -i skip
This change might cause some false-positives if people are running
these test scripts directly and only checking their return codes,
which will change from 0 to 4. However I think the impact should be
small as most of our scripts here are already using this skip code.
And there will be no such issue if running them with the kselftest
framework.
v1 -> v2:
- Ignore bpf/test_bpftool_build.sh as similar changes has been made.
- Make KSFT_SKIP readonly as suggested by Jakub Sitnicki.
Signed-off-by: Po-Hsu Lin <po-hsu.lin(a)canonical.com>
---
tools/testing/selftests/bpf/test_xdp_meta.sh | 5 ++++-
tools/testing/selftests/bpf/test_xdp_vlan.sh | 7 +++++--
2 files changed, 9 insertions(+), 3 deletions(-)
diff --git a/tools/testing/selftests/bpf/test_xdp_meta.sh b/tools/testing/selftests/bpf/test_xdp_meta.sh
index 637fcf4..d10cefd 100755
--- a/tools/testing/selftests/bpf/test_xdp_meta.sh
+++ b/tools/testing/selftests/bpf/test_xdp_meta.sh
@@ -1,5 +1,8 @@
#!/bin/sh
+# Kselftest framework requirement - SKIP code is 4.
+readonly KSFT_SKIP=4
+
cleanup()
{
if [ "$?" = "0" ]; then
@@ -17,7 +20,7 @@ cleanup()
ip link set dev lo xdp off 2>/dev/null > /dev/null
if [ $? -ne 0 ];then
echo "selftests: [SKIP] Could not run test without the ip xdp support"
- exit 0
+ exit $KSFT_SKIP
fi
set -e
diff --git a/tools/testing/selftests/bpf/test_xdp_vlan.sh b/tools/testing/selftests/bpf/test_xdp_vlan.sh
index bb8b0da..0cbc760 100755
--- a/tools/testing/selftests/bpf/test_xdp_vlan.sh
+++ b/tools/testing/selftests/bpf/test_xdp_vlan.sh
@@ -2,6 +2,9 @@
# SPDX-License-Identifier: GPL-2.0
# Author: Jesper Dangaard Brouer <hawk(a)kernel.org>
+# Kselftest framework requirement - SKIP code is 4.
+readonly KSFT_SKIP=4
+
# Allow wrapper scripts to name test
if [ -z "$TESTNAME" ]; then
TESTNAME=xdp_vlan
@@ -94,7 +97,7 @@ while true; do
-h | --help )
usage;
echo "selftests: $TESTNAME [SKIP] usage help info requested"
- exit 0
+ exit $KSFT_SKIP
;;
* )
shift
@@ -117,7 +120,7 @@ fi
ip link set dev lo xdpgeneric off 2>/dev/null > /dev/null
if [ $? -ne 0 ]; then
echo "selftests: $TESTNAME [SKIP] need ip xdp support"
- exit 0
+ exit $KSFT_SKIP
fi
# Interactive mode likely require us to cleanup netns
--
2.7.4
Problem:
What does this do?
$ kunit.py run --json
Well, it runs all the tests and prints test results out as JSON.
And next is
$ kunit.py run my-test-suite --json
This runs just `my-test-suite` and prints results out as JSON.
But what about?
$ kunit.py run --json my-test-suite
This runs all the tests and stores the json results in a "my-test-suite"
file.
Why:
--json, and now --raw_output are actually string flags. They just have a
default value. --json in particular takes the name of an output file.
It was intended that you'd do
$ kunit.py run --json=my_output_file my-test-suite
if you ever wanted to specify the value.
Workaround:
It doesn't seem like there's a way to make
https://docs.python.org/3/library/argparse.html only accept arg values
after a '='.
I believe that `--json` should "just work" regardless of where it is.
So this patch automatically rewrites a bare `--json` to `--json=stdout`.
That makes the examples above work the same way.
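A small standalone sketch of the argparse behavior being worked around
(hypothetical flag definitions approximating kunit.py, not the actual
parser):

    import argparse

    parser = argparse.ArgumentParser()
    # A "string flag with a default value", like --json.
    parser.add_argument('--json', nargs='?', const='stdout')
    parser.add_argument('filter_glob', nargs='?')

    args = parser.parse_args(['--json', 'my-test-suite'])
    print(args.json)         # my-test-suite (consumed by --json)
    print(args.filter_glob)  # None (no filter seen)

    args = parser.parse_args(['--json=stdout', 'my-test-suite'])
    print(args.json)         # stdout
    print(args.filter_glob)  # my-test-suite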
Add a regression test that can catch this for --raw_output.
Fixes: 6a499c9c42d0 ("kunit: tool: make --raw_output support only showing kunit output")
Signed-off-by: Daniel Latypov <dlatypov(a)google.com>
Tested-by: David Gow <davidgow(a)google.com>
---
v1 -> v2: fix mypy error by converting mapped argv to a list.
---
tools/testing/kunit/kunit.py | 24 ++++++++++++++++++++++--
tools/testing/kunit/kunit_tool_test.py | 8 ++++++++
2 files changed, 30 insertions(+), 2 deletions(-)
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index 5a931456e718..ac35c61f65f5 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -16,7 +16,7 @@ assert sys.version_info >= (3, 7), "Python version is too old"
from collections import namedtuple
from enum import Enum, auto
-from typing import Iterable
+from typing import Iterable, Sequence
import kunit_config
import kunit_json
@@ -186,6 +186,26 @@ def run_tests(linux: kunit_kernel.LinuxSourceTree,
exec_result.elapsed_time))
return parse_result
+# Problem:
+# $ kunit.py run --json
+# works as one would expect and prints the parsed test results as JSON.
+# $ kunit.py run --json suite_name
+# would *not* pass suite_name as the filter_glob and print as json.
+# argparse will consider it to be another way of writing
+# $ kunit.py run --json=suite_name
+# i.e. it would run all tests, and dump the json to a `suite_name` file.
+# So we hackily automatically rewrite --json => --json=stdout
+pseudo_bool_flag_defaults = {
+ '--json': 'stdout',
+ '--raw_output': 'kunit',
+}
+def massage_argv(argv: Sequence[str]) -> Sequence[str]:
+ def massage_arg(arg: str) -> str:
+ if arg not in pseudo_bool_flag_defaults:
+ return arg
+ return f'{arg}={pseudo_bool_flag_defaults[arg]}'
+ return list(map(massage_arg, argv))
+
def add_common_opts(parser) -> None:
parser.add_argument('--build_dir',
help='As in the make command, it specifies the build '
@@ -303,7 +323,7 @@ def main(argv, linux=None):
help='Specifies the file to read results from.',
type=str, nargs='?', metavar='input_file')
- cli_args = parser.parse_args(argv)
+ cli_args = parser.parse_args(massage_argv(argv))
if get_kernel_root_path():
os.chdir(get_kernel_root_path())
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index 619c4554cbff..1edcc8373b4e 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -408,6 +408,14 @@ class KUnitMainTest(unittest.TestCase):
self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))
+ def test_run_raw_output_does_not_take_positional_args(self):
+ # --raw_output is a string flag, but we don't want it to consume
+ # any positional arguments, only ones after an '='
+ self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
+ kunit.main(['run', '--raw_output', 'filter_glob'], self.linux_source_mock)
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+ args=None, build_dir='.kunit', filter_glob='filter_glob', timeout=300)
+
def test_exec_timeout(self):
timeout = 3453
kunit.main(['exec', '--timeout', str(timeout)], self.linux_source_mock)
base-commit: 4c17ca27923c16fd73bbb9ad033c7d749c3bcfcc
--
2.33.0.464.g1972c5931b-goog
From: Li Zhijian <lizhijian(a)cn.fujitsu.com>
[ Upstream commit 8914a7a247e065438a0ec86a58c1c359223d2c9e ]
LKP/0Day reported some building errors about kvm, and errors message
are not always same:
- lib/x86_64/processor.c:1083:31: error: ‘KVM_CAP_NESTED_STATE’ undeclared
(first use in this function); did you mean ‘KVM_CAP_PIT_STATE2’?
- lib/test_util.c:189:30: error: ‘MAP_HUGE_16KB’ undeclared (first use
in this function); did you mean ‘MAP_HUGE_16GB’?
Although kvm relies on the khdr, they still be built in parallel when -j
is specified. In this case, it will cause compiling errors.
Here we mark target khdr as NOTPARALLEL to make it be always built
first.
CC: Philip Li <philip.li(a)intel.com>
Reported-by: kernel test robot <lkp(a)intel.com>
Signed-off-by: Li Zhijian <lizhijian(a)cn.fujitsu.com>
Signed-off-by: Shuah Khan <skhan(a)linuxfoundation.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
tools/testing/selftests/lib.mk | 1 +
1 file changed, 1 insertion(+)
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index a5d40653a921..9700281bee4c 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -26,6 +26,7 @@ include $(top_srcdir)/scripts/subarch.include
ARCH ?= $(SUBARCH)
.PHONY: khdr
+.NOTPARALLEL:
khdr:
make ARCH=$(ARCH) -C $(top_srcdir) headers_install
--
2.33.0
On Fri, Sep 17, 2021 at 10:04:18PM -0700, Luis Chamberlain wrote:
> In this v7 I've decided it is best to merge all the effort together into
> one patch set because communication was being lost when I split the
> patches up. This was not helping in any way to either fix the zram
> issues or come to consensus on a generic solution. The patches are also
> merged now because they are all related now.
Building up all the testing framework is really great. I have no opinions
about the license-related stuff, but all other changes generally look good
to me.
Thanks.
--
tejun
Hi Linus,
Please pull the following Kselftest fixes update for Linux 5.15-rc3.
This Kselftest fixes update for Linux 5.15-rc3 consists of:
- fix to the Kselftest common framework header install to run before
other targets so it works correctly in the parallel build case.
- fixes to kvm tests to not ignore fscanf() return values, which could
result in inconsistent test behavior and failures.
diff is attached.
thanks,
-- Shuah
----------------------------------------------------------------
The following changes since commit 6880fa6c56601bb8ed59df6c30fd390cc5f6dd8f:
Linux 5.15-rc1 (2021-09-12 16:28:37 -0700)
are available in the Git repository at:
git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest tags/linux-kselftest-fixes-5.15-rc3
for you to fetch changes up to f5013d412a43662b63f3d5f3a804d63213acd471:
selftests: kvm: fix get_run_delay() ignoring fscanf() return warn (2021-09-16 12:57:32 -0600)
----------------------------------------------------------------
linux-kselftest-fixes-5.15-rc3
This Kselftest fixes update for Linux 5.15-rc3 consists of:
- fix to the Kselftest common framework header install to run before
other targets so it works correctly in the parallel build case.
- fixes to kvm tests to not ignore fscanf() return values, which could
result in inconsistent test behavior and failures.
----------------------------------------------------------------
Li Zhijian (1):
selftests: be sure to make khdr before other targets
Shuah Khan (4):
selftests:kvm: fix get_warnings_count() ignoring fscanf() return warn
selftests:kvm: fix get_trans_hugepagesz() ignoring fscanf() return warn
selftests: kvm: move get_run_delay() into lib/test_util
selftests: kvm: fix get_run_delay() ignoring fscanf() return warn
tools/testing/selftests/kvm/include/test_util.h | 3 +++
tools/testing/selftests/kvm/lib/test_util.c | 22 +++++++++++++++++++++-
tools/testing/selftests/kvm/steal_time.c | 16 ----------------
.../selftests/kvm/x86_64/mmio_warning_test.c | 3 ++-
.../testing/selftests/kvm/x86_64/xen_shinfo_test.c | 15 ---------------
tools/testing/selftests/lib.mk | 1 +
6 files changed, 27 insertions(+), 33 deletions(-)
----------------------------------------------------------------
This is similar to TCP-MD5 in functionality, but it is sufficiently
different that the wire formats are incompatible. Compared to TCP-MD5,
more algorithms are supported and multiple keys can be used on the same
connection, but there is still no negotiation mechanism.
The expected use case is protecting long-duration BGP/LDP connections
between routers using pre-shared keys. The goal of this series is to
allow routers using the Linux TCP stack to interoperate with vendors
such as Cisco and Juniper.
Both algorithms described in RFC5926 are implemented, but the code is
not easily extensible beyond that. In particular, several code paths
make stack allocations based on the RFC5926 maximum; those would have
to be increased.
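For reference, the HMAC-SHA-1-96 primitive from RFC5926 is just a full
HMAC-SHA1 tag truncated to its leftmost 96 bits. A minimal Python sketch is
below; the key and message are placeholders (in the real protocol the key is
a derived traffic key and the message covers a pseudo-header plus the TCP
segment), so this only illustrates the truncation, not the series' actual
signing code:

import hashlib
import hmac

def hmac_sha1_96(traffic_key: bytes, message: bytes) -> bytes:
    # Full HMAC-SHA1, truncated to the leftmost 96 bits (12 bytes).
    return hmac.new(traffic_key, message, hashlib.sha1).digest()[:12]

mac = hmac_sha1_96(b"\x00" * 20, b"placeholder segment bytes")
print(mac.hex(), "->", len(mac) * 8, "bits")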
This version incorporates previous feedback and expands the handling
of timewait sockets and RST packets. Here are some known flaws and
limits:
* Interaction with TCP-MD5 is not tested in all corners
* Interaction with FASTOPEN is not tested and is unlikely to work because
of sequence number assumptions for SYN/ACK.
* Sequence Number Extension is not implemented, so connections will flap
every ~4G of traffic.
* Not clear if crypto_shash_setkey might sleep. If some implementations
do that then maybe they could be excluded through allocation flags.
* Traffic key is not cached (reducing performance)
* User is responsible for ensuring keys do not overlap
I labeled this as [PATCH] because the issues above are not critical.
A test suite was added under tools/testing/selftests/tcp_authopt. Tests
are written in Python using pytest and scapy; they check the API in some
detail and validate packet captures. Python code is already used in the
kernel tree and in kselftests, but virtualenvs are not used very much.
This test suite uses `tox` to create a private virtualenv and hide
dependencies. There is no clear guidance on how to add Python-based
kselftests, so I made it up.
Limited testing support is also included in nettest and fcnal-test.sh.
Coverage there is extremely limited; I did not expand it because the
tests run too slowly.
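To give a flavour of the userspace side (the selftests' linux_tcp_authopt.py
helper presumably wraps the same sockopts for the pytest suite), here is a
very rough sketch of installing a pre-shared key. The sockopt number,
structure layout and algorithm id below are placeholders invented for
illustration; the authoritative definitions are the uapi headers added by
this series:

import socket
import struct

TCP_AUTHOPT_KEY = 39                  # placeholder sockopt number (assumption)
TCP_AUTHOPT_ALG_HMAC_SHA_1_96 = 1     # placeholder algorithm id (assumption)
TCP_AUTHOPT_MAXKEYLEN = 80            # placeholder key buffer size (assumption)

def set_tcp_authopt_key(sock, key, send_id=1, recv_id=1):
    # Assumed struct prefix: u32 flags, u8 send_id, u8 recv_id, u8 alg, u8 keylen
    buf = struct.pack("=IBBBB", 0, send_id, recv_id,
                      TCP_AUTHOPT_ALG_HMAC_SHA_1_96, len(key))
    buf += key.ljust(TCP_AUTHOPT_MAXKEYLEN, b"\0")
    sock.setsockopt(socket.IPPROTO_TCP, TCP_AUTHOPT_KEY, buf)

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    set_tcp_authopt_key(s, b"migsecret")
except OSError as e:
    print("setsockopt failed (expected without these patches):", e)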
Changes for frr: https://github.com/FRRouting/frr/pull/9442
That PR was made early for ABI feedback; it has many issues.
Changes for yabgp: https://github.com/cdleonard/yabgp/commits/tcp_authopt
This can be used for easy interoperability testing with Cisco/Juniper/etc.
Changes since RFCv3:
* Implement TCP_AUTHOPT handling for timewait and reset replies. Write
tests to execute these paths by injecting packets with scapy
* Handle combining md5 and authopt: if both are configured use authopt.
* Fix locking issues around send_key, introduced in one of the later
patches.
* Handle IPv4-mapped IPv6 addresses: it used to be that an IPv4 SYN sent
to an IPv6 socket with TCP-AO triggered a WARN
* Implement an un-namespaced sysctl disabling this feature by default
* Allocate new key before removing any old one in setsockopt (Dmitry)
* Remove tcp_authopt_key_info.local_id because it's no longer used (Dmitry)
* Propagate errors from TCP_AUTHOPT getsockopt (Dmitry)
* Fix no-longer-correct TCP_AUTHOPT_KEY_DEL docs (Dmitry)
* Simplify crypto allocation (Eric)
* Use kzalloc instead of __GFP_ZERO (Eric)
* Add static_key_false tcp_authopt_needed (Eric)
* Clear authopt_info copied from oldsk in __tcp_authopt_openreq (Eric)
* Replace memcmp in ipv4 and ipv6 addr comparisons (Eric)
* Export symbols for CONFIG_IPV6=m (kernel test robot)
* Mark more functions static (kernel test robot)
* Fix build with CONFIG_PROVE_RCU_LIST=y (kernel test robot)
Link: https://lore.kernel.org/netdev/cover.1629840814.git.cdleonard@gmail.com/
Changes since RFCv2:
* Removed local_id from ABI and match on send_id/recv_id/addr
* Add all relevant out-of-tree tests to tools/testing/selftests
* Return an error instead of ignoring unknown flags; hopefully this makes
it easier to extend.
* Check sk_family before __tcp_authopt_info_get_or_create in tcp_set_authopt_key
* Use sock_owned_by_me instead of WARN_ON(!lockdep_sock_is_held(sk))
* Fix some intermediate build failures reported by kbuild robot
* Improve documentation
Link: https://lore.kernel.org/netdev/cover.1628544649.git.cdleonard@gmail.com/
Changes since RFC:
* Split into per-topic commits for ease of review. The intermediate
commits compile with a few "unused function" warnings and don't do
anything useful by themselves.
* Add ABI documention including kernel-doc on uapi
* Fix lockdep warnings from crypto by creating pools with one shash for
each cpu
* Accept short options to setsockopt by padding with zeros; this
approach allows increasing the size of the structs in the future.
* Support for aes-128-cmac-96
* Support for binding addresses to keys in a way similar to old tcp_md5
* Add support for retrieving the received keyid/rnextkeyid and controlling
the keyid/rnextkeyid being sent.
Link: https://lore.kernel.org/netdev/01383a8751e97ef826ef2adf93bfde3a08195a43.162…
Leonard Crestez (19):
tcp: authopt: Initial support and key management
docs: Add user documentation for tcp_authopt
selftests: Initial tcp_authopt test module
selftests: tcp_authopt: Initial sockopt manipulation
tcp: authopt: Add crypto initialization
tcp: authopt: Compute packet signatures
tcp: authopt: Hook into tcp core
tcp: authopt: Disable via sysctl by default
selftests: tcp_authopt: Test key address binding
tcp: ipv6: Add AO signing for tcp_v6_send_response
tcp: authopt: Add support for signing skb-less replies
tcp: ipv4: Add AO signing for skb-less replies
selftests: tcp_authopt: Add scapy-based packet signing code
selftests: tcp_authopt: Add packet-level tests
selftests: Initial tcp_authopt support for nettest
selftests: Initial tcp_authopt support for fcnal-test
selftests: Add -t tcp_authopt option for fcnal-test.sh
tcp: authopt: Add key selection controls
selftests: tcp_authopt: Add tests for rollover
Documentation/networking/index.rst | 1 +
Documentation/networking/ip-sysctl.rst | 6 +
Documentation/networking/tcp_authopt.rst | 69 +
include/linux/tcp.h | 9 +
include/net/tcp.h | 1 +
include/net/tcp_authopt.h | 200 +++
include/uapi/linux/snmp.h | 1 +
include/uapi/linux/tcp.h | 110 ++
net/ipv4/Kconfig | 14 +
net/ipv4/Makefile | 1 +
net/ipv4/proc.c | 1 +
net/ipv4/sysctl_net_ipv4.c | 10 +
net/ipv4/tcp.c | 30 +
net/ipv4/tcp_authopt.c | 1450 +++++++++++++++++
net/ipv4/tcp_input.c | 17 +
net/ipv4/tcp_ipv4.c | 101 +-
net/ipv4/tcp_minisocks.c | 12 +
net/ipv4/tcp_output.c | 80 +-
net/ipv6/tcp_ipv6.c | 56 +-
tools/testing/selftests/net/fcnal-test.sh | 34 +
tools/testing/selftests/net/nettest.c | 34 +-
tools/testing/selftests/tcp_authopt/Makefile | 5 +
.../testing/selftests/tcp_authopt/README.rst | 15 +
tools/testing/selftests/tcp_authopt/config | 6 +
.../selftests/tcp_authopt/requirements.txt | 40 +
tools/testing/selftests/tcp_authopt/run.sh | 15 +
tools/testing/selftests/tcp_authopt/setup.cfg | 17 +
tools/testing/selftests/tcp_authopt/setup.py | 5 +
.../tcp_authopt/tcp_authopt_test/__init__.py | 0
.../tcp_authopt/tcp_authopt_test/conftest.py | 41 +
.../full_tcp_sniff_session.py | 81 +
.../tcp_authopt_test/linux_tcp_authopt.py | 248 +++
.../tcp_authopt_test/linux_tcp_md5sig.py | 95 ++
.../tcp_authopt_test/netns_fixture.py | 83 +
.../tcp_authopt_test/scapy_conntrack.py | 150 ++
.../tcp_authopt_test/scapy_tcp_authopt.py | 211 +++
.../tcp_authopt_test/scapy_utils.py | 176 ++
.../tcp_authopt/tcp_authopt_test/server.py | 95 ++
.../tcp_authopt/tcp_authopt_test/sockaddr.py | 112 ++
.../tcp_connection_fixture.py | 269 +++
.../tcp_authopt/tcp_authopt_test/test_bind.py | 145 ++
.../tcp_authopt_test/test_rollover.py | 180 ++
.../tcp_authopt_test/test_sockopt.py | 185 +++
.../tcp_authopt_test/test_vectors.py | 359 ++++
.../tcp_authopt_test/test_verify_capture.py | 555 +++++++
.../tcp_authopt/tcp_authopt_test/utils.py | 102 ++
.../tcp_authopt/tcp_authopt_test/validator.py | 127 ++
47 files changed, 5544 insertions(+), 10 deletions(-)
create mode 100644 Documentation/networking/tcp_authopt.rst
create mode 100644 include/net/tcp_authopt.h
create mode 100644 net/ipv4/tcp_authopt.c
create mode 100644 tools/testing/selftests/tcp_authopt/Makefile
create mode 100644 tools/testing/selftests/tcp_authopt/README.rst
create mode 100644 tools/testing/selftests/tcp_authopt/config
create mode 100644 tools/testing/selftests/tcp_authopt/requirements.txt
create mode 100755 tools/testing/selftests/tcp_authopt/run.sh
create mode 100644 tools/testing/selftests/tcp_authopt/setup.cfg
create mode 100644 tools/testing/selftests/tcp_authopt/setup.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/__init__.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/conftest.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/full_tcp_sniff_session.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/linux_tcp_authopt.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/linux_tcp_md5sig.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/netns_fixture.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/scapy_conntrack.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/scapy_tcp_authopt.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/scapy_utils.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/server.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/sockaddr.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/tcp_connection_fixture.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/test_bind.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/test_rollover.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/test_sockopt.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/test_vectors.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/test_verify_capture.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/utils.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/validator.py
base-commit: 07b855628c226511542d0911cba1b180541fbb84
--
2.25.1
Subject: Introduction: I am a Linux and open source software enthusiast
Greetings from Singapore,
My name is Mr. Turritopsis Dohrnii Teo En Ming, 43 years old as of 25
September 2021. My country is Singapore. Presently I am an IT
Consultant with a System Integrator (SI)/computer firm in Singapore. I
am also a Linux and open source software and information technology
enthusiast.
You can read my autobiography on my redundant blogs. The title of my
autobiography is:
"Autobiography of Singaporean Targeted Individual Mr. Turritopsis
Dohrnii Teo En Ming (Very First Draft, Lots More to Add in Future)"
Links to my redundant blogs (Blogger and Wordpress) can be found in my
email signature below. These are my main blogs.
I have three other redundant blogs, namely:
https://teo-en-ming.tumblr.com/
https://teo-en-ming.medium.com/
https://teo-en-ming.livejournal.com/
Future/subsequent versions of my autobiography will be published on my
redundant blogs.
My Blog Books (in PDF format) are also available for download on my
redundant blogs.
I have also published many guides, howtos, tutorials, and information
technology articles on my redundant blogs.
Thank you very much.
-----BEGIN EMAIL SIGNATURE-----
The Gospel for all Targeted Individuals (TIs):
[The New York Times] Microwave Weapons Are Prime Suspect in Ills of
U.S. Embassy Workers
Link:
https://www.nytimes.com/2018/09/01/science/sonic-attack-cuba-microwave.html
********************************************************************************************
Singaporean Targeted Individual Mr. Turritopsis Dohrnii Teo En Ming's
Academic Qualifications as at 14 Feb 2019 and refugee seeking attempts
at the United Nations Refugee Agency Bangkok (21 Mar 2017), in Taiwan
(5 Aug 2019) and Australia (25 Dec 2019 to 9 Jan 2020):
[1] https://tdtemcerts.wordpress.com/
[2] https://tdtemcerts.blogspot.sg/
[3] https://www.scribd.com/user/270125049/Teo-En-Ming
-----END EMAIL SIGNATURE-----
Currently, the test decides whether or not to test certain features
(e.g., writeprotect support) essentially by examining command-line
arguments. For example, if we're testing anonymous memory, then we
should test writeprotect support as well (since it generally is
supported for anonymous).
This is broken, however. Take writeprotect support as an example: sure
it's supported for anon, but it also requires that we have
CONFIG_HAVE_ARCH_USERFAULTFD_WP. I.e., it is not supported at all on
aarch64. So, running the test on such an arch fails: it tries to test
writeprotect for anon, but since it isn't *actually* supported, it
fails.
So, instead of checking command-line arguments to the test, check the
features the way the UFFD API intends: when we open a new userfaultfd,
pass in the feature(s) this test case would like to try to exercise. The
kernel reports back a subset of those features which are actually
supported: check these returned flags to see if the features are
*actually* supported.
(For a couple of cases, where *registration* would fail [with -EINVAL]
even though UFFDIO_API reports the feature as supported, we have to
check test_type as well as the feature flag.)
In some cases, we check immediately after opening the userfaultfd, and
if the features are missing, we skip the entire test. In some other
cases, we can proceed with "most" of the test, only skipping a few
pieces.
This lets us remove the global test_uffdio_wp and test_uffdio_minor
variables entirely.
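For anyone wanting to poke at the same handshake outside the selftest, the
request-then-verify pattern is easy to reproduce in a few lines of Python;
the sketch below is not the selftest's code, and it hard-codes the x86_64
syscall number and the UFFDIO_API ioctl value on the assumption that they
match include/uapi/linux/userfaultfd.h:

import ctypes
import fcntl
import os
import struct

NR_userfaultfd = 323                     # x86_64 syscall number (assumed)
UFFD_API = 0xAA
UFFDIO_API = 0xC018AA3F                  # _IOWR(0xAA, 0x3F, struct uffdio_api) (assumed)
UFFD_FEATURE_PAGEFAULT_FLAG_WP = 1 << 0  # assumed to match the uapi header

libc = ctypes.CDLL(None, use_errno=True)
uffd = libc.syscall(NR_userfaultfd, os.O_CLOEXEC | os.O_NONBLOCK)
if uffd < 0:
    raise OSError(ctypes.get_errno(), "userfaultfd() failed (may need privilege)")

# struct uffdio_api { __u64 api; __u64 features; __u64 ioctls; }
# Request uffd-wp; the kernel rewrites 'features' with the subset it grants.
req = bytearray(struct.pack("=QQQ", UFFD_API, UFFD_FEATURE_PAGEFAULT_FLAG_WP, 0))
fcntl.ioctl(uffd, UFFDIO_API, req)
_, granted, _ = struct.unpack("=QQQ", req)
print("uffd-wp granted:", bool(granted & UFFD_FEATURE_PAGEFAULT_FLAG_WP))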
Signed-off-by: Axel Rasmussen <axelrasmussen(a)google.com>
---
tools/testing/selftests/vm/userfaultfd.c | 94 +++++++++++-------------
1 file changed, 43 insertions(+), 51 deletions(-)
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index 10ab56c2484a..2366caf90435 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -79,10 +79,6 @@ static int test_type;
#define ALARM_INTERVAL_SECS 10
static volatile bool test_uffdio_copy_eexist = true;
static volatile bool test_uffdio_zeropage_eexist = true;
-/* Whether to test uffd write-protection */
-static bool test_uffdio_wp = false;
-/* Whether to test uffd minor faults */
-static bool test_uffdio_minor = false;
static bool map_shared;
static int shm_fd;
@@ -90,6 +86,7 @@ static int huge_fd;
static char *huge_fd_off0;
static unsigned long long *count_verify;
static int uffd = -1;
+static uint64_t uffd_features;
static int uffd_flags, finished, *pipefd;
static char *area_src, *area_src_alias, *area_dst, *area_dst_alias;
static char *zeropage;
@@ -345,7 +342,7 @@ static struct uffd_test_ops hugetlb_uffd_test_ops = {
static struct uffd_test_ops *uffd_test_ops;
-static void userfaultfd_open(uint64_t *features)
+static void userfaultfd_open(uint64_t features)
{
struct uffdio_api uffdio_api;
@@ -355,14 +352,20 @@ static void userfaultfd_open(uint64_t *features)
uffd_flags = fcntl(uffd, F_GETFD, NULL);
uffdio_api.api = UFFD_API;
- uffdio_api.features = *features;
+ uffdio_api.features = features;
if (ioctl(uffd, UFFDIO_API, &uffdio_api))
err("UFFDIO_API failed.\nPlease make sure to "
"run with either root or ptrace capability.");
if (uffdio_api.api != UFFD_API)
err("UFFDIO_API error: %" PRIu64, (uint64_t)uffdio_api.api);
- *features = uffdio_api.features;
+ uffd_features = uffdio_api.features;
+}
+
+static inline bool uffd_wp_supported(void)
+{
+ return test_type == TEST_ANON &&
+ (uffd_features & UFFD_FEATURE_PAGEFAULT_FLAG_WP);
}
static inline void munmap_area(void **area)
@@ -397,6 +400,7 @@ static void uffd_test_ctx_clear(void)
err("close uffd");
uffd = -1;
}
+ uffd_features = 0;
huge_fd_off0 = NULL;
munmap_area((void **)&area_src);
@@ -405,7 +409,7 @@ static void uffd_test_ctx_clear(void)
munmap_area((void **)&area_dst_alias);
}
-static void uffd_test_ctx_init_ext(uint64_t *features)
+static void uffd_test_ctx_init(uint64_t features)
{
unsigned long nr, cpu;
@@ -445,11 +449,6 @@ static void uffd_test_ctx_init_ext(uint64_t *features)
err("pipe");
}
-static inline void uffd_test_ctx_init(uint64_t features)
-{
- uffd_test_ctx_init_ext(&features);
-}
-
static int my_bcmp(char *str1, char *str2, size_t n)
{
unsigned long i;
@@ -587,7 +586,7 @@ static int __copy_page(int ufd, unsigned long offset, bool retry)
uffdio_copy.dst = (unsigned long) area_dst + offset;
uffdio_copy.src = (unsigned long) area_src + offset;
uffdio_copy.len = page_size;
- if (test_uffdio_wp)
+ if (uffd_wp_supported())
uffdio_copy.mode = UFFDIO_COPY_MODE_WP;
else
uffdio_copy.mode = 0;
@@ -778,7 +777,7 @@ static void *background_thread(void *arg)
* at least the first half of the pages mapped already which
* can be write-protected for testing
*/
- if (test_uffdio_wp)
+ if (uffd_wp_supported())
wp_range(uffd, (unsigned long)area_dst + start_nr * page_size,
nr_pages_per_cpu * page_size, true);
@@ -1062,12 +1061,12 @@ static int userfaultfd_zeropage_test(void)
printf("testing UFFDIO_ZEROPAGE: ");
fflush(stdout);
- uffd_test_ctx_init(0);
+ uffd_test_ctx_init(UFFD_FEATURE_PAGEFAULT_FLAG_WP);
uffdio_register.range.start = (unsigned long) area_dst;
uffdio_register.range.len = nr_pages * page_size;
uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
- if (test_uffdio_wp)
+ if (uffd_wp_supported())
uffdio_register.mode |= UFFDIO_REGISTER_MODE_WP;
if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
err("register failure");
@@ -1089,7 +1088,7 @@ static int userfaultfd_events_test(void)
struct uffdio_register uffdio_register;
unsigned long expected_ioctls;
pthread_t uffd_mon;
- int err, features;
+ int err;
pid_t pid;
char c;
struct uffd_stats stats = { 0 };
@@ -1097,16 +1096,15 @@ static int userfaultfd_events_test(void)
printf("testing events (fork, remap, remove): ");
fflush(stdout);
- features = UFFD_FEATURE_EVENT_FORK | UFFD_FEATURE_EVENT_REMAP |
- UFFD_FEATURE_EVENT_REMOVE;
- uffd_test_ctx_init(features);
+ uffd_test_ctx_init(UFFD_FEATURE_EVENT_FORK | UFFD_FEATURE_EVENT_REMAP |
+ UFFD_FEATURE_EVENT_REMOVE | UFFD_FEATURE_PAGEFAULT_FLAG_WP);
fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
uffdio_register.range.start = (unsigned long) area_dst;
uffdio_register.range.len = nr_pages * page_size;
uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
- if (test_uffdio_wp)
+ if (uffd_wp_supported())
uffdio_register.mode |= UFFDIO_REGISTER_MODE_WP;
if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
err("register failure");
@@ -1144,7 +1142,7 @@ static int userfaultfd_sig_test(void)
unsigned long expected_ioctls;
unsigned long userfaults;
pthread_t uffd_mon;
- int err, features;
+ int err;
pid_t pid;
char c;
struct uffd_stats stats = { 0 };
@@ -1152,15 +1150,15 @@ static int userfaultfd_sig_test(void)
printf("testing signal delivery: ");
fflush(stdout);
- features = UFFD_FEATURE_EVENT_FORK|UFFD_FEATURE_SIGBUS;
- uffd_test_ctx_init(features);
+ uffd_test_ctx_init(UFFD_FEATURE_EVENT_FORK | UFFD_FEATURE_SIGBUS |
+ UFFD_FEATURE_PAGEFAULT_FLAG_WP);
fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
uffdio_register.range.start = (unsigned long) area_dst;
uffdio_register.range.len = nr_pages * page_size;
uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
- if (test_uffdio_wp)
+ if (uffd_wp_supported())
uffdio_register.mode |= UFFDIO_REGISTER_MODE_WP;
if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
err("register failure");
@@ -1209,25 +1207,23 @@ static int userfaultfd_minor_test(void)
void *expected_page;
char c;
struct uffd_stats stats = { 0 };
- uint64_t req_features, features_out;
-
- if (!test_uffdio_minor)
- return 0;
+ uint64_t features;
printf("testing minor faults: ");
fflush(stdout);
- if (test_type == TEST_HUGETLB)
- req_features = UFFD_FEATURE_MINOR_HUGETLBFS;
+ if (test_type == TEST_HUGETLB && map_shared)
+ features = UFFD_FEATURE_MINOR_HUGETLBFS;
else if (test_type == TEST_SHMEM)
- req_features = UFFD_FEATURE_MINOR_SHMEM;
- else
- return 1;
+ features = UFFD_FEATURE_MINOR_SHMEM;
+ else {
+ printf("skipping test due to unsupported memory type\n");
+ return 0;
+ }
- features_out = req_features;
- uffd_test_ctx_init_ext(&features_out);
+ uffd_test_ctx_init(features);
/* If kernel reports required features aren't supported, skip test. */
- if ((features_out & req_features) != req_features) {
+ if ((uffd_features & features) != features) {
printf("skipping test due to lack of feature support\n");
fflush(stdout);
return 0;
@@ -1349,10 +1345,6 @@ static void userfaultfd_pagemap_test(unsigned int test_pgsize)
int pagemap_fd;
uint64_t value;
- /* Pagemap tests uffd-wp only */
- if (!test_uffdio_wp)
- return;
-
/* Not enough memory to test this page size */
if (test_pgsize > nr_pages * page_size)
return;
@@ -1361,7 +1353,12 @@ static void userfaultfd_pagemap_test(unsigned int test_pgsize)
/* Flush so it doesn't flush twice in parent/child later */
fflush(stdout);
- uffd_test_ctx_init(0);
+ uffd_test_ctx_init(UFFD_FEATURE_PAGEFAULT_FLAG_WP);
+ /* Pagemap tests uffd-wp only */
+ if (!uffd_wp_supported()) {
+ printf("skipping test due to lack of feature support\n");
+ return;
+ }
if (test_pgsize > page_size) {
/* This is a thp test */
@@ -1426,7 +1423,7 @@ static int userfaultfd_stress(void)
struct uffdio_register uffdio_register;
struct uffd_stats uffd_stats[nr_cpus];
- uffd_test_ctx_init(0);
+ uffd_test_ctx_init(UFFD_FEATURE_PAGEFAULT_FLAG_WP);
if (posix_memalign(&area, page_size, page_size))
err("out of memory");
@@ -1464,7 +1461,7 @@ static int userfaultfd_stress(void)
uffdio_register.range.start = (unsigned long) area_dst;
uffdio_register.range.len = nr_pages * page_size;
uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
- if (test_uffdio_wp)
+ if (uffd_wp_supported())
uffdio_register.mode |= UFFDIO_REGISTER_MODE_WP;
if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
err("register failure");
@@ -1513,7 +1510,7 @@ static int userfaultfd_stress(void)
return 1;
/* Clear all the write protections if there is any */
- if (test_uffdio_wp)
+ if (uffd_wp_supported())
wp_range(uffd, (unsigned long)area_dst,
nr_pages * page_size, false);
@@ -1595,8 +1592,6 @@ static void set_test_type(const char *type)
if (!strcmp(type, "anon")) {
test_type = TEST_ANON;
uffd_test_ops = &anon_uffd_test_ops;
- /* Only enable write-protect test for anonymous test */
- test_uffdio_wp = true;
} else if (!strcmp(type, "hugetlb")) {
test_type = TEST_HUGETLB;
uffd_test_ops = &hugetlb_uffd_test_ops;
@@ -1604,13 +1599,10 @@ static void set_test_type(const char *type)
map_shared = true;
test_type = TEST_HUGETLB;
uffd_test_ops = &hugetlb_uffd_test_ops;
- /* Minor faults require shared hugetlb; only enable here. */
- test_uffdio_minor = true;
} else if (!strcmp(type, "shmem")) {
map_shared = true;
test_type = TEST_SHMEM;
uffd_test_ops = &shmem_uffd_test_ops;
- test_uffdio_minor = true;
} else {
err("Unknown test type: %s", type);
}
--
2.33.0.464.g1972c5931b-goog