Commit 7122debb4367 ("kunit: introduce
kunit_kmalloc_array/kunit_kcalloc() helpers") added new functions but
named the last arg `flags`, unlike the existing code, which used `gfp`.
This is only an issue in test.h; test.c still uses `gfp`.
But the documentation was copy-pasted with the old names, leading to
kernel-doc warnings.
Do s/flags/gfp/ to make the names consistent and fix the warnings.
Fixes: 7122debb4367 ("kunit: introduce kunit_kmalloc_array/kunit_kcalloc() helpers")
Reported-by: Randy Dunlap <rdunlap(a)infradead.org>
Signed-off-by: Daniel Latypov <dlatypov(a)google.com>
---
include/kunit/test.h | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/include/kunit/test.h b/include/kunit/test.h
index 24b40e5c160b..018e776a34b9 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -613,7 +613,7 @@ void kunit_remove_resource(struct kunit *test, struct kunit_resource *res);
* and is automatically cleaned up after the test case concludes. See &struct
* kunit_resource for more information.
*/
-void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t flags);
+void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp);
/**
* kunit_kmalloc() - Like kmalloc() except the allocation is *test managed*.
@@ -657,9 +657,9 @@ static inline void *kunit_kzalloc(struct kunit *test, size_t size, gfp_t gfp)
*
* See kcalloc() and kunit_kmalloc_array() for more information.
*/
-static inline void *kunit_kcalloc(struct kunit *test, size_t n, size_t size, gfp_t flags)
+static inline void *kunit_kcalloc(struct kunit *test, size_t n, size_t size, gfp_t gfp)
{
- return kunit_kmalloc_array(test, n, size, flags | __GFP_ZERO);
+ return kunit_kmalloc_array(test, n, size, gfp | __GFP_ZERO);
}
void kunit_cleanup(struct kunit *test);
base-commit: 3b29021ddd10cfb6b2565c623595bd3b02036f33
--
2.33.0.882.g93a45727a2-goog
Currently, `run_kernel()` dumps all the kernel output to a file
(.kunit/test.log) and then opens the file and yields it to callers.
This made it easier to respect the requested timeout, if any.
But it means that we can't yield the results in real time, either to the
parser or to stdout (if --raw_output is set).
This change spins up a background thread to enforce the timeout, which
allows us to yield the kernel output in real time, while also copying it
to the .kunit/test.log file.
It's also careful to ensure that the .kunit/test.log file is complete,
even if kunit_parser throws an exception or otherwise doesn't consume
every line; see the new `finally` block and unit test.
For example:
$ ./tools/testing/kunit/kunit.py run --arch=x86_64 --raw_output
<configure + build steps>
...
<can now see output from QEMU in real time>
This does not currently have a visible effect when --raw_output is not
passed, as kunit_parser.py currently only outputs everything at the end.
But that could change, and this patch is a necessary step towards
showing parsed test results in real time.
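For reference, the core tee-and-timeout pattern looks roughly like
this (a simplified, self-contained sketch of the new run_kernel()
flow; tee_kernel_output() is an illustrative name, not the real API,
and it assumes the process was started with stdout=subprocess.PIPE
and text=True as in the diff below):

  import subprocess
  import threading
  from typing import Iterator

  def tee_kernel_output(process: subprocess.Popen, log_path: str,
                        timeout: int) -> Iterator[str]:
      # Enforce the timeout off the main thread so stdout can be
      # consumed in real time on this one.
      def wait_proc() -> None:
          try:
              process.wait(timeout=timeout)
          except Exception as e:
              print(e)
              process.terminate()
              process.wait()
      waiter = threading.Thread(target=wait_proc)
      waiter.start()

      output = open(log_path, 'w')
      try:
          for line in process.stdout:
              output.write(line)  # copy to .kunit/test.log...
              yield line          # ...while yielding in real time
      finally:
          # Runs even if the caller stops consuming lines early:
          # drain the rest so the log file is still complete.
          output.write(process.stdout.read())
          output.close()
          process.stdout.close()
          waiter.join()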
Signed-off-by: Daniel Latypov <dlatypov(a)google.com>
---
v1 -> v2: restore stdin=subprocess.PIPE to avoid a UML (?) bug.
https://lore.kernel.org/linux-kselftest/CABVgOSnXBtCHEMDYYqrbXWvKcrSbY9BXP4…
---
tools/testing/kunit/kunit_kernel.py | 75 +++++++++++++++-----------
tools/testing/kunit/kunit_tool_test.py | 17 ++++++
2 files changed, 62 insertions(+), 30 deletions(-)
diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
index 2c6f916ccbaf..450f1c386e01 100644
--- a/tools/testing/kunit/kunit_kernel.py
+++ b/tools/testing/kunit/kunit_kernel.py
@@ -12,7 +12,8 @@ import subprocess
import os
import shutil
import signal
-from typing import Iterator, Optional, Tuple
+import threading
+from typing import Iterator, List, Optional, Tuple
from contextlib import ExitStack
@@ -103,8 +104,8 @@ class LinuxSourceTreeOperations(object):
if stderr: # likely only due to build warnings
print(stderr.decode())
- def run(self, params, timeout, build_dir, outfile) -> None:
- pass
+ def start(self, params: List[str], build_dir: str) -> subprocess.Popen:
+ raise RuntimeError('not implemented!')
class LinuxSourceTreeOperationsQemu(LinuxSourceTreeOperations):
@@ -123,7 +124,7 @@ class LinuxSourceTreeOperationsQemu(LinuxSourceTreeOperations):
kconfig.parse_from_string(self._kconfig)
base_kunitconfig.merge_in_entries(kconfig)
- def run(self, params, timeout, build_dir, outfile):
+ def start(self, params: List[str], build_dir: str) -> subprocess.Popen:
kernel_path = os.path.join(build_dir, self._kernel_path)
qemu_command = ['qemu-system-' + self._qemu_arch,
'-nodefaults',
@@ -134,18 +135,11 @@ class LinuxSourceTreeOperationsQemu(LinuxSourceTreeOperations):
'-nographic',
'-serial stdio'] + self._extra_qemu_params
print('Running tests with:\n$', ' '.join(qemu_command))
- with open(outfile, 'w') as output:
- process = subprocess.Popen(' '.join(qemu_command),
- stdin=subprocess.PIPE,
- stdout=output,
- stderr=subprocess.STDOUT,
- text=True, shell=True)
- try:
- process.wait(timeout=timeout)
- except Exception as e:
- print(e)
- process.terminate()
- return process
+ return subprocess.Popen(' '.join(qemu_command),
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ text=True, shell=True)
class LinuxSourceTreeOperationsUml(LinuxSourceTreeOperations):
"""An abstraction over command line operations performed on a source tree."""
@@ -175,17 +169,14 @@ class LinuxSourceTreeOperationsUml(LinuxSourceTreeOperations):
kunit_parser.print_with_timestamp(
'Starting Kernel with all configs takes a few minutes...')
- def run(self, params, timeout, build_dir, outfile):
+ def start(self, params: List[str], build_dir: str) -> subprocess.Popen:
"""Runs the Linux UML binary. Must be named 'linux'."""
linux_bin = get_file_path(build_dir, 'linux')
- outfile = get_outfile_path(build_dir)
- with open(outfile, 'w') as output:
- process = subprocess.Popen([linux_bin] + params,
- stdin=subprocess.PIPE,
- stdout=output,
- stderr=subprocess.STDOUT,
- text=True)
- process.wait(timeout)
+ return subprocess.Popen([linux_bin] + params,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ text=True)
def get_kconfig_path(build_dir) -> str:
return get_file_path(build_dir, KCONFIG_PATH)
@@ -330,12 +321,36 @@ class LinuxSourceTree(object):
args.extend(['mem=1G', 'console=tty', 'kunit_shutdown=halt'])
if filter_glob:
args.append('kunit.filter_glob='+filter_glob)
- outfile = get_outfile_path(build_dir)
- self._ops.run(args, timeout, build_dir, outfile)
- subprocess.call(['stty', 'sane'])
- with open(outfile, 'r') as file:
- for line in file:
+
+ process = self._ops.start(args, build_dir)
+ assert process.stdout is not None # tell mypy it's set
+
+ # Enforce the timeout in a background thread.
+ def _wait_proc():
+ try:
+ process.wait(timeout=timeout)
+ except Exception as e:
+ print(e)
+ process.terminate()
+ process.wait()
+ waiter = threading.Thread(target=_wait_proc)
+ waiter.start()
+
+ output = open(get_outfile_path(build_dir), 'w')
+ try:
+ # Tee the output to the file and to our caller in real time.
+ for line in process.stdout:
+ output.write(line)
yield line
+ # This runs even if our caller doesn't consume every line.
+ finally:
+ # Flush any leftover output to the file
+ output.write(process.stdout.read())
+ output.close()
+ process.stdout.close()
+
+ waiter.join()
+ subprocess.call(['stty', 'sane'])
def signal_handler(self, sig, frame) -> None:
logging.error('Build interruption occurred. Cleaning console.')
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index 619c4554cbff..f9a7398a9584 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -14,6 +14,7 @@ import tempfile, shutil # Handling test_tmpdir
import itertools
import json
import signal
+import subprocess
import os
import kunit_config
@@ -291,6 +292,22 @@ class LinuxSourceTreeTest(unittest.TestCase):
pass
tree = kunit_kernel.LinuxSourceTree('', kunitconfig_path=dir)
+ def test_run_kernel_hits_exception(self):
+ def fake_start(unused_args, unused_build_dir):
+ return subprocess.Popen(['echo "hi\nbye"'], shell=True, text=True, stdout=subprocess.PIPE)
+
+ with tempfile.TemporaryDirectory('') as build_dir:
+ tree = kunit_kernel.LinuxSourceTree(build_dir, load_config=False)
+ mock.patch.object(tree._ops, 'start', side_effect=fake_start).start()
+
+ with self.assertRaises(ValueError):
+ for line in tree.run_kernel(build_dir=build_dir):
+ self.assertEqual(line, 'hi\n')
+ raise ValueError('uh oh, did not read all output')
+
+ with open(kunit_kernel.get_outfile_path(build_dir), 'rt') as outfile:
+ self.assertEqual(outfile.read(), 'hi\nbye\n', msg='Missing some output')
+
# TODO: add more test cases.
base-commit: 3b29021ddd10cfb6b2565c623595bd3b02036f33
--
2.33.0.800.g4c38ced690-goog
Allow running each suite or each test case alone per kernel boot.
The motivation for this is to debug "test hermeticity" issues.
This new --run_isolated flag would be a good first step to try and
narrow down root causes.
Context: sometimes tests pass/fail depending on what ran before them.
Memory corruption errors in particular might only cause noticeable
issues later on. But you can also have the opposite, where "fixing" one
test causes another to start failing.
Usage:
$ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit --run_isolated=suite
$ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit --run_isolated=test
$ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit --run_isolated=test example
The last one would provide output like
======== [PASSED] example ========
[PASSED] example_simple_test
============================================================
Testing complete. 1 tests run. 0 failed. 0 crashed. 0 skipped.
Starting KUnit Kernel (2/3)...
============================================================
======== [SKIPPED] example ========
[SKIPPED] example_skip_test # SKIP this test should be skipped
============================================================
Testing complete. 1 tests run. 0 failed. 0 crashed. 1 skipped.
Starting KUnit Kernel (3/3)...
============================================================
======== [SKIPPED] example ========
[SKIPPED] example_mark_skipped_test # SKIP this test should be skipped
============================================================
Testing complete. 1 tests run. 0 failed. 0 crashed. 1 skipped.
See the last patch's description for a bit more detail.
Meta:
The first patch is from another series with just a reworded commit
message, https://lore.kernel.org/linux-kselftest/20210805235145.2528054-2-dlatypov@g…
This patch series is based on Shuah's kunit branch:
https://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git/?…
Changes:
v1 -> v2: rebase onto Shuah's kunit branch, fix missing code in patch 1.
v2 -> v3: fix mypy errors, drop test plan from output, fix pre-existing
bug where kunit was not actually tracking test execution time (new patch 3).
v3 -> v4: attempt to filter out non-KUnit dmesg output when getting list
of test names, using this regex: ^[^\s.]+\.[^\s.]+$ (illustrated below)
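To illustrate what that filter accepts (a standalone sketch, not code
from the tool; the sample lines are made up):

  import re

  # Accepts 'suite.test_case' names; rejects dmesg noise containing
  # spaces or extra dots.
  TEST_NAME = re.compile(r'^[^\s.]+\.[^\s.]+$')

  lines = [
      'example.example_simple_test',
      '[    0.735099] Stack:',
      'kunit-resource-test.kunit_resource_test_init_resources',
  ]
  print([l for l in lines if TEST_NAME.match(l)])
  # -> only the two suite.test_case names are kept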
Daniel Latypov (4):
kunit: add 'kunit.action' param to allow listing out tests
kunit: tool: factor exec + parse steps into a function
kunit: tool: actually track how long it took to run tests
kunit: tool: support running each suite/test separately
lib/kunit/executor.c | 45 ++++++++-
tools/testing/kunit/kunit.py | 134 +++++++++++++++++--------
tools/testing/kunit/kunit_tool_test.py | 40 ++++++++
3 files changed, 173 insertions(+), 46 deletions(-)
base-commit: 3b29021ddd10cfb6b2565c623595bd3b02036f33
--
2.33.0.800.g4c38ced690-goog
From: Rae Moar <rmoar(a)google.com>
Update kunit_parser to improve compatibility with the KTAP
specification, including arbitrarily nested tests. This patch
accomplishes three major changes:
- Use a general Test object to represent all tests rather than TestCase
and TestSuite objects. This allows for easier implementation of arbitrary
levels of nested tests and promotes the idea that both test suites and test
cases are tests.
- Print errors incrementally rather than all at once after parsing
finishes, to maximize the information given to the user when the
parser is handed invalid input and to increase the helpfulness of the
timestamps printed. Note that kunit.py parse does not print
incrementally yet. However, this fix brings us closer to that feature.
- Increase compatibility with different input formats. Arbitrary levels
of nested tests are supported, and test cases and test suites may now
appear at the same level of testing.
This patch now implements the KTAP specification as described here:
https://lore.kernel.org/linux-kselftest/CA+GJov6tdjvY9x12JsJT14qn6c7NViJxqa….
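For example, nested results of the following shape (an illustrative
KTAP fragment, not one of the checked-in test logs) are now parsed
into a tree of Test objects:

  TAP version 14
  1..1
    # Subtest: example_suite
    1..2
      # Subtest: nested_suite
      1..1
      ok 1 - nested_case
    ok 1 - nested_suite
    ok 2 - example_case
  ok 1 - example_suite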
This patch adjusts the kunit_tool_test.py file to check for
the correct outputs from the new parser and adds a new test to check
the parsing of a correctly formatted KTAP result log with multiple
nested subtests (test_is_test_passed-all_passed_nested.log).
This patch also alters the kunit_json.py file to allow for arbitrarily
nested tests.
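The resulting JSON nests sub_groups recursively, along these lines (a
trimmed, hand-written illustration of the shape; most KernelCI fields
are omitted):

  {
      "name": "KUnit Test Group",
      "sub_groups": [
          {
              "name": "example_suite",
              "sub_groups": [],
              "test_cases": [
                  {"name": "example_case", "status": "PASS"}
              ]
          }
      ],
      "test_cases": []
  }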
Signed-off-by: Rae Moar <rmoar(a)google.com>
Reviewed-by: Brendan Higgins <brendanhiggins(a)google.com>
---
NOTE: this patch is now applied on top of
https://lore.kernel.org/linux-kselftest/20210930222048.1692635-5-dlatypov@g…
to resolve a conflict.
Change log from v3,4:
https://lore.kernel.org/linux-kselftest/20210901190623.315736-1-rmoar@googl…
- Move test_kselftest_nested from LinuxSourceTreeTest => KUnitParserTest.
- Resolve conflict with hermetic testing patches.
- max_status is no longer defined, so we need to use the TestCounts
type now. And to keep --raw_output working, we need to set this to
SUCCESS to avoid the default assumption that the kernel crashed.
Ignore v4, was accidentally based on v2.
Change log from v2:
https://lore.kernel.org/linux-kselftest/20210826195505.3066755-1-rmoar@goog…
- Fixes bug of type disagreement in kunit_json.py for build_dir
- Removes raw_output()
- Changes docstrings in kunit_parser.py (class docstring, LineStream
docstrings, add_error(), total(), get_status(), all parsing methods)
- Fixes bug of not printing diagnostic log in the case of end of lines
- Sets default status of all tests to TEST_CRASHED
- Adds and prints empty tests with crashed status in case of missing
tests
- Prints 'subtest' in instance of 1 subtest instead of 'subtests'
- Includes checking for 'BUG:' message in search of crash messages in
log (note that parse_crash_in_log method could be removed but would
require deleting tests in kunit_tool_test.py that include the crash
message that is no longer used. If removed, parser would still print
log in cases of test crashed or failure, which would now include
missing subtests)
- Fixes bug of including directives (other than SKIP) in test name
when matching name in result line for subtests
Change log from v1:
https://lore.kernel.org/linux-kselftest/20210820200032.2178134-1-rmoar@goog…
- Rebase onto kselftest/kunit branch
- Add tests to kunit_tool_test.py to check parser is correctly stripping
hyphen, producing correct json objects with nested tests, correctly
passing kselftest TAP output, and correctly deals with missing test plan.
- Fix bug to correctly match test name in instance of a missing test plan.
- Fix bug in kunit_tool_test.py pointed out by Daniel where it was not
correctly checking for a proper match to the '0 tests run!' error
message. Reverts changes back to original.
- A few minor changes to commit message using Daniel's comments.
- Change docstrings using Daniel's comments to reduce:
- Shortens some docstrings to be one-line or just description if it is
self explanatory.
- Remove explicit respecification of types of parameters and returns
because this is already specified in the function annotations. However,
some descriptions of the parameters and returns remain and some contain
the type for context. Additionally, the types of public attributes of
classes remain.
- Remove any documentation of 'Return: None'
- Remove docstrings of helper methods within other methods
---
tools/testing/kunit/kunit.py | 19 +-
tools/testing/kunit/kunit_json.py | 56 +-
tools/testing/kunit/kunit_parser.py | 1024 ++++++++++++-----
tools/testing/kunit/kunit_tool_test.py | 133 ++-
.../test_is_test_passed-all_passed_nested.log | 34 +
.../test_is_test_passed-kselftest.log | 14 +
.../test_is_test_passed-missing_plan.log | 31 +
.../kunit/test_data/test_strip_hyphen.log | 16 +
8 files changed, 943 insertions(+), 384 deletions(-)
create mode 100644 tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log
create mode 100644 tools/testing/kunit/test_data/test_is_test_passed-kselftest.log
create mode 100644 tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log
create mode 100644 tools/testing/kunit/test_data/test_strip_hyphen.log
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index 8c7e8c7b2c97..6aef16478efb 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -136,7 +136,7 @@ def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest,
test_glob = request.filter_glob.split('.', maxsplit=2)[1]
filter_globs = [g + '.'+ test_glob for g in filter_globs]
- overall_status = kunit_parser.TestStatus.SUCCESS
+ test_counts = kunit_parser.TestCounts()
exec_time = 0.0
for i, filter_glob in enumerate(filter_globs):
kunit_parser.print_with_timestamp('Starting KUnit Kernel ({}/{})...'.format(i+1, len(filter_globs)))
@@ -155,18 +155,29 @@ def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest,
test_end = time.time()
exec_time += test_end - test_start
- overall_status = kunit_parser.max_status(overall_status, result.status)
+ test_counts.add_subtest_counts(result.result.test.counts)
- return KunitResult(status=result.status, result=result.result, elapsed_time=exec_time)
+ kunit_status = _map_to_overall_status(test_counts.get_status())
+ return KunitResult(status=kunit_status, result=result.result, elapsed_time=exec_time)
+
+def _map_to_overall_status(test_status: kunit_parser.TestStatus) -> KunitStatus:
+ if test_status in (kunit_parser.TestStatus.SUCCESS, kunit_parser.TestStatus.SKIPPED):
+ return KunitStatus.SUCCESS
+ else:
+ return KunitStatus.TEST_FAILURE
def parse_tests(request: KunitParseRequest, input_data: Iterable[str]) -> KunitResult:
parse_start = time.time()
test_result = kunit_parser.TestResult(kunit_parser.TestStatus.SUCCESS,
- [],
+ kunit_parser.Test(),
'Tests not Parsed.')
if request.raw_output:
+ # Treat unparsed results as one passing test.
+ test_result.test.status = kunit_parser.TestStatus.SUCCESS
+ test_result.test.counts.passed = 1
+
output: Iterable[str] = input_data
if request.raw_output == 'all':
pass
diff --git a/tools/testing/kunit/kunit_json.py b/tools/testing/kunit/kunit_json.py
index f5cca5c38cac..746bec72b9ac 100644
--- a/tools/testing/kunit/kunit_json.py
+++ b/tools/testing/kunit/kunit_json.py
@@ -11,47 +11,47 @@ import os
import kunit_parser
-from kunit_parser import TestStatus
-
-def get_json_result(test_result, def_config, build_dir, json_path) -> str:
- sub_groups = []
-
- # Each test suite is mapped to a KernelCI sub_group
- for test_suite in test_result.suites:
- sub_group = {
- "name": test_suite.name,
- "arch": "UM",
- "defconfig": def_config,
- "build_environment": build_dir,
- "test_cases": [],
- "lab_name": None,
- "kernel": None,
- "job": None,
- "git_branch": "kselftest",
- }
- test_cases = []
- # TODO: Add attachments attribute in test_case with detailed
- # failure message, see https://api.kernelci.org/schema-test-case.html#get
- for case in test_suite.cases:
- test_case = {"name": case.name, "status": "FAIL"}
- if case.status == TestStatus.SUCCESS:
+from kunit_parser import Test, TestResult, TestStatus
+from typing import Any, Dict, Optional
+
+JsonObj = Dict[str, Any]
+
+def _get_group_json(test: Test, def_config: str,
+ build_dir: Optional[str]) -> JsonObj:
+ sub_groups = [] # List[JsonObj]
+ test_cases = [] # List[JsonObj]
+
+ for subtest in test.subtests:
+ if len(subtest.subtests):
+ sub_group = _get_group_json(subtest, def_config,
+ build_dir)
+ sub_groups.append(sub_group)
+ else:
+ test_case = {"name": subtest.name, "status": "FAIL"}
+ if subtest.status == TestStatus.SUCCESS:
test_case["status"] = "PASS"
- elif case.status == TestStatus.TEST_CRASHED:
+ elif subtest.status == TestStatus.TEST_CRASHED:
test_case["status"] = "ERROR"
test_cases.append(test_case)
- sub_group["test_cases"] = test_cases
- sub_groups.append(sub_group)
+
test_group = {
- "name": "KUnit Test Group",
+ "name": test.name,
"arch": "UM",
"defconfig": def_config,
"build_environment": build_dir,
"sub_groups": sub_groups,
+ "test_cases": test_cases,
"lab_name": None,
"kernel": None,
"job": None,
"git_branch": "kselftest",
}
+ return test_group
+
+def get_json_result(test_result: TestResult, def_config: str,
+ build_dir: Optional[str], json_path: str) -> str:
+ test_group = _get_group_json(test_result.test, def_config, build_dir)
+ test_group["name"] = "KUnit Test Group"
json_obj = json.dumps(test_group, indent=4)
if json_path != 'stdout':
with open(json_path, 'w') as result_path:
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index 6310a641b151..db796580dc5f 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -1,11 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
#
-# Parses test results from a kernel dmesg log.
+# Parses KTAP test results from a kernel dmesg log and incrementally prints
+# results with reader-friendly format. Stores and returns test results in a
+# Test object.
#
# Copyright (C) 2019, Google LLC.
# Author: Felix Guo <felixguoxiuping(a)gmail.com>
# Author: Brendan Higgins <brendanhiggins(a)google.com>
+# Author: Rae Moar <rmoar(a)google.com>
+from __future__ import annotations
import re
from collections import namedtuple
@@ -14,33 +18,52 @@ from enum import Enum, auto
from functools import reduce
from typing import Iterable, Iterator, List, Optional, Tuple
-TestResult = namedtuple('TestResult', ['status','suites','log'])
-
-class TestSuite(object):
+TestResult = namedtuple('TestResult', ['status','test','log'])
+
+class Test(object):
+ """
+ A class to represent a test parsed from KTAP results. All KTAP
+ results within a test log are stored in a main Test object as
+ subtests.
+
+ Attributes:
+ status : TestStatus - status of the test
+ name : str - name of the test
+ expected_count : int - expected number of subtests (0 if single
+ test case and None if unknown expected number of subtests)
+ subtests : List[Test] - list of subtests
+ log : List[str] - log of KTAP lines that correspond to the test
+ counts : TestCounts - counts of the test statuses and errors of
+ subtests or of the test itself if the test is a single
+ test case.
+ """
def __init__(self) -> None:
- self.status = TestStatus.SUCCESS
- self.name = ''
- self.cases = [] # type: List[TestCase]
-
- def __str__(self) -> str:
- return 'TestSuite(' + str(self.status) + ',' + self.name + ',' + str(self.cases) + ')'
-
- def __repr__(self) -> str:
- return str(self)
-
-class TestCase(object):
- def __init__(self) -> None:
- self.status = TestStatus.SUCCESS
+ """Creates Test object with default attributes."""
+ self.status = TestStatus.TEST_CRASHED
self.name = ''
+ self.expected_count = 0 # type: Optional[int]
+ self.subtests = [] # type: List[Test]
self.log = [] # type: List[str]
+ self.counts = TestCounts()
def __str__(self) -> str:
- return 'TestCase(' + str(self.status) + ',' + self.name + ',' + str(self.log) + ')'
+ """Returns string representation of a Test class object."""
+ return ('Test(' + str(self.status) + ', ' + self.name +
+ ', ' + str(self.expected_count) + ', ' +
+ str(self.subtests) + ', ' + str(self.log) + ', ' +
+ str(self.counts) + ')')
def __repr__(self) -> str:
+ """Returns string representation of a Test class object."""
return str(self)
+ def add_error(self, error_message: str) -> None:
+ """Records an error that occurred while parsing this test."""
+ self.counts.errors += 1
+ print_error('Test ' + self.name + ': ' + error_message)
+
class TestStatus(Enum):
+ """An enumeration class to represent the status of a test."""
SUCCESS = auto()
FAILURE = auto()
SKIPPED = auto()
@@ -48,381 +71,756 @@ class TestStatus(Enum):
NO_TESTS = auto()
FAILURE_TO_PARSE_TESTS = auto()
+class TestCounts:
+ """
+ Tracks the counts of statuses of all test cases and any errors within
+ a Test.
+
+ Attributes:
+ passed : int - the number of tests that have passed
+ failed : int - the number of tests that have failed
+ crashed : int - the number of tests that have crashed
+ skipped : int - the number of tests that have skipped
+ errors : int - the number of errors in the test and subtests
+ """
+ def __init__(self):
+ """Creates TestCounts object with counts of all test
+ statuses and test errors set to 0.
+ """
+ self.passed = 0
+ self.failed = 0
+ self.crashed = 0
+ self.skipped = 0
+ self.errors = 0
+
+ def __str__(self) -> str:
+ """Returns the string representation of a TestCounts object.
+ """
+ return ('Passed: ' + str(self.passed) +
+ ', Failed: ' + str(self.failed) +
+ ', Crashed: ' + str(self.crashed) +
+ ', Skipped: ' + str(self.skipped) +
+ ', Errors: ' + str(self.errors))
+
+ def total(self) -> int:
+ """Returns the total number of test cases within a test
+ object, where a test case is a test with no subtests.
+ """
+ return (self.passed + self.failed + self.crashed +
+ self.skipped)
+
+ def add_subtest_counts(self, counts: TestCounts) -> None:
+ """
+ Adds the counts of another TestCounts object to the current
+ TestCounts object. Used to add the counts of a subtest to the
+ parent test.
+
+ Parameters:
+ counts - a different TestCounts object whose counts
+ will be added to the counts of the TestCounts object
+ """
+ self.passed += counts.passed
+ self.failed += counts.failed
+ self.crashed += counts.crashed
+ self.skipped += counts.skipped
+ self.errors += counts.errors
+
+ def get_status(self) -> TestStatus:
+ """Returns the aggregated status of a Test using test
+ counts.
+ """
+ if self.total() == 0:
+ return TestStatus.NO_TESTS
+ elif self.crashed:
+ # If one of the subtests crash, the expected status
+ # of the Test is crashed.
+ return TestStatus.TEST_CRASHED
+ elif self.failed:
+ # Otherwise if one of the subtests fail, the
+ # expected status of the Test is failed.
+ return TestStatus.FAILURE
+ elif self.passed:
+ # Otherwise if one of the subtests pass, the
+ # expected status of the Test is passed.
+ return TestStatus.SUCCESS
+ else:
+ # Finally, if none of the subtests have failed,
+ # crashed, or passed, the expected status of the
+ # Test is skipped.
+ return TestStatus.SKIPPED
+
+ def add_status(self, status: TestStatus) -> None:
+ """
+ Increments count of inputted status.
+
+ Parameters:
+ status - status to be added to the TestCounts object
+ """
+ if status == TestStatus.SUCCESS or \
+ status == TestStatus.NO_TESTS:
+ # if status is NO_TESTS the most appropriate
+ # attribute to increment is passed because
+ # the test did not fail, crash or get skipped.
+ self.passed += 1
+ elif status == TestStatus.FAILURE:
+ self.failed += 1
+ elif status == TestStatus.SKIPPED:
+ self.skipped += 1
+ else:
+ self.crashed += 1
+
class LineStream:
- """Provides a peek()/pop() interface over an iterator of (line#, text)."""
+ """
+ A class to represent the lines of kernel output.
+ Provides a peek()/pop() interface over an iterator of
+ (line#, text).
+ """
_lines: Iterator[Tuple[int, str]]
_next: Tuple[int, str]
_done: bool
def __init__(self, lines: Iterator[Tuple[int, str]]):
+ """Creates a new LineStream that wraps the given iterator."""
self._lines = lines
self._done = False
self._next = (0, '')
self._get_next()
def _get_next(self) -> None:
+		"""Advances the LineStream to the next line or sets the _done
+ attribute if the LineStream has reached the end of the lines.
+ """
try:
self._next = next(self._lines)
except StopIteration:
self._done = True
def peek(self) -> str:
+ """Returns the current line, without advancing the LineStream.
+ """
return self._next[1]
def pop(self) -> str:
+ """Returns the current line and advances the LineStream to
+ the next line.
+ """
n = self._next
self._get_next()
return n[1]
def __bool__(self) -> bool:
+ """Returns True if stream has more lines."""
return not self._done
# Only used by kunit_tool_test.py.
def __iter__(self) -> Iterator[str]:
+ """Empties all lines stored in LineStream object into
+ Iterator object and returns the Iterator object.
+ """
while bool(self):
yield self.pop()
def line_number(self) -> int:
+ """Returns the line number of the current line."""
return self._next[0]
-kunit_start_re = re.compile(r'TAP version [0-9]+$')
-kunit_end_re = re.compile('(List of all partitions:|'
- 'Kernel panic - not syncing: VFS:|reboot: System halted)')
+# Parsing helper methods:
+
+KTAP_START = re.compile(r'KTAP version ([0-9]+)$')
+TAP_START = re.compile(r'TAP version ([0-9]+)$')
+KTAP_END = re.compile('(List of all partitions:|'
+ 'Kernel panic - not syncing: VFS:|reboot: System halted)')
def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream:
- def isolate_kunit_output(kernel_output: Iterable[str]) -> Iterator[Tuple[int, str]]:
+	"""Extracts KTAP lines from inputted kernel output into a
+	LineStream object."""
+ def isolate_ktap_output(kernel_output: Iterable[str]) \
+ -> Iterator[Tuple[int, str]]:
line_num = 0
started = False
for line in kernel_output:
line_num += 1
- line = line.rstrip() # line always has a trailing \n
- if kunit_start_re.search(line):
+ line = line.rstrip() # remove trailing \n
+ if not started and KTAP_START.search(line):
+ # start extracting KTAP lines and set prefix
+ # to number of characters before version line
+ prefix_len = len(
+ line.split('KTAP version')[0])
+ started = True
+ yield line_num, line[prefix_len:]
+ elif not started and TAP_START.search(line):
+ # start extracting KTAP lines and set prefix
+ # to number of characters before version line
prefix_len = len(line.split('TAP version')[0])
started = True
yield line_num, line[prefix_len:]
- elif kunit_end_re.search(line):
+ elif started and KTAP_END.search(line):
+ # stop extracting KTAP lines
break
elif started:
- yield line_num, line[prefix_len:]
- return LineStream(lines=isolate_kunit_output(kernel_output))
-
-DIVIDER = '=' * 60
-
-RESET = '\033[0;0m'
-
-def red(text) -> str:
- return '\033[1;31m' + text + RESET
-
-def yellow(text) -> str:
- return '\033[1;33m' + text + RESET
-
-def green(text) -> str:
- return '\033[1;32m' + text + RESET
-
-def print_with_timestamp(message) -> None:
- print('[%s] %s' % (datetime.now().strftime('%H:%M:%S'), message))
+			# remove prefix and any indentation and yield
+ # line with line number
+ line = line[prefix_len:].lstrip()
+ yield line_num, line
+ return LineStream(lines=isolate_ktap_output(kernel_output))
+
+KTAP_VERSIONS = [1]
+TAP_VERSIONS = [13, 14]
+
+def check_version(version_num: int, accepted_versions: List[int],
+ version_type: str, test: Test) -> None:
+ """
+ Adds error to test object if version number is too high or too
+ low.
+
+ Parameters:
+ version_num - The inputted version number from the parsed KTAP or TAP
+ header line
+	accepted_versions - List of accepted KTAP or TAP versions
+ version_type - 'KTAP' or 'TAP' depending on the type of
+ version line.
+ test - Test object for current test being parsed
+ """
+ if version_num < min(accepted_versions):
+ test.add_error(version_type +
+ ' version lower than expected!')
+ elif version_num > max(accepted_versions):
+ test.add_error(
+ version_type + ' version higher than expected!')
+
+def parse_ktap_header(lines: LineStream, test: Test) -> bool:
+ """
+ Parses KTAP/TAP header line and checks version number.
+	Returns False if it fails to parse the KTAP/TAP header line.
+
+ Accepted formats:
+ - 'KTAP version [version number]'
+ - 'TAP version [version number]'
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+ test - Test object for current test being parsed
+
+ Return:
+ True if successfully parsed KTAP/TAP header line
+ """
+ ktap_match = KTAP_START.match(lines.peek())
+ tap_match = TAP_START.match(lines.peek())
+ if ktap_match:
+ version_num = int(ktap_match.group(1))
+ check_version(version_num, KTAP_VERSIONS, 'KTAP', test)
+ elif tap_match:
+ version_num = int(tap_match.group(1))
+ check_version(version_num, TAP_VERSIONS, 'TAP', test)
+ else:
+ return False
+ test.log.append(lines.pop())
+ return True
-def format_suite_divider(message) -> str:
- return '======== ' + message + ' ========'
+TEST_HEADER = re.compile(r'^# Subtest: (.*)$')
-def print_suite_divider(message) -> None:
- print_with_timestamp(DIVIDER)
- print_with_timestamp(format_suite_divider(message))
+def parse_test_header(lines: LineStream, test: Test) -> bool:
+ """
+ Parses test header and stores test name in test object.
+	Returns False if it fails to parse the test header line.
-def print_log(log) -> None:
- for m in log:
- print_with_timestamp(m)
+ Accepted format:
+ - '# Subtest: [test name]'
-TAP_ENTRIES = re.compile(r'^(TAP|[\s]*ok|[\s]*not ok|[\s]*[0-9]+\.\.[0-9]+|[\s]*# (Subtest:|.*: kunit test case crashed!)).*$')
+ Parameters:
+ lines - LineStream of ktap output to parse
+ test - Test object for current test being parsed
-def consume_non_diagnostic(lines: LineStream) -> None:
- while lines and not TAP_ENTRIES.match(lines.peek()):
- lines.pop()
-
-def save_non_diagnostic(lines: LineStream, test_case: TestCase) -> None:
- while lines and not TAP_ENTRIES.match(lines.peek()):
- test_case.log.append(lines.peek())
- lines.pop()
+ Return:
+ True if successfully parsed test header line
+ """
+ match = TEST_HEADER.match(lines.peek())
+ if not match:
+ return False
+ test.log.append(lines.pop())
+ test.name = match.group(1)
+ return True
-OkNotOkResult = namedtuple('OkNotOkResult', ['is_ok','description', 'text'])
+TEST_PLAN = re.compile(r'1\.\.([0-9]+)')
-OK_NOT_OK_SKIP = re.compile(r'^[\s]*(ok|not ok) [0-9]+ - (.*) # SKIP(.*)$')
+def parse_test_plan(lines: LineStream, test: Test) -> bool:
+ """
+ Parses test plan line and stores the expected number of subtests in
+ test object. Reports an error if expected count is 0.
+	Returns False and reports a missing test plan error if it fails to
+	parse the test plan.
-OK_NOT_OK_SUBTEST = re.compile(r'^[\s]+(ok|not ok) [0-9]+ - (.*)$')
+ Accepted format:
+ - '1..[number of subtests]'
-OK_NOT_OK_MODULE = re.compile(r'^(ok|not ok) ([0-9]+) - (.*)$')
+ Parameters:
+ lines - LineStream of ktap output to parse
+ test - Test object for current test being parsed
-def parse_ok_not_ok_test_case(lines: LineStream, test_case: TestCase) -> bool:
- save_non_diagnostic(lines, test_case)
- if not lines:
- test_case.status = TestStatus.TEST_CRASHED
- return True
- line = lines.peek()
- match = OK_NOT_OK_SUBTEST.match(line)
- while not match and lines:
- line = lines.pop()
- match = OK_NOT_OK_SUBTEST.match(line)
- if match:
- test_case.log.append(lines.pop())
- test_case.name = match.group(2)
- skip_match = OK_NOT_OK_SKIP.match(line)
- if skip_match:
- test_case.status = TestStatus.SKIPPED
- return True
- if test_case.status == TestStatus.TEST_CRASHED:
- return True
- if match.group(1) == 'ok':
- test_case.status = TestStatus.SUCCESS
- else:
- test_case.status = TestStatus.FAILURE
- return True
- else:
+ Return:
+ True if successfully parsed test plan line
+ """
+ match = TEST_PLAN.match(lines.peek())
+ if not match:
+ test.expected_count = None
+ test.add_error('missing plan line!')
return False
-
-SUBTEST_DIAGNOSTIC = re.compile(r'^[\s]+# (.*)$')
-DIAGNOSTIC_CRASH_MESSAGE = re.compile(r'^[\s]+# .*?: kunit test case crashed!$')
-
-def parse_diagnostic(lines: LineStream, test_case: TestCase) -> bool:
- save_non_diagnostic(lines, test_case)
- if not lines:
+ test.log.append(lines.pop())
+ expected_count = int(match.group(1))
+ test.expected_count = expected_count
+ if expected_count == 0:
+ test.status = TestStatus.NO_TESTS
+ test.add_error('0 tests run!')
+ return True
+
+TEST_RESULT = re.compile(r'^(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$')
+
+TEST_RESULT_SKIP = re.compile(r'^(ok|not ok) ([0-9]+) (- )?(.*) # SKIP(.*)$')
+
+def peek_test_name_match(lines: LineStream, test: Test) -> bool:
+ """
+ Matches current line with the format of a test result line and checks
+ if the name matches the name of the current test.
+	Returns False if it fails to match the format or name.
+
+ Accepted format:
+ - '[ok|not ok] [test number] [-] [test name] [optional skip
+ directive]'
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+ test - Test object for current test being parsed
+
+ Return:
+		True if the line matches the test result format and its name
+		matches the expected test name
+ """
+ line = lines.peek()
+ match = TEST_RESULT.match(line)
+ if not match:
return False
+ name = match.group(4)
+ return (name == test.name)
+
+def parse_test_result(lines: LineStream, test: Test,
+ expected_num: int) -> bool:
+ """
+ Parses test result line and stores the status and name in the test
+ object. Reports an error if the test number does not match expected
+ test number.
+	Returns False if it fails to parse the test result line.
+
+	Note that the SKIP directive is the only directive that causes a
+ change in status.
+
+ Accepted format:
+ - '[ok|not ok] [test number] [-] [test name] [optional skip
+ directive]'
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+ test - Test object for current test being parsed
+ expected_num - expected test number for current test
+
+ Return:
+ True if successfully parsed a test result line.
+ """
line = lines.peek()
- match = SUBTEST_DIAGNOSTIC.match(line)
- if match:
- test_case.log.append(lines.pop())
- crash_match = DIAGNOSTIC_CRASH_MESSAGE.match(line)
- if crash_match:
- test_case.status = TestStatus.TEST_CRASHED
- return True
- else:
+ match = TEST_RESULT.match(line)
+ skip_match = TEST_RESULT_SKIP.match(line)
+
+ # Check if line matches test result line format
+ if not match:
return False
+ test.log.append(lines.pop())
-def parse_test_case(lines: LineStream) -> Optional[TestCase]:
- test_case = TestCase()
- save_non_diagnostic(lines, test_case)
- while parse_diagnostic(lines, test_case):
- pass
- if parse_ok_not_ok_test_case(lines, test_case):
- return test_case
+ # Set name of test object
+ if skip_match:
+ test.name = skip_match.group(4)
else:
- return None
-
-SUBTEST_HEADER = re.compile(r'^[\s]+# Subtest: (.*)$')
-
-def parse_subtest_header(lines: LineStream) -> Optional[str]:
- consume_non_diagnostic(lines)
- if not lines:
- return None
- match = SUBTEST_HEADER.match(lines.peek())
- if match:
- lines.pop()
- return match.group(1)
+ test.name = match.group(4)
+
+ # Check test num
+ num = int(match.group(2))
+ if num != expected_num:
+ test.add_error('Expected test number ' +
+ str(expected_num) + ' but found ' + str(num))
+
+ # Set status of test object
+ status = match.group(1)
+ if skip_match:
+ test.status = TestStatus.SKIPPED
+ elif status == 'ok':
+ test.status = TestStatus.SUCCESS
else:
- return None
+ test.status = TestStatus.FAILURE
+ return True
+
+def parse_diagnostic(lines: LineStream) -> List[str]:
+ """
+	Parses lines that do not match the format of a test result line or
+	test header line and returns them in a list.
+
+ Line formats that are not parsed:
+ - '# Subtest: [test name]'
+ - '[ok|not ok] [test number] [-] [test name] [optional skip
+ directive]'
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+
+ Return:
+ Log of diagnostic lines
+ """
+ log = [] # type: List[str]
+ while lines and not TEST_RESULT.match(lines.peek()) and not \
+ TEST_HEADER.match(lines.peek()):
+ log.append(lines.pop())
+ return log
+
+DIAGNOSTIC_CRASH_MESSAGE = re.compile(
+ r'^(BUG:|# .*?: kunit test case crashed!$)')
+
+def parse_crash_in_log(test: Test) -> bool:
+ """
+ Iterate through the lines of the log to parse for crash message.
+ If crash message found, set status to crashed and return True.
+ Otherwise return False.
+
+ Parameters:
+ test - Test object for current test being parsed
+
+ Return:
+ True if crash message found in log
+ """
+ for line in test.log:
+ if DIAGNOSTIC_CRASH_MESSAGE.match(line):
+ test.status = TestStatus.TEST_CRASHED
+ return True
+ return False
-SUBTEST_PLAN = re.compile(r'[\s]+[0-9]+\.\.([0-9]+)')
-def parse_subtest_plan(lines: LineStream) -> Optional[int]:
- consume_non_diagnostic(lines)
- match = SUBTEST_PLAN.match(lines.peek())
- if match:
- lines.pop()
- return int(match.group(1))
- else:
- return None
-
-def max_status(left: TestStatus, right: TestStatus) -> TestStatus:
- if left == right:
- return left
- elif left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED:
- return TestStatus.TEST_CRASHED
- elif left == TestStatus.FAILURE or right == TestStatus.FAILURE:
- return TestStatus.FAILURE
- elif left == TestStatus.SKIPPED:
- return right
- else:
- return left
+# Printing helper methods:
-def parse_ok_not_ok_test_suite(lines: LineStream,
- test_suite: TestSuite,
- expected_suite_index: int) -> bool:
- consume_non_diagnostic(lines)
- if not lines:
- test_suite.status = TestStatus.TEST_CRASHED
- return False
- line = lines.peek()
- match = OK_NOT_OK_MODULE.match(line)
- if match:
- lines.pop()
- if match.group(1) == 'ok':
- test_suite.status = TestStatus.SUCCESS
- else:
- test_suite.status = TestStatus.FAILURE
- skip_match = OK_NOT_OK_SKIP.match(line)
- if skip_match:
- test_suite.status = TestStatus.SKIPPED
- suite_index = int(match.group(2))
- if suite_index != expected_suite_index:
- print_with_timestamp(
- red('[ERROR] ') + 'expected_suite_index ' +
- str(expected_suite_index) + ', but got ' +
- str(suite_index))
- return True
- else:
- return False
+DIVIDER = '=' * 60
-def bubble_up_errors(status_list: Iterable[TestStatus]) -> TestStatus:
- return reduce(max_status, status_list, TestStatus.SKIPPED)
+RESET = '\033[0;0m'
-def bubble_up_test_case_errors(test_suite: TestSuite) -> TestStatus:
- max_test_case_status = bubble_up_errors(x.status for x in test_suite.cases)
- return max_status(max_test_case_status, test_suite.status)
+def red(text: str) -> str:
+ """Returns inputted string with red color code."""
+ return '\033[1;31m' + text + RESET
-def parse_test_suite(lines: LineStream, expected_suite_index: int) -> Optional[TestSuite]:
- if not lines:
- return None
- consume_non_diagnostic(lines)
- test_suite = TestSuite()
- test_suite.status = TestStatus.SUCCESS
- name = parse_subtest_header(lines)
- if not name:
- return None
- test_suite.name = name
- expected_test_case_num = parse_subtest_plan(lines)
- if expected_test_case_num is None:
- return None
- while expected_test_case_num > 0:
- test_case = parse_test_case(lines)
- if not test_case:
- break
- test_suite.cases.append(test_case)
- expected_test_case_num -= 1
- if parse_ok_not_ok_test_suite(lines, test_suite, expected_suite_index):
- test_suite.status = bubble_up_test_case_errors(test_suite)
- return test_suite
- elif not lines:
- print_with_timestamp(red('[ERROR] ') + 'ran out of lines before end token')
- return test_suite
- else:
- print(f'failed to parse end of suite "{name}", at line {lines.line_number()}: {lines.peek()}')
- return None
+def yellow(text: str) -> str:
+ """Returns inputted string with yellow color code."""
+ return '\033[1;33m' + text + RESET
-TAP_HEADER = re.compile(r'^TAP version 14$')
+def green(text: str) -> str:
+ """Returns inputted string with green color code."""
+ return '\033[1;32m' + text + RESET
-def parse_tap_header(lines: LineStream) -> bool:
- consume_non_diagnostic(lines)
- if TAP_HEADER.match(lines.peek()):
- lines.pop()
- return True
- else:
- return False
+ANSI_LEN = len(red(''))
-TEST_PLAN = re.compile(r'[0-9]+\.\.([0-9]+)')
+def print_with_timestamp(message: str) -> None:
+ """Prints message with timestamp at beginning."""
+ print('[%s] %s' % (datetime.now().strftime('%H:%M:%S'), message))
-def parse_test_plan(lines: LineStream) -> Optional[int]:
- consume_non_diagnostic(lines)
- match = TEST_PLAN.match(lines.peek())
- if match:
- lines.pop()
- return int(match.group(1))
- else:
- return None
-
-def bubble_up_suite_errors(test_suites: Iterable[TestSuite]) -> TestStatus:
- return bubble_up_errors(x.status for x in test_suites)
-
-def parse_test_result(lines: LineStream) -> TestResult:
- consume_non_diagnostic(lines)
- if not lines or not parse_tap_header(lines):
- return TestResult(TestStatus.FAILURE_TO_PARSE_TESTS, [], lines)
- expected_test_suite_num = parse_test_plan(lines)
- if expected_test_suite_num == 0:
- return TestResult(TestStatus.NO_TESTS, [], lines)
- elif expected_test_suite_num is None:
- return TestResult(TestStatus.FAILURE_TO_PARSE_TESTS, [], lines)
- test_suites = []
- for i in range(1, expected_test_suite_num + 1):
- test_suite = parse_test_suite(lines, i)
- if test_suite:
- test_suites.append(test_suite)
+def format_test_divider(message: str, len_message: int) -> str:
+ """
+ Returns string with message centered in fixed width divider.
+
+ Example:
+ '===================== message example ====================='
+
+ Parameters:
+ message - message to be centered in divider line
+ len_message - length of the message to be printed such that
+ any characters of the color codes are not counted
+
+ Return:
+ String containing message centered in fixed width divider
+ """
+	default_count = 3 # default number of '=' characters
+ len_1 = default_count
+ len_2 = default_count
+ difference = len(DIVIDER) - len_message - 2 # 2 spaces added
+ if difference > 0:
+		# calculate number of '=' characters for each side of the divider
+ len_1 = int(difference / 2)
+ len_2 = difference - len_1
+ return ('=' * len_1) + ' ' + message + ' ' + ('=' * len_2)
+
+def print_test_header(test: Test) -> None:
+ """
+ Prints test header with test name and optionally the expected number
+ of subtests.
+
+ Example:
+ '=================== example (2 subtests) ==================='
+
+ Parameters:
+ test - Test object representing current test being printed
+ """
+ message = test.name
+ if test.expected_count:
+ if test.expected_count == 1:
+ message += (' (' + str(test.expected_count) +
+ ' subtest)')
else:
- print_with_timestamp(
- red('[ERROR] ') + ' expected ' +
- str(expected_test_suite_num) +
- ' test suites, but got ' + str(i - 2))
- break
- test_suite = parse_test_suite(lines, -1)
- if test_suite:
- print_with_timestamp(red('[ERROR] ') +
- 'got unexpected test suite: ' + test_suite.name)
- if test_suites:
- return TestResult(bubble_up_suite_errors(test_suites), test_suites, lines)
- else:
- return TestResult(TestStatus.NO_TESTS, [], lines)
+ message += (' (' + str(test.expected_count) +
+ ' subtests)')
+ print_with_timestamp(format_test_divider(message, len(message)))
-class TestCounts:
- passed: int
- failed: int
- crashed: int
- skipped: int
+def print_log(log: Iterable[str]) -> None:
+ """
+ Prints all strings in saved log for test in yellow.
- def __init__(self):
- self.passed = 0
- self.failed = 0
- self.crashed = 0
- self.skipped = 0
-
- def total(self) -> int:
- return self.passed + self.failed + self.crashed + self.skipped
-
-def print_and_count_results(test_result: TestResult) -> TestCounts:
- counts = TestCounts()
- for test_suite in test_result.suites:
- if test_suite.status == TestStatus.SUCCESS:
- print_suite_divider(green('[PASSED] ') + test_suite.name)
- elif test_suite.status == TestStatus.SKIPPED:
- print_suite_divider(yellow('[SKIPPED] ') + test_suite.name)
- elif test_suite.status == TestStatus.TEST_CRASHED:
- print_suite_divider(red('[CRASHED] ' + test_suite.name))
- else:
- print_suite_divider(red('[FAILED] ') + test_suite.name)
- for test_case in test_suite.cases:
- if test_case.status == TestStatus.SUCCESS:
- counts.passed += 1
- print_with_timestamp(green('[PASSED] ') + test_case.name)
- elif test_case.status == TestStatus.SKIPPED:
- counts.skipped += 1
- print_with_timestamp(yellow('[SKIPPED] ') + test_case.name)
- elif test_case.status == TestStatus.TEST_CRASHED:
- counts.crashed += 1
- print_with_timestamp(red('[CRASHED] ' + test_case.name))
- print_log(map(yellow, test_case.log))
- print_with_timestamp('')
+ Parameters:
+ log - Iterable object with all strings saved in log for test
+ """
+ for m in log:
+ print_with_timestamp(yellow(m))
+
+def format_test_result(test: Test) -> str:
+ """
+ Returns string with formatted test result with colored status and test
+ name.
+
+ Example:
+ '[PASSED] example'
+
+ Parameters:
+ test - Test object representing current test being printed
+
+ Return:
+ String containing formatted test result
+ """
+ if test.status == TestStatus.SUCCESS:
+ return (green('[PASSED] ') + test.name)
+ elif test.status == TestStatus.SKIPPED:
+ return (yellow('[SKIPPED] ') + test.name)
+ elif test.status == TestStatus.TEST_CRASHED:
+ print_log(test.log)
+ return (red('[CRASHED] ') + test.name)
+ else:
+ print_log(test.log)
+ return (red('[FAILED] ') + test.name)
+
+def print_test_result(test: Test) -> None:
+ """
+ Prints result line with status of test.
+
+ Example:
+ '[PASSED] example'
+
+ Parameters:
+ test - Test object representing current test being printed
+ """
+ print_with_timestamp(format_test_result(test))
+
+def print_test_footer(test: Test) -> None:
+ """
+ Prints test footer with status of test.
+
+ Example:
+ '===================== [PASSED] example ====================='
+
+ Parameters:
+ test - Test object representing current test being printed
+ """
+ message = format_test_result(test)
+ print_with_timestamp(format_test_divider(message,
+ len(message) - ANSI_LEN))
+
+def print_summary_line(test: Test) -> None:
+ """
+ Prints summary line of test object. Color of line is dependent on
+ status of test. Color is green if test passes, yellow if test is
+ skipped, and red if the test fails or crashes. Summary line contains
+	counts of the statuses of the test's subtests or the test itself if it
+ has no subtests.
+
+ Example:
+ "Testing complete. Passed: 2, Failed: 0, Crashed: 0, Skipped: 0,
+ Errors: 0"
+
+ test - Test object representing current test being printed
+ """
+ if test.status == TestStatus.SUCCESS or \
+ test.status == TestStatus.NO_TESTS:
+ color = green
+ elif test.status == TestStatus.SKIPPED:
+ color = yellow
+ else:
+ color = red
+ counts = test.counts
+ print_with_timestamp(color('Testing complete. ' + str(counts)))
+
+def print_error(error_message: str) -> None:
+ """
+ Prints error message with error format.
+
+ Example:
+ "[ERROR] Test example: missing test plan!"
+
+ Parameters:
+ error_message - message describing error
+ """
+ print_with_timestamp(red('[ERROR] ') + error_message)
+
+# Other methods:
+
+def bubble_up_test_results(test: Test) -> None:
+ """
+	If the test has subtests, add the test counts of the subtests to the
+	test and check if any of the subtests crashed; if so, set the test
+	status to crashed. Otherwise, if the test has no subtests, add the
+	status of the test to the test counts.
+
+ Parameters:
+ test - Test object for current test being parsed
+ """
+ parse_crash_in_log(test)
+ subtests = test.subtests
+ counts = test.counts
+ status = test.status
+ for t in subtests:
+ counts.add_subtest_counts(t.counts)
+ if counts.total() == 0:
+ counts.add_status(status)
+ elif test.counts.get_status() == TestStatus.TEST_CRASHED:
+ test.status = TestStatus.TEST_CRASHED
+
+def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
+ """
+ Finds next test to parse in LineStream, creates new Test object,
+ parses any subtests of the test, populates Test object with all
+ information (status, name) about the test and the Test objects for
+ any subtests, and then returns the Test object. The method accepts
+ three formats of tests:
+
+ Accepted test formats:
+
+ - Main KTAP/TAP header
+
+ Example:
+
+ KTAP version 1
+ 1..4
+ [subtests]
+
+ - Subtest header line
+
+ Example:
+
+ # Subtest: name
+ 1..3
+ [subtests]
+ ok 1 name
+
+ - Test result line
+
+ Example:
+
+ ok 1 - test
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+ expected_num - expected test number for test to be parsed
+ log - list of strings containing any preceding diagnostic lines
+ corresponding to the current test
+
+ Return:
+ Test object populated with characteristics and any subtests
+ """
+ test = Test()
+ test.log.extend(log)
+ parent_test = False
+ main = parse_ktap_header(lines, test)
+ if main:
+ # If KTAP/TAP header is found, attempt to parse
+ # test plan
+ test.name = "main"
+ parse_test_plan(lines, test)
+ else:
+		# If KTAP/TAP header is not found, test must be a subtest
+		# header or a test result line, so attempt to parse a
+		# subtest header
+ parent_test = parse_test_header(lines, test)
+ if parent_test:
+ # If subtest header is found, attempt to parse
+ # test plan and print header
+ parse_test_plan(lines, test)
+ print_test_header(test)
+ expected_count = test.expected_count
+ subtests = []
+ test_num = 1
+ while expected_count is None or test_num <= expected_count:
+ # Loop to parse any subtests.
+		# Break after parsing the expected number of tests, or,
+		# if the expected number of tests is unknown, break when a
+		# test result line whose name matches the subtest header
+		# is found or there are no more lines in the stream.
+ sub_log = parse_diagnostic(lines)
+ sub_test = Test()
+ if not lines or (peek_test_name_match(lines, test) and
+ not main):
+ if expected_count and test_num <= expected_count:
+ # If parser reaches end of test before
+ # parsing expected number of subtests, print
+ # crashed subtest and record error
+ test.add_error('missing expected subtest!')
+ sub_test.log.extend(sub_log)
+ test.counts.add_status(
+ TestStatus.TEST_CRASHED)
+ print_test_result(sub_test)
else:
- counts.failed += 1
- print_with_timestamp(red('[FAILED] ') + test_case.name)
- print_log(map(yellow, test_case.log))
- print_with_timestamp('')
- return counts
+ test.log.extend(sub_log)
+ break
+ else:
+ sub_test = parse_test(lines, test_num, sub_log)
+ subtests.append(sub_test)
+ test_num += 1
+ test.subtests = subtests
+ if not main:
+ # If not main test, look for test result line
+ test.log.extend(parse_diagnostic(lines))
+ if (parent_test and peek_test_name_match(lines, test)) or \
+ not parent_test:
+ parse_test_result(lines, test, expected_num)
+ else:
+ test.add_error('missing subtest result line!')
+ # Add statuses to TestCounts attribute in Test object
+ bubble_up_test_results(test)
+ if parent_test:
+ # If test has subtests and is not the main test object, print
+ # footer.
+ print_test_footer(test)
+ elif not main:
+ print_test_result(test)
+ return test
def parse_run_tests(kernel_output: Iterable[str]) -> TestResult:
- counts = TestCounts()
+ """
+ Using kernel output, extract KTAP lines, parse the lines for test
+	results and print condensed test results and a summary line.
+
+ Parameters:
+	kernel_output - Iterable object containing lines of kernel output
+
+ Return:
+	TestResult - Tuple containing status of main test object, main test
+ object with all subtests, and log of all KTAP lines.
+ """
+ print_with_timestamp(DIVIDER)
lines = extract_tap_lines(kernel_output)
- test_result = parse_test_result(lines)
- if test_result.status == TestStatus.NO_TESTS:
- print(red('[ERROR] ') + yellow('no tests run!'))
- elif test_result.status == TestStatus.FAILURE_TO_PARSE_TESTS:
- print(red('[ERROR] ') + yellow('could not parse test results!'))
+ test = Test()
+ if not lines:
+ test.add_error('invalid KTAP input!')
+ test.status = TestStatus.FAILURE_TO_PARSE_TESTS
else:
- counts = print_and_count_results(test_result)
+ test = parse_test(lines, 0, [])
+ if test.status != TestStatus.NO_TESTS:
+ test.status = test.counts.get_status()
print_with_timestamp(DIVIDER)
- if test_result.status == TestStatus.SUCCESS:
- fmt = green
- elif test_result.status == TestStatus.SKIPPED:
- fmt = yellow
- else:
- fmt =red
- print_with_timestamp(
- fmt('Testing complete. %d tests run. %d failed. %d crashed. %d skipped.' %
- (counts.total(), counts.failed, counts.crashed, counts.skipped)))
- return test_result
+ print_summary_line(test)
+ return TestResult(test.status, test, lines)
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index 3251cef25b76..c309ed76aef5 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -106,10 +106,10 @@ class KUnitParserTest(unittest.TestCase):
with open(log_path) as file:
result = kunit_parser.extract_tap_lines(file.readlines())
self.assertContains('TAP version 14', result)
- self.assertContains(' # Subtest: example', result)
- self.assertContains(' 1..2', result)
- self.assertContains(' ok 1 - example_simple_test', result)
- self.assertContains(' ok 2 - example_mock_test', result)
+ self.assertContains('# Subtest: example', result)
+ self.assertContains('1..2', result)
+ self.assertContains('ok 1 - example_simple_test', result)
+ self.assertContains('ok 2 - example_mock_test', result)
self.assertContains('ok 1 - example', result)
def test_output_with_prefix_isolated_correctly(self):
@@ -117,28 +117,28 @@ class KUnitParserTest(unittest.TestCase):
with open(log_path) as file:
result = kunit_parser.extract_tap_lines(file.readlines())
self.assertContains('TAP version 14', result)
- self.assertContains(' # Subtest: kunit-resource-test', result)
- self.assertContains(' 1..5', result)
- self.assertContains(' ok 1 - kunit_resource_test_init_resources', result)
- self.assertContains(' ok 2 - kunit_resource_test_alloc_resource', result)
- self.assertContains(' ok 3 - kunit_resource_test_destroy_resource', result)
- self.assertContains(' foo bar #', result)
- self.assertContains(' ok 4 - kunit_resource_test_cleanup_resources', result)
- self.assertContains(' ok 5 - kunit_resource_test_proper_free_ordering', result)
+ self.assertContains('# Subtest: kunit-resource-test', result)
+ self.assertContains('1..5', result)
+ self.assertContains('ok 1 - kunit_resource_test_init_resources', result)
+ self.assertContains('ok 2 - kunit_resource_test_alloc_resource', result)
+ self.assertContains('ok 3 - kunit_resource_test_destroy_resource', result)
+ self.assertContains('foo bar #', result)
+ self.assertContains('ok 4 - kunit_resource_test_cleanup_resources', result)
+ self.assertContains('ok 5 - kunit_resource_test_proper_free_ordering', result)
self.assertContains('ok 1 - kunit-resource-test', result)
- self.assertContains(' foo bar # non-kunit output', result)
- self.assertContains(' # Subtest: kunit-try-catch-test', result)
- self.assertContains(' 1..2', result)
- self.assertContains(' ok 1 - kunit_test_try_catch_successful_try_no_catch',
+ self.assertContains('foo bar # non-kunit output', result)
+ self.assertContains('# Subtest: kunit-try-catch-test', result)
+ self.assertContains('1..2', result)
+ self.assertContains('ok 1 - kunit_test_try_catch_successful_try_no_catch',
result)
- self.assertContains(' ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch',
+ self.assertContains('ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch',
result)
self.assertContains('ok 2 - kunit-try-catch-test', result)
- self.assertContains(' # Subtest: string-stream-test', result)
- self.assertContains(' 1..3', result)
- self.assertContains(' ok 1 - string_stream_test_empty_on_creation', result)
- self.assertContains(' ok 2 - string_stream_test_not_empty_after_add', result)
- self.assertContains(' ok 3 - string_stream_test_get_string', result)
+ self.assertContains('# Subtest: string-stream-test', result)
+ self.assertContains('1..3', result)
+ self.assertContains('ok 1 - string_stream_test_empty_on_creation', result)
+ self.assertContains('ok 2 - string_stream_test_not_empty_after_add', result)
+ self.assertContains('ok 3 - string_stream_test_get_string', result)
self.assertContains('ok 3 - string-stream-test', result)
def test_parse_successful_test_log(self):
@@ -149,6 +149,22 @@ class KUnitParserTest(unittest.TestCase):
kunit_parser.TestStatus.SUCCESS,
result.status)
+ def test_parse_successful_nested_tests_log(self):
+ all_passed_log = test_data_path('test_is_test_passed-all_passed_nested.log')
+ with open(all_passed_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines())
+ self.assertEqual(
+ kunit_parser.TestStatus.SUCCESS,
+ result.status)
+
+ def test_kselftest_nested(self):
+ kselftest_log = test_data_path('test_is_test_passed-kselftest.log')
+ with open(kselftest_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines())
+ self.assertEqual(
+ kunit_parser.TestStatus.SUCCESS,
+ result.status)
+
def test_parse_failed_test_log(self):
failed_log = test_data_path('test_is_test_passed-failure.log')
with open(failed_log) as file:
@@ -162,17 +178,31 @@ class KUnitParserTest(unittest.TestCase):
with open(empty_log) as file:
result = kunit_parser.parse_run_tests(
kunit_parser.extract_tap_lines(file.readlines()))
- self.assertEqual(0, len(result.suites))
+ self.assertEqual(0, len(result.test.subtests))
self.assertEqual(
kunit_parser.TestStatus.FAILURE_TO_PARSE_TESTS,
result.status)
+ def test_missing_test_plan(self):
+ missing_plan_log = test_data_path('test_is_test_passed-'
+ 'missing_plan.log')
+ with open(missing_plan_log) as file:
+ result = kunit_parser.parse_run_tests(
+ kunit_parser.extract_tap_lines(
+ file.readlines()))
+ self.assertEqual(2, result.test.counts.errors)
+ self.assertEqual(
+ kunit_parser.TestStatus.SUCCESS,
+ result.status)
+
def test_no_tests(self):
- empty_log = test_data_path('test_is_test_passed-no_tests_run_with_header.log')
- with open(empty_log) as file:
+ header_log = test_data_path('test_is_test_passed-'
+ 'no_tests_run_with_header.log')
+ with open(header_log) as file:
result = kunit_parser.parse_run_tests(
- kunit_parser.extract_tap_lines(file.readlines()))
- self.assertEqual(0, len(result.suites))
+ kunit_parser.extract_tap_lines(
+ file.readlines()))
+ self.assertEqual(0, len(result.test.subtests))
self.assertEqual(
kunit_parser.TestStatus.NO_TESTS,
result.status)
@@ -182,15 +212,17 @@ class KUnitParserTest(unittest.TestCase):
print_mock = mock.patch('builtins.print').start()
with open(crash_log) as file:
result = kunit_parser.parse_run_tests(
- kunit_parser.extract_tap_lines(file.readlines()))
- print_mock.assert_any_call(StrContains('could not parse test results!'))
+ kunit_parser.extract_tap_lines(
+ file.readlines()))
+ print_mock.assert_any_call(StrContains('invalid KTAP input!'))
print_mock.stop()
file.close()
def test_crashed_test(self):
crashed_log = test_data_path('test_is_test_passed-crash.log')
with open(crashed_log) as file:
- result = kunit_parser.parse_run_tests(file.readlines())
+ result = kunit_parser.parse_run_tests(
+ file.readlines())
self.assertEqual(
kunit_parser.TestStatus.TEST_CRASHED,
result.status)
@@ -216,6 +248,23 @@ class KUnitParserTest(unittest.TestCase):
result.status)
file.close()
+ def test_ignores_hyphen(self):
+ hyphen_log = test_data_path('test_strip_hyphen.log')
+ file = open(hyphen_log)
+ result = kunit_parser.parse_run_tests(file.readlines())
+
+ # Result lines without the '- ' separator should still be parsed.
+ self.assertEqual(
+ kunit_parser.TestStatus.SUCCESS,
+ result.status)
+ self.assertEqual(
+ "sysctl_test",
+ result.test.subtests[0].name)
+ self.assertEqual(
+ "example",
+ result.test.subtests[1].name)
+ file.close()
+
def test_ignores_prefix_printk_time(self):
prefix_log = test_data_path('test_config_printk_time.log')
@@ -224,7 +273,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
def test_ignores_multiple_prefixes(self):
prefix_log = test_data_path('test_multiple_prefixes.log')
@@ -233,7 +282,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
def test_prefix_mixed_kernel_output(self):
mixed_prefix_log = test_data_path('test_interrupted_tap_output.log')
@@ -242,7 +291,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
def test_prefix_poundsign(self):
pound_log = test_data_path('test_pound_sign.log')
@@ -251,7 +300,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
def test_kernel_panic_end(self):
panic_log = test_data_path('test_kernel_panic_interrupt.log')
@@ -260,7 +309,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.TEST_CRASHED,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
def test_pound_no_prefix(self):
pound_log = test_data_path('test_pound_no_prefix.log')
@@ -269,7 +318,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
class LinuxSourceTreeTest(unittest.TestCase):
@@ -322,6 +371,12 @@ class KUnitJsonTest(unittest.TestCase):
result = self._json_for('test_is_test_passed-no_tests_run_with_header.log')
self.assertEqual(0, len(result['sub_groups']))
+ def test_nested_json(self):
+ result = self._json_for('test_is_test_passed-all_passed_nested.log')
+ self.assertEqual(
+ {'name': 'example_simple_test', 'status': 'PASS'},
+ result["sub_groups"][0]["sub_groups"][0]["test_cases"][0])
+
class StrContains(str):
def __eq__(self, other):
return self in other
@@ -380,7 +435,7 @@ class KUnitMainTest(unittest.TestCase):
self.assertEqual(e.exception.code, 1)
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
- self.print_mock.assert_any_call(StrContains(' 0 tests run'))
+ self.print_mock.assert_any_call(StrContains('invalid KTAP input!'))
def test_exec_raw_output(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
@@ -388,7 +443,7 @@ class KUnitMainTest(unittest.TestCase):
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
for call in self.print_mock.call_args_list:
self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
- self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))
+ self.assertNotEqual(call, mock.call(StrContains(' 0 tests run!')))
def test_run_raw_output(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
@@ -397,7 +452,7 @@ class KUnitMainTest(unittest.TestCase):
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
for call in self.print_mock.call_args_list:
self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
- self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))
+ self.assertNotEqual(call, mock.call(StrContains(' 0 tests run!')))
def test_run_raw_output_kunit(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log b/tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log
new file mode 100644
index 000000000000..9d5b04fe43a6
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log
@@ -0,0 +1,34 @@
+TAP version 14
+1..2
+ # Subtest: sysctl_test
+ 1..4
+ # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed
+ ok 1 - sysctl_test_dointvec_null_tbl_data
+ # Subtest: example
+ 1..2
+ init_suite
+ # example_simple_test: initializing
+ # example_simple_test: example_simple_test passed
+ ok 1 - example_simple_test
+ # example_mock_test: initializing
+ # example_mock_test: example_mock_test passed
+ ok 2 - example_mock_test
+ kunit example: all tests passed
+ ok 2 - example
+ # sysctl_test_dointvec_table_len_is_zero: sysctl_test_dointvec_table_len_is_zero passed
+ ok 3 - sysctl_test_dointvec_table_len_is_zero
+ # sysctl_test_dointvec_table_read_but_position_set: sysctl_test_dointvec_table_read_but_position_set passed
+ ok 4 - sysctl_test_dointvec_table_read_but_position_set
+kunit sysctl_test: all tests passed
+ok 1 - sysctl_test
+ # Subtest: example
+ 1..2
+init_suite
+ # example_simple_test: initializing
+ # example_simple_test: example_simple_test passed
+ ok 1 - example_simple_test
+ # example_mock_test: initializing
+ # example_mock_test: example_mock_test passed
+ ok 2 - example_mock_test
+kunit example: all tests passed
+ok 2 - example
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-kselftest.log b/tools/testing/kunit/test_data/test_is_test_passed-kselftest.log
new file mode 100644
index 000000000000..65d3f27feaf2
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-kselftest.log
@@ -0,0 +1,14 @@
+TAP version 13
+1..2
+# selftests: membarrier: membarrier_test_single_thread
+# TAP version 13
+# 1..2
+# ok 1 sys_membarrier available
+# ok 2 sys membarrier invalid command test: command = -1, flags = 0, errno = 22. Failed as expected
+ok 1 selftests: membarrier: membarrier_test_single_thread
+# selftests: membarrier: membarrier_test_multi_thread
+# TAP version 13
+# 1..2
+# ok 1 sys_membarrier available
+# ok 2 sys membarrier invalid command test: command = -1, flags = 0, errno = 22. Failed as expected
+ok 2 selftests: membarrier: membarrier_test_multi_thread
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log b/tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log
new file mode 100644
index 000000000000..5cd17b7f818a
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log
@@ -0,0 +1,31 @@
+KTAP version 1
+ # Subtest: sysctl_test
+ # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed
+ ok 1 - sysctl_test_dointvec_null_tbl_data
+ # sysctl_test_dointvec_table_maxlen_unset: sysctl_test_dointvec_table_maxlen_unset passed
+ ok 2 - sysctl_test_dointvec_table_maxlen_unset
+ # sysctl_test_dointvec_table_len_is_zero: sysctl_test_dointvec_table_len_is_zero passed
+ ok 3 - sysctl_test_dointvec_table_len_is_zero
+ # sysctl_test_dointvec_table_read_but_position_set: sysctl_test_dointvec_table_read_but_position_set passed
+ ok 4 - sysctl_test_dointvec_table_read_but_position_set
+ # sysctl_test_dointvec_happy_single_positive: sysctl_test_dointvec_happy_single_positive passed
+ ok 5 - sysctl_test_dointvec_happy_single_positive
+ # sysctl_test_dointvec_happy_single_negative: sysctl_test_dointvec_happy_single_negative passed
+ ok 6 - sysctl_test_dointvec_happy_single_negative
+ # sysctl_test_dointvec_single_less_int_min: sysctl_test_dointvec_single_less_int_min passed
+ ok 7 - sysctl_test_dointvec_single_less_int_min
+ # sysctl_test_dointvec_single_greater_int_max: sysctl_test_dointvec_single_greater_int_max passed
+ ok 8 - sysctl_test_dointvec_single_greater_int_max
+kunit sysctl_test: all tests passed
+ok 1 - sysctl_test
+ # Subtest: example
+ 1..2
+init_suite
+ # example_simple_test: initializing
+ # example_simple_test: example_simple_test passed
+ ok 1 - example_simple_test
+ # example_mock_test: initializing
+ # example_mock_test: example_mock_test passed
+ ok 2 - example_mock_test
+kunit example: all tests passed
+ok 2 - example
diff --git a/tools/testing/kunit/test_data/test_strip_hyphen.log b/tools/testing/kunit/test_data/test_strip_hyphen.log
new file mode 100644
index 000000000000..92ac7c24b374
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_strip_hyphen.log
@@ -0,0 +1,16 @@
+KTAP version 1
+1..2
+ # Subtest: sysctl_test
+ 1..1
+ # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed
+ ok 1 - sysctl_test_dointvec_null_tbl_data
+kunit sysctl_test: all tests passed
+ok 1 - sysctl_test
+ # Subtest: example
+ 1..1
+init_suite
+ # example_simple_test: initializing
+ # example_simple_test: example_simple_test passed
+ ok 1 example_simple_test
+kunit example: all tests passed
+ok 2 example
base-commit: 28a8b037484044d79f3b8d409a076a925ac75f8d
--
2.33.0.882.g93a45727a2-goog
From: Oliver Upton <oupton(a)google.com>
[ Upstream commit 01f91acb55be7aac3950b89c458bcea9ef6e4f49 ]
The SMC64 calling convention passes a function identifier in w0 and its
parameters in x1-x17. Given this, there are two deviations in the
SMC64 call performed by the steal_time test: the function identifier is
assigned to a 64 bit register and the parameter is only 32 bits wide.
Align the call with the SMCCC by using a 32 bit register to handle the
function identifier and increasing the parameter width to 64 bits.
Suggested-by: Andrew Jones <drjones(a)redhat.com>
Signed-off-by: Oliver Upton <oupton(a)google.com>
Reviewed-by: Andrew Jones <drjones(a)redhat.com>
Message-Id: <20210921171121.2148982-3-oupton(a)google.com>
Signed-off-by: Paolo Bonzini <pbonzini(a)redhat.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
tools/testing/selftests/kvm/steal_time.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c
index fcc840088c91..7daedee3e7ee 100644
--- a/tools/testing/selftests/kvm/steal_time.c
+++ b/tools/testing/selftests/kvm/steal_time.c
@@ -120,12 +120,12 @@ struct st_time {
uint64_t st_time;
};
-static int64_t smccc(uint32_t func, uint32_t arg)
+static int64_t smccc(uint32_t func, uint64_t arg)
{
unsigned long ret;
asm volatile(
- "mov x0, %1\n"
+ "mov w0, %w1\n"
"mov x1, %2\n"
"hvc #0\n"
"mov %0, x0\n"
--
2.33.0
From: Oliver Upton <oupton(a)google.com>
[ Upstream commit 01f91acb55be7aac3950b89c458bcea9ef6e4f49 ]
The SMC64 calling convention passes a function identifier in w0 and its
parameters in x1-x17. Given this, there are two deviations in the
SMC64 call performed by the steal_time test: the function identifier is
assigned to a 64 bit register and the parameter is only 32 bits wide.
Align the call with the SMCCC by using a 32 bit register to handle the
function identifier and increasing the parameter width to 64 bits.
Suggested-by: Andrew Jones <drjones(a)redhat.com>
Signed-off-by: Oliver Upton <oupton(a)google.com>
Reviewed-by: Andrew Jones <drjones(a)redhat.com>
Message-Id: <20210921171121.2148982-3-oupton(a)google.com>
Signed-off-by: Paolo Bonzini <pbonzini(a)redhat.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
tools/testing/selftests/kvm/steal_time.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c
index ecec30865a74..aafaa8e38b7c 100644
--- a/tools/testing/selftests/kvm/steal_time.c
+++ b/tools/testing/selftests/kvm/steal_time.c
@@ -118,12 +118,12 @@ struct st_time {
uint64_t st_time;
};
-static int64_t smccc(uint32_t func, uint32_t arg)
+static int64_t smccc(uint32_t func, uint64_t arg)
{
unsigned long ret;
asm volatile(
- "mov x0, %1\n"
+ "mov w0, %w1\n"
"mov x1, %2\n"
"hvc #0\n"
"mov %0, x0\n"
--
2.33.0
[root@iaas-rpma gpio]# make
gcc gpio-mockup-cdev.c -o /home/lizhijian/linux/tools/testing/selftests/gpio/gpio-mockup-cdev
gpio-mockup-cdev.c: In function ‘request_line_v2’:
gpio-mockup-cdev.c:24:30: error: storage size of ‘req’ isn’t known
24 | struct gpio_v2_line_request req;
| ^~~
gpio-mockup-cdev.c:32:14: error: ‘GPIO_V2_LINE_FLAG_OUTPUT’ undeclared (first use in this function); did you mean ‘GPIOLINE_FLAG_IS_OUT’?
32 | if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
| ^~~~~~~~~~~~~~~~~~~~~~~~
gpio-mockup-cdev.c includes <linux/gpio.h>, which is normally provided by
the distribution's kernel-headers package and is expected to declare
GPIO_V2_LINE_FLAG_OUTPUT. However, distros or developers will not always
have kernel headers installed that match the kernel tree being compiled.
So tell the compiler to also search the headers exported from the kernel
tree (usr/include, populated by 'make headers_install'), as other
selftests such as sched already do.
CC: Philip Li <philip.li(a)intel.com>
Reported-by: kernel test robot <lkp(a)intel.com>
Signed-off-by: Li Zhijian <lizhijian(a)cn.fujitsu.com>
---
V2: add more details about the fix
---
tools/testing/selftests/gpio/Makefile | 1 +
1 file changed, 1 insertion(+)
diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile
index 39f2bbe8dd3d..42ea7d2aa844 100644
--- a/tools/testing/selftests/gpio/Makefile
+++ b/tools/testing/selftests/gpio/Makefile
@@ -3,5 +3,6 @@
TEST_PROGS := gpio-mockup.sh
TEST_FILES := gpio-mockup-sysfs.sh
TEST_GEN_PROGS_EXTENDED := gpio-mockup-cdev
+CFLAGS += -I../../../../usr/include
include ../lib.mk
--
2.31.1
Consider this attempt to run KUnit in QEMU:
$ ./tools/testing/kunit/kunit.py run --arch=x86
Before you'd get this error message:
kunit_kernel.ConfigError: x86 is not a valid arch
After:
kunit_kernel.ConfigError: x86 is not a valid arch, options are ['alpha', 'arm', 'arm64', 'i386', 'powerpc', 'riscv', 's390', 'sparc', 'x86_64']
This should make it a bit easier for people to notice when they make
typos, etc. Currently, one would have to dive into the python code to
figure out what the valid set is.
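For context, here is a standalone sketch of the new lookup. The directory
path below is an assumption for illustration; the real QEMU_CONFIGS_DIR
constant is defined in kunit_kernel.py:

    import os

    # Assumed path for this sketch; kunit_kernel.py defines the real constant.
    QEMU_CONFIGS_DIR = 'tools/testing/kunit/qemu_configs'

    # Each qemu config is an <arch>.py file; stripping the '.py' suffix
    # yields the arch name. Note that 'um' is special-cased earlier in
    # get_source_tree_ops(), so it does not appear in this list.
    options = [f[:-3] for f in os.listdir(QEMU_CONFIGS_DIR) if f.endswith('.py')]
    print('invalid is not a valid arch, options are ' + str(sorted(options)))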
Signed-off-by: Daniel Latypov <dlatypov(a)google.com>
---
tools/testing/kunit/kunit_kernel.py | 5 +++--
tools/testing/kunit/kunit_tool_test.py | 4 ++++
2 files changed, 7 insertions(+), 2 deletions(-)
diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
index 1870e75ff153..a6b3cee3f0d0 100644
--- a/tools/testing/kunit/kunit_kernel.py
+++ b/tools/testing/kunit/kunit_kernel.py
@@ -198,8 +198,9 @@ def get_source_tree_ops(arch: str, cross_compile: Optional[str]) -> LinuxSourceT
return LinuxSourceTreeOperationsUml(cross_compile=cross_compile)
elif os.path.isfile(config_path):
return get_source_tree_ops_from_qemu_config(config_path, cross_compile)[1]
- else:
- raise ConfigError(arch + ' is not a valid arch')
+
+ options = [f[:-3] for f in os.listdir(QEMU_CONFIGS_DIR) if f.endswith('.py')]
+ raise ConfigError(arch + ' is not a valid arch, options are ' + str(sorted(options)))
def get_source_tree_ops_from_qemu_config(config_path: str,
cross_compile: Optional[str]) -> Tuple[
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index cad37a98e599..2ae72f04cbe0 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -289,6 +289,10 @@ class LinuxSourceTreeTest(unittest.TestCase):
pass
kunit_kernel.LinuxSourceTree('', kunitconfig_path=dir)
+ def test_invalid_arch(self):
+ with self.assertRaisesRegex(kunit_kernel.ConfigError, 'not a valid arch, options are.*x86_64'):
+ kunit_kernel.LinuxSourceTree('', arch='invalid')
+
# TODO: add more test cases.
base-commit: 865a0a8025ee0b54d1cc74834c57197d184a441e
--
2.33.0.685.g46640cef36-goog
Fix documentation build warnings in <kunit/test.h>:
../include/kunit/test.h:616: warning: Function parameter or member 'flags' not described in 'kunit_kmalloc_array'
../include/kunit/test.h:616: warning: Excess function parameter 'gfp' description in 'kunit_kmalloc_array'
../include/kunit/test.h:661: warning: Function parameter or member 'flags' not described in 'kunit_kcalloc'
../include/kunit/test.h:661: warning: Excess function parameter 'gfp' description in 'kunit_kcalloc'
Fixes: 0a756853586c ("kunit: test: add test resource management API")
Fixes: 7122debb4367 ("kunit: introduce kunit_kmalloc_array/kunit_kcalloc() helpers")
Signed-off-by: Randy Dunlap <rdunlap(a)infradead.org>
Cc: Brendan Higgins <brendanhiggins(a)google.com>
Cc: linux-kselftest(a)vger.kernel.org
Cc: kunit-dev(a)googlegroups.com
Cc: Daniel Latypov <dlatypov(a)google.com>
Cc: Shuah Khan <skhan(a)linuxfoundation.org>
---
include/kunit/test.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
--- lnx-515-rc4.orig/include/kunit/test.h
+++ lnx-515-rc4/include/kunit/test.h
@@ -607,7 +607,7 @@ void kunit_remove_resource(struct kunit
* @test: The test context object.
* @n: number of elements.
* @size: The size in bytes of the desired memory.
- * @gfp: flags passed to underlying kmalloc().
+ * @flags: gfp flags passed to underlying kmalloc().
*
* Just like `kmalloc_array(...)`, except the allocation is managed by the test case
* and is automatically cleaned up after the test case concludes. See &struct
@@ -653,7 +653,7 @@ static inline void *kunit_kzalloc(struct
* @test: The test context object.
* @n: number of elements.
* @size: The size in bytes of the desired memory.
- * @gfp: flags passed to underlying kmalloc().
+ * @flags: gfp flags passed to underlying kmalloc().
*
* See kcalloc() and kunit_kmalloc_array() for more information.
*/
From: Rae Moar <rmoar(a)google.com>
Update kunit_parser to improve compatibility with the KTAP
specification, including arbitrarily nested tests. This patch
accomplishes three major changes:
- Use a general Test object to represent all tests rather than TestCase
and TestSuite objects. This allows for easier implementation of arbitrary
levels of nested tests and promotes the idea that both test suites and test
cases are tests.
- Print errors incrementally rather than all at once after the
parsing finishes, to maximize the information given to the user when
the parser is handed invalid input and to increase the helpfulness of
the timestamps given during printing. Note that kunit.py parse does
not print incrementally yet; however, this fix brings us closer to
that feature.
- Increase compatibility with different input formats. Arbitrary levels
of nested tests are supported, and test cases and test suites may now
appear at the same level of testing.
This patch now implements the KTAP specification as described here:
https://lore.kernel.org/linux-kselftest/CA+GJov6tdjvY9x12JsJT14qn6c7NViJxqa….
This patch adjusts the kunit_tool_test.py file to check for
the correct outputs from the new parser and adds a new test to check
the parsing of a correctly formatted KTAP result log with multiple
nested subtests (test_is_test_passed-all_passed_nested.log).
This patch also alters the kunit_json.py file to allow for arbitrarily
nested tests.
Signed-off-by: Rae Moar <rmoar(a)google.com>
Reviewed-by: Brendan Higgins <brendanhiggins(a)google.com>
Signed-off-by: Daniel Latypov <dlatypov(a)google.com>
---
NOTE: this patch is now applied on top of
https://lore.kernel.org/linux-kselftest/20210930222048.1692635-5-dlatypov@g…
to resolve a conflict.
Change log from v3:
https://lore.kernel.org/linux-kselftest/20210901190623.315736-1-rmoar@googl…
- Resolve conflict with hermetic testing patches
- Fix pytype error (str -> Optional[str] in kunit_json.py)
Change log from v2:
https://lore.kernel.org/linux-kselftest/20210826195505.3066755-1-rmoar@goog…
- Fixes bug of type disagreement in kunit_json.py for build_dir
- Removes raw_output()
- Changes docstrings in kunit_parser.py (class docstring, LineStream
docstrings, add_error(), total(), get_status(), all parsing methods)
- Fixes bug of not printing the diagnostic log when the end of the
lines is reached
- Sets default status of all tests to TEST_CRASHED
- Adds and prints empty tests with crashed status in case of missing
tests
- Prints 'subtest' in instance of 1 subtest instead of 'subtests'
- Includes checking for 'BUG:' message in search of crash messages in
log (note that parse_crash_in_log method could be removed but would
require deleting tests in kunit_tool_test.py that include the crash
message that is no longer used. If removed, parser would still print
log in cases of test crashed or failure, which would now include
missing subtests)
- Fixes bug of including directives (other than SKIP) in test name
when matching name in result line for subtests
Change log from v1:
https://lore.kernel.org/linux-kselftest/20210820200032.2178134-1-rmoar@goog…
- Rebase onto kselftest/kunit branch
- Add tests to kunit_tool_test.py to check that the parser correctly
strips the hyphen, produces correct json objects with nested tests,
passes kselftest TAP output, and handles a missing test plan.
- Fix bug to correctly match the test name when the test plan is missing.
- Fix bug in kunit_tool_test.py pointed out by Daniel where it was not
correctly checking for a proper match to the '0 tests run!' error
message. Reverts changes back to original.
- A few minor changes to commit message using Daniel's comments.
- Change docstrings using Daniel's comments to reduce:
- Shortens some docstrings to be one-line or just description if it is
self explanatory.
- Remove explicit respecification of types of parameters and returns
because this is already specified in the function annotations. However,
some descriptions of the parameters and returns remain and some contain
the type for context. Additionally, the types of public attributes of
classes remain.
- Remove any documentation of 'Return: None'
- Remove docstrings of helper methods within other methods
---
tools/testing/kunit/kunit.py | 6 +-
tools/testing/kunit/kunit_json.py | 55 +-
tools/testing/kunit/kunit_parser.py | 1056 ++++++++++++-----
tools/testing/kunit/kunit_tool_test.py | 134 ++-
.../test_is_test_passed-all_passed_nested.log | 34 +
.../test_is_test_passed-kselftest.log | 14 +
.../test_is_test_passed-missing_plan.log | 31 +
.../kunit/test_data/test_strip_hyphen.log | 16 +
8 files changed, 954 insertions(+), 392 deletions(-)
create mode 100644 tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log
create mode 100644 tools/testing/kunit/test_data/test_is_test_passed-kselftest.log
create mode 100644 tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log
create mode 100644 tools/testing/kunit/test_data/test_strip_hyphen.log
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index 8c7e8c7b2c97..9e2a96e45e3b 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -136,7 +136,7 @@ def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest,
test_glob = request.filter_glob.split('.', maxsplit=2)[1]
filter_globs = [g + '.'+ test_glob for g in filter_globs]
- overall_status = kunit_parser.TestStatus.SUCCESS
+ test_counts = kunit_parser.TestCounts()
exec_time = 0.0
for i, filter_glob in enumerate(filter_globs):
kunit_parser.print_with_timestamp('Starting KUnit Kernel ({}/{})...'.format(i+1, len(filter_globs)))
@@ -155,9 +155,9 @@ def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest,
test_end = time.time()
exec_time += test_end - test_start
- overall_status = kunit_parser.max_status(overall_status, result.status)
+ test_counts.add_subtest_counts(result.result.test.counts)
- return KunitResult(status=result.status, result=result.result, elapsed_time=exec_time)
+ return KunitResult(status=test_counts.get_status(), result=result.result, elapsed_time=exec_time)
def parse_tests(request: KunitParseRequest, input_data: Iterable[str]) -> KunitResult:
parse_start = time.time()
diff --git a/tools/testing/kunit/kunit_json.py b/tools/testing/kunit/kunit_json.py
index f5cca5c38cac..d01581d068ac 100644
--- a/tools/testing/kunit/kunit_json.py
+++ b/tools/testing/kunit/kunit_json.py
@@ -11,47 +11,46 @@ import os
import kunit_parser
-from kunit_parser import TestStatus
-
-def get_json_result(test_result, def_config, build_dir, json_path) -> str:
- sub_groups = []
-
- # Each test suite is mapped to a KernelCI sub_group
- for test_suite in test_result.suites:
- sub_group = {
- "name": test_suite.name,
- "arch": "UM",
- "defconfig": def_config,
- "build_environment": build_dir,
- "test_cases": [],
- "lab_name": None,
- "kernel": None,
- "job": None,
- "git_branch": "kselftest",
- }
- test_cases = []
- # TODO: Add attachments attribute in test_case with detailed
- # failure message, see https://api.kernelci.org/schema-test-case.html#get
- for case in test_suite.cases:
- test_case = {"name": case.name, "status": "FAIL"}
- if case.status == TestStatus.SUCCESS:
+from kunit_parser import Test, TestResult, TestStatus
+from typing import Any, Dict, Optional
+
+JsonObj = Dict[str, Any]
+
+def _get_group_json(test: Test, def_config: str, build_dir: Optional[str]) -> JsonObj:
+ sub_groups = [] # List[JsonObj]
+ test_cases = [] # List[JsonObj]
+
+ for subtest in test.subtests:
+ if len(subtest.subtests):
+ sub_group = _get_group_json(subtest, def_config,
+ build_dir)
+ sub_groups.append(sub_group)
+ else:
+ test_case = {"name": subtest.name, "status": "FAIL"}
+ if subtest.status == TestStatus.SUCCESS:
test_case["status"] = "PASS"
- elif case.status == TestStatus.TEST_CRASHED:
+ elif subtest.status == TestStatus.TEST_CRASHED:
test_case["status"] = "ERROR"
test_cases.append(test_case)
- sub_group["test_cases"] = test_cases
- sub_groups.append(sub_group)
+
test_group = {
- "name": "KUnit Test Group",
+ "name": test.name,
"arch": "UM",
"defconfig": def_config,
"build_environment": build_dir,
"sub_groups": sub_groups,
+ "test_cases": test_cases,
"lab_name": None,
"kernel": None,
"job": None,
"git_branch": "kselftest",
}
+ return test_group
+
+def get_json_result(test_result: TestResult, def_config: str, build_dir: Optional[str],
+ json_path: str) -> str:
+ test_group = _get_group_json(test_result.test, def_config, build_dir)
+ test_group["name"] = "KUnit Test Group"
json_obj = json.dumps(test_group, indent=4)
if json_path != 'stdout':
with open(json_path, 'w') as result_path:
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index 6310a641b151..4b6086159c7f 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -1,11 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
#
-# Parses test results from a kernel dmesg log.
+# Parses KTAP test results from a kernel dmesg log and incrementally prints
+# results with reader-friendly format. Stores and returns test results in a
+# Test object.
#
# Copyright (C) 2019, Google LLC.
# Author: Felix Guo <felixguoxiuping(a)gmail.com>
# Author: Brendan Higgins <brendanhiggins(a)google.com>
+# Author: Rae Moar <rmoar(a)google.com>
+from __future__ import annotations
import re
from collections import namedtuple
@@ -14,33 +18,55 @@ from enum import Enum, auto
from functools import reduce
from typing import Iterable, Iterator, List, Optional, Tuple
-TestResult = namedtuple('TestResult', ['status','suites','log'])
-
-class TestSuite(object):
- def __init__(self) -> None:
- self.status = TestStatus.SUCCESS
- self.name = ''
- self.cases = [] # type: List[TestCase]
-
- def __str__(self) -> str:
- return 'TestSuite(' + str(self.status) + ',' + self.name + ',' + str(self.cases) + ')'
-
- def __repr__(self) -> str:
- return str(self)
-
-class TestCase(object):
+TestResult = namedtuple('TestResult', ['status','test','log'])
+
+class Test(object):
+ """
+ A class to represent a test parsed from KTAP results. All KTAP
+ results within a test log are stored in a main Test object as
+ subtests.
+
+ Attributes:
+ status : TestStatus - status of the test
+ name : str - name of the test
+ expected_count : int - expected number of subtests (0 if single
+ test case and None if unknown expected number of subtests)
+ subtests : List[Test] - list of subtests
+ log : List[str] - log of KTAP lines that correspond to the test
+ counts : TestCounts - counts of the test statuses and errors of
+ subtests or of the test itself if the test is a single
+ test case.
+ """
def __init__(self) -> None:
+ """Constructs the default attributes of a Test class object.
+ """
self.status = TestStatus.SUCCESS
self.name = ''
+ self.expected_count = 0 # type: Optional[int]
+ self.subtests = [] # type: List[Test]
self.log = [] # type: List[str]
+ self.counts = TestCounts()
def __str__(self) -> str:
- return 'TestCase(' + str(self.status) + ',' + self.name + ',' + str(self.log) + ')'
+ """Returns string representation of a Test class object."""
+ return ('Test(' + str(self.status) + ', ' + self.name +
+ ', ' + str(self.expected_count) + ', ' +
+ str(self.subtests) + ', ' + str(self.log) + ', ' +
+ str(self.counts) + ')')
def __repr__(self) -> str:
+ """Returns string representation of a Test class object."""
return str(self)
+ def add_error(self, error_message: str) -> None:
+ """Adds error to test object by incrementing the error count
+ and printing the error message.
+ """
+ self.counts.errors += 1
+ print_error('Test ' + self.name + ': ' + error_message)
+
class TestStatus(Enum):
+ """An enumeration class to represent the status of a test."""
SUCCESS = auto()
FAILURE = auto()
SKIPPED = auto()
@@ -48,381 +74,769 @@ class TestStatus(Enum):
NO_TESTS = auto()
FAILURE_TO_PARSE_TESTS = auto()
+class TestCounts:
+ """
+ A class to represent the counts of statuses and test errors of
+ subtests or of the test itself if the test is a single test case with
+ no subtests. Note that the counts of passed, failed,
+ crashed, and skipped subtests should sum to the total number of
+ subtests for the test.
+
+ Attributes:
+ passed : int - the number of tests that have passed
+ failed : int - the number of tests that have failed
+ crashed : int - the number of tests that have crashed
+ skipped : int - the number of tests that have skipped
+ errors : int - the number of errors in the test and subtests
+ """
+ def __init__(self):
+ """Contructs the default attributes of a TestCounts class
+ object. Sets the counts of all test statuses and test
+ errors to be 0.
+ """
+ self.passed = 0
+ self.failed = 0
+ self.crashed = 0
+ self.skipped = 0
+ self.errors = 0
+
+ def __str__(self) -> str:
+ """Returns the string representation of a TestCounts object.
+ """
+ return ('Passed: ' + str(self.passed) +
+ ', Failed: ' + str(self.failed) +
+ ', Crashed: ' + str(self.crashed) +
+ ', Skipped: ' + str(self.skipped) +
+ ', Errors: ' + str(self.errors))
+
+ def total(self) -> int:
+ """Returns total number of subtests or 1 if the test object
+ has no subtests to represent the test itself. This number is
+ calculated by the sum of the passed, failed, crashed, and
+ skipped subtests.
+ """
+ return (self.passed + self.failed + self.crashed +
+ self.skipped)
+
+ def add_subtest_counts(self, counts: TestCounts) -> None:
+ """
+ Adds the counts of another TestCounts object to the current
+ TestCounts object. Used to add the counts of a subtest to the
+ parent test.
+
+ Parameters:
+ counts - a different TestCounts object whose counts
+ will be added to the counts of this TestCounts object
+ """
+ self.passed += counts.passed
+ self.failed += counts.failed
+ self.crashed += counts.crashed
+ self.skipped += counts.skipped
+ self.errors += counts.errors
+
+ def get_status(self) -> TestStatus:
+ """Returns the expected status of a Test using test counts."""
+ if self.crashed:
+ # If one of the subtests crashes, the expected status
+ # of the Test is crashed.
+ return TestStatus.TEST_CRASHED
+ elif self.failed:
+ # Otherwise if one of the subtests fails, the
+ # expected status of the Test is failed.
+ return TestStatus.FAILURE
+ elif self.passed:
+ # Otherwise if one of the subtests passes, the
+ # expected status of the Test is passed.
+ return TestStatus.SUCCESS
+ else:
+ # Finally, if none of the subtests have failed,
+ # crashed, or passed, the expected status of the
+ # Test is skipped.
+ return TestStatus.SKIPPED
+
+ def add_status(self, status: TestStatus) -> None:
+ """
+ Given inputted status, increments corresponding attribute of
+ TestCounts object.
+
+ Parameters:
+ status - status to be added to the TestCounts object
+ """
+ if status == TestStatus.SUCCESS or \
+ status == TestStatus.NO_TESTS:
+ # if status is NO_TESTS the most appropriate
+ # attribute to increment is passed because
+ # the test did not fail, crash or get skipped.
+ self.passed += 1
+ elif status == TestStatus.FAILURE:
+ self.failed += 1
+ elif status == TestStatus.SKIPPED:
+ self.skipped += 1
+ else:
+ self.crashed += 1
+
class LineStream:
- """Provides a peek()/pop() interface over an iterator of (line#, text)."""
+ """
+ A class to represent the lines of kernel output.
+ Provides a peek()/pop() interface over an iterator of
+ (line#, text).
+ """
_lines: Iterator[Tuple[int, str]]
_next: Tuple[int, str]
_done: bool
def __init__(self, lines: Iterator[Tuple[int, str]]):
+ """Set defaults for LineStream object and sets _lines
+ attribute to lines parameter.
+ """
self._lines = lines
self._done = False
self._next = (0, '')
self._get_next()
def _get_next(self) -> None:
+ """Advances the LineSteam to the next line or sets the _done
+ attribute if the LineStream has reached the end of the lines.
+ """
try:
self._next = next(self._lines)
except StopIteration:
self._done = True
def peek(self) -> str:
+ """Returns the next line in the LineStream without advancing
+ the LineStream.
+ """
return self._next[1]
def pop(self) -> str:
+ """Returns the next line in the LineStream and advances the
+ LineStream to the next line.
+ """
n = self._next
self._get_next()
return n[1]
def __bool__(self) -> bool:
+ """Returns whether the LineStream has reached the end of the
+ lines.
+ """
return not self._done
# Only used by kunit_tool_test.py.
def __iter__(self) -> Iterator[str]:
+ """Empties all lines stored in LineStream object into
+ Iterator object and returns the Iterator object.
+ """
while bool(self):
yield self.pop()
def line_number(self) -> int:
+ """Returns the line number of the next line in the
+ LineStream.
+ """
return self._next[0]
-kunit_start_re = re.compile(r'TAP version [0-9]+$')
-kunit_end_re = re.compile('(List of all partitions:|'
- 'Kernel panic - not syncing: VFS:|reboot: System halted)')
+# Parsing helper methods:
+
+KTAP_START = re.compile(r'KTAP version ([0-9]+)$')
+TAP_START = re.compile(r'TAP version ([0-9]+)$')
+KTAP_END = re.compile('(List of all partitions:|'
+ 'Kernel panic - not syncing: VFS:|reboot: System halted)')
def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream:
- def isolate_kunit_output(kernel_output: Iterable[str]) -> Iterator[Tuple[int, str]]:
+ """Extracts KTAP lines from inputted kernel output in LineStream
+ object."""
+ def isolate_ktap_output(kernel_output: Iterable[str]) \
+ -> Iterator[Tuple[int, str]]:
line_num = 0
started = False
for line in kernel_output:
line_num += 1
- line = line.rstrip() # line always has a trailing \n
- if kunit_start_re.search(line):
+ line = line.rstrip() # remove trailing \n
+ if not started and KTAP_START.search(line):
+ # start extracting KTAP lines and set prefix
+ # to number of characters before version line
+ prefix_len = len(
+ line.split('KTAP version')[0])
+ started = True
+ yield line_num, line[prefix_len:]
+ elif not started and TAP_START.search(line):
+ # start extracting KTAP lines and set prefix
+ # to number of characters before version line
prefix_len = len(line.split('TAP version')[0])
started = True
yield line_num, line[prefix_len:]
- elif kunit_end_re.search(line):
+ elif started and KTAP_END.search(line):
+ # stop extracting KTAP lines
break
elif started:
- yield line_num, line[prefix_len:]
- return LineStream(lines=isolate_kunit_output(kernel_output))
-
-DIVIDER = '=' * 60
-
-RESET = '\033[0;0m'
-
-def red(text) -> str:
- return '\033[1;31m' + text + RESET
-
-def yellow(text) -> str:
- return '\033[1;33m' + text + RESET
-
-def green(text) -> str:
- return '\033[1;32m' + text + RESET
-
-def print_with_timestamp(message) -> None:
- print('[%s] %s' % (datetime.now().strftime('%H:%M:%S'), message))
-
-def format_suite_divider(message) -> str:
- return '======== ' + message + ' ========'
-
-def print_suite_divider(message) -> None:
- print_with_timestamp(DIVIDER)
- print_with_timestamp(format_suite_divider(message))
-
-def print_log(log) -> None:
- for m in log:
- print_with_timestamp(m)
-
-TAP_ENTRIES = re.compile(r'^(TAP|[\s]*ok|[\s]*not ok|[\s]*[0-9]+\.\.[0-9]+|[\s]*# (Subtest:|.*: kunit test case crashed!)).*$')
-
-def consume_non_diagnostic(lines: LineStream) -> None:
- while lines and not TAP_ENTRIES.match(lines.peek()):
- lines.pop()
-
-def save_non_diagnostic(lines: LineStream, test_case: TestCase) -> None:
- while lines and not TAP_ENTRIES.match(lines.peek()):
- test_case.log.append(lines.peek())
- lines.pop()
-
-OkNotOkResult = namedtuple('OkNotOkResult', ['is_ok','description', 'text'])
-
-OK_NOT_OK_SKIP = re.compile(r'^[\s]*(ok|not ok) [0-9]+ - (.*) # SKIP(.*)$')
-
-OK_NOT_OK_SUBTEST = re.compile(r'^[\s]+(ok|not ok) [0-9]+ - (.*)$')
-
-OK_NOT_OK_MODULE = re.compile(r'^(ok|not ok) ([0-9]+) - (.*)$')
-
-def parse_ok_not_ok_test_case(lines: LineStream, test_case: TestCase) -> bool:
- save_non_diagnostic(lines, test_case)
- if not lines:
- test_case.status = TestStatus.TEST_CRASHED
- return True
- line = lines.peek()
- match = OK_NOT_OK_SUBTEST.match(line)
- while not match and lines:
- line = lines.pop()
- match = OK_NOT_OK_SUBTEST.match(line)
- if match:
- test_case.log.append(lines.pop())
- test_case.name = match.group(2)
- skip_match = OK_NOT_OK_SKIP.match(line)
- if skip_match:
- test_case.status = TestStatus.SKIPPED
- return True
- if test_case.status == TestStatus.TEST_CRASHED:
- return True
- if match.group(1) == 'ok':
- test_case.status = TestStatus.SUCCESS
- else:
- test_case.status = TestStatus.FAILURE
- return True
+ # remove prefix and any indentation and yield
+ # line with line number
+ line = line[prefix_len:].lstrip()
+ yield line_num, line
+ return LineStream(lines=isolate_ktap_output(kernel_output))
+
+def raw_output(kernel_output: Iterable[str]) -> None:
+ """Prints all lines of kernel output."""
+ for line in kernel_output:
+ print(line.rstrip())
+
+KTAP_VERSIONS = [1]
+TAP_VERSIONS = [13, 14]
+
+def check_version(version_num: int, accepted_versions: List[int],
+ version_type: str, test: Test) -> None:
+ """
+ Adds error to test object if version number is too high or too
+ low.
+
+ Parameters:
+ version_num - The inputted version number from the parsed KTAP or TAP
+ header line
+ accepted_versions - List of accepted KTAP or TAP versions
+ version_type - 'KTAP' or 'TAP' depending on the type of
+ version line.
+ test - Test object for current test being parsed
+ """
+ if version_num < min(accepted_versions):
+ test.add_error(version_type +
+ ' version lower than expected!')
+ elif version_num > max(accepted_versions):
+ test.add_error(
+ version_type + ' version higher than expected!')
+
+def parse_ktap_header(lines: LineStream, test: Test) -> bool:
+ """
+ If the next line in the LineStream matches the format of a KTAP or
+ TAP header line, the version number is checked, the line is popped,
+ and the method returns True. Otherwise it returns False.
+
+ Accepted formats:
+ - 'KTAP version [version number]'
+ - 'TAP version [version number]'
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+ test - Test object for current test being parsed
+
+ Return:
+ Boolean that represents if the next line in the LineStream was parsed
+ as the KTAP or TAP header line
+ """
+ ktap_match = KTAP_START.match(lines.peek())
+ tap_match = TAP_START.match(lines.peek())
+ if ktap_match:
+ version_num = int(ktap_match.group(1))
+ check_version(version_num, KTAP_VERSIONS, 'KTAP', test)
+ elif tap_match:
+ version_num = int(tap_match.group(1))
+ check_version(version_num, TAP_VERSIONS, 'TAP', test)
else:
return False
-
-SUBTEST_DIAGNOSTIC = re.compile(r'^[\s]+# (.*)$')
-DIAGNOSTIC_CRASH_MESSAGE = re.compile(r'^[\s]+# .*?: kunit test case crashed!$')
-
-def parse_diagnostic(lines: LineStream, test_case: TestCase) -> bool:
- save_non_diagnostic(lines, test_case)
- if not lines:
+ test.log.append(lines.pop())
+ return True
+
+TEST_HEADER = re.compile(r'^# Subtest: (.*)$')
+
+def parse_test_header(lines: LineStream, test: Test) -> bool:
+ """
+ If the next line in LineStream matches the format of a test
+ header line, the name of test is set, the line is popped,
+ and returns True. Otherwise the method returns False.
+
+ Accepted format:
+ - '# Subtest: [test name]'
+
+ Parameters:
+ lines - LineStream of ktap output to parse
+ test - Test object for current test being parsed
+
+ Return:
+ Boolean that represents if the next line in the LineStream was parsed
+ as a test header
+ """
+ match = TEST_HEADER.match(lines.peek())
+ if not match:
return False
+ test.log.append(lines.pop())
+ test.name = match.group(1)
+ return True
+
+TEST_PLAN = re.compile(r'1\.\.([0-9]+)')
+
+def parse_test_plan(lines: LineStream, test: Test) -> bool:
+ """
+ If the next line in LineStream matches the format of a test
+ plan line, the expected number of subtests is set in test object, an
+ error is thrown if there are 0 tests, the line is popped,
+ and returns True. Otherwise the method adds an error that the test
+ plan is missing to the test object and returns False.
+
+ Accepted format:
+ - '1..[number of subtests]'
+
+ Parameters:
+ lines - LineStream of ktap output to parse
+ test - Test object for current test being parsed
+
+ Return:
+ Boolean that represents if the next line in the LineStream was parsed
+ as a test plan
+ """
+ match = TEST_PLAN.match(lines.peek())
+ if not match:
+ test.expected_count = None
+ test.add_error('missing plan line!')
+ return False
+ test.log.append(lines.pop())
+ expected_count = int(match.group(1))
+ test.expected_count = expected_count
+ if expected_count == 0:
+ test.status = TestStatus.NO_TESTS
+ test.add_error('0 tests run!')
+ return True
+
+TEST_RESULT = re.compile(r'^(ok|not ok) ([0-9]+) (- )?(.*)$')
+
+TEST_RESULT_SKIP = re.compile(r'^(ok|not ok) ([0-9]+) (- )?(.*) # SKIP(.*)$')
+
+def peek_test_name_match(lines: LineStream, test: Test) -> bool:
+ """
+ If the next line in LineStream matches the format of a test
+ result line and the name of the result line matches the name of the
+ current test, the method returns True. Otherwise it returns False.
+
+ Accepted format:
+ - '[ok|not ok] [test number] [-] [test name] [optional skip
+ directive]'
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+ test - Test object for current test being parsed
+
+ Return:
+ Boolean that represents if the next line in the LineStream matched a
+ test result line and the name matched the expected test name
+ """
line = lines.peek()
- match = SUBTEST_DIAGNOSTIC.match(line)
- if match:
- test_case.log.append(lines.pop())
- crash_match = DIAGNOSTIC_CRASH_MESSAGE.match(line)
- if crash_match:
- test_case.status = TestStatus.TEST_CRASHED
- return True
- else:
+ match = TEST_RESULT.match(line)
+ if not match:
return False
+ name = match.group(4)
+ return (name == test.name)
+
+def parse_test_result(lines: LineStream, test: Test,
+ expected_num: int) -> bool:
+ """
+ If the next line in the LineStream matches the format of a test
+ result line, the status in the result line is added to the test
+ object, the test number is checked against the expected test number
+ (adding an error to the test object on mismatch), and the method
+ returns True. Otherwise it returns False.
+
+ Note that the skip directive is the only directive that causes a
+ change in status; any other directive is included in the name of
+ the test.
+
+ Accepted format:
+ - '[ok|not ok] [test number] [-] [test name] [optional skip
+ directive]'
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+ test - Test object for current test being parsed
+ expected_num - expected test number for current test
+
+ Return:
+ Boolean that represents if the next line in the LineStream was parsed
+ as a test result line.
+ """
+ line = lines.peek()
+ match = TEST_RESULT.match(line)
+ skip_match = TEST_RESULT_SKIP.match(line)
-def parse_test_case(lines: LineStream) -> Optional[TestCase]:
- test_case = TestCase()
- save_non_diagnostic(lines, test_case)
- while parse_diagnostic(lines, test_case):
- pass
- if parse_ok_not_ok_test_case(lines, test_case):
- return test_case
- else:
- return None
-
-SUBTEST_HEADER = re.compile(r'^[\s]+# Subtest: (.*)$')
+ # Check if line matches test result line format
+ if not match:
+ return False
+ test.log.append(lines.pop())
-def parse_subtest_header(lines: LineStream) -> Optional[str]:
- consume_non_diagnostic(lines)
- if not lines:
- return None
- match = SUBTEST_HEADER.match(lines.peek())
- if match:
- lines.pop()
- return match.group(1)
+ # Set name of test object
+ if skip_match:
+ test.name = skip_match.group(4)
else:
- return None
+ test.name = match.group(4)
-SUBTEST_PLAN = re.compile(r'[\s]+[0-9]+\.\.([0-9]+)')
+ # Check test num
+ num = int(match.group(2))
+ if num != expected_num:
+ test.add_error('Expected test number ' +
+ str(expected_num) + ' but found ' + str(num))
-def parse_subtest_plan(lines: LineStream) -> Optional[int]:
- consume_non_diagnostic(lines)
- match = SUBTEST_PLAN.match(lines.peek())
- if match:
- lines.pop()
- return int(match.group(1))
- else:
- return None
-
-def max_status(left: TestStatus, right: TestStatus) -> TestStatus:
- if left == right:
- return left
- elif left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED:
- return TestStatus.TEST_CRASHED
- elif left == TestStatus.FAILURE or right == TestStatus.FAILURE:
- return TestStatus.FAILURE
- elif left == TestStatus.SKIPPED:
- return right
- else:
- return left
-
-def parse_ok_not_ok_test_suite(lines: LineStream,
- test_suite: TestSuite,
- expected_suite_index: int) -> bool:
- consume_non_diagnostic(lines)
- if not lines:
- test_suite.status = TestStatus.TEST_CRASHED
- return False
- line = lines.peek()
- match = OK_NOT_OK_MODULE.match(line)
- if match:
- lines.pop()
- if match.group(1) == 'ok':
- test_suite.status = TestStatus.SUCCESS
- else:
- test_suite.status = TestStatus.FAILURE
- skip_match = OK_NOT_OK_SKIP.match(line)
- if skip_match:
- test_suite.status = TestStatus.SKIPPED
- suite_index = int(match.group(2))
- if suite_index != expected_suite_index:
- print_with_timestamp(
- red('[ERROR] ') + 'expected_suite_index ' +
- str(expected_suite_index) + ', but got ' +
- str(suite_index))
+ # Set status of test object
+ status = match.group(1)
+ if test.status == TestStatus.TEST_CRASHED:
return True
+ elif skip_match:
+ test.status = TestStatus.SKIPPED
+ elif status == 'ok':
+ test.status = TestStatus.SUCCESS
else:
- return False
-
-def bubble_up_errors(status_list: Iterable[TestStatus]) -> TestStatus:
- return reduce(max_status, status_list, TestStatus.SKIPPED)
+ test.status = TestStatus.FAILURE
+ return True
+
+def parse_diagnostic(lines: LineStream) -> List[str]:
+ """
+ Pops and collects consecutive lines that do not match the format of
+ a test result line or a test header line, and returns them as a log
+ of diagnostic lines.
+
+ Line formats that are not parsed:
+ - '# Subtest: [test name]'
+ - '[ok|not ok] [test number] [-] [test name] [optional skip
+ directive]'
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+
+ Return:
+ Log of diagnostic lines
+ """
+ log = [] # type: List[str]
+ while lines and not TEST_RESULT.match(lines.peek()) and not \
+ TEST_HEADER.match(lines.peek()):
+ log.append(lines.pop())
+ return log
+
+DIAGNOSTIC_CRASH_MESSAGE = re.compile(r'^# .*?: kunit test case crashed!$')
+
+def parse_crash_in_log(test: Test) -> bool:
+ """
+ Iterates through the lines of the log searching for a crash message.
+ If a crash message is found, sets the test status to crashed and
+ returns True. Otherwise returns False.
+
+ Parameters:
+ test - Test object for current test being parsed
+
+ Return:
+ Boolean that represents if crash message found in log
+ """
+ for line in test.log:
+ if DIAGNOSTIC_CRASH_MESSAGE.match(line):
+ test.status = TestStatus.TEST_CRASHED
+ return True
+ return False
-def bubble_up_test_case_errors(test_suite: TestSuite) -> TestStatus:
- max_test_case_status = bubble_up_errors(x.status for x in test_suite.cases)
- return max_status(max_test_case_status, test_suite.status)
+# Printing helper methods:
-def parse_test_suite(lines: LineStream, expected_suite_index: int) -> Optional[TestSuite]:
- if not lines:
- return None
- consume_non_diagnostic(lines)
- test_suite = TestSuite()
- test_suite.status = TestStatus.SUCCESS
- name = parse_subtest_header(lines)
- if not name:
- return None
- test_suite.name = name
- expected_test_case_num = parse_subtest_plan(lines)
- if expected_test_case_num is None:
- return None
- while expected_test_case_num > 0:
- test_case = parse_test_case(lines)
- if not test_case:
- break
- test_suite.cases.append(test_case)
- expected_test_case_num -= 1
- if parse_ok_not_ok_test_suite(lines, test_suite, expected_suite_index):
- test_suite.status = bubble_up_test_case_errors(test_suite)
- return test_suite
- elif not lines:
- print_with_timestamp(red('[ERROR] ') + 'ran out of lines before end token')
- return test_suite
- else:
- print(f'failed to parse end of suite "{name}", at line {lines.line_number()}: {lines.peek()}')
- return None
+DIVIDER = '=' * 60
-TAP_HEADER = re.compile(r'^TAP version 14$')
+RESET = '\033[0;0m'
-def parse_tap_header(lines: LineStream) -> bool:
- consume_non_diagnostic(lines)
- if TAP_HEADER.match(lines.peek()):
- lines.pop()
- return True
- else:
- return False
+def red(text: str) -> str:
+ """Returns inputted string with red color code."""
+ return '\033[1;31m' + text + RESET
-TEST_PLAN = re.compile(r'[0-9]+\.\.([0-9]+)')
+def yellow(text: str) -> str:
+ """Returns inputted string with yellow color code."""
+ return '\033[1;33m' + text + RESET
-def parse_test_plan(lines: LineStream) -> Optional[int]:
- consume_non_diagnostic(lines)
- match = TEST_PLAN.match(lines.peek())
- if match:
- lines.pop()
- return int(match.group(1))
- else:
- return None
-
-def bubble_up_suite_errors(test_suites: Iterable[TestSuite]) -> TestStatus:
- return bubble_up_errors(x.status for x in test_suites)
-
-def parse_test_result(lines: LineStream) -> TestResult:
- consume_non_diagnostic(lines)
- if not lines or not parse_tap_header(lines):
- return TestResult(TestStatus.FAILURE_TO_PARSE_TESTS, [], lines)
- expected_test_suite_num = parse_test_plan(lines)
- if expected_test_suite_num == 0:
- return TestResult(TestStatus.NO_TESTS, [], lines)
- elif expected_test_suite_num is None:
- return TestResult(TestStatus.FAILURE_TO_PARSE_TESTS, [], lines)
- test_suites = []
- for i in range(1, expected_test_suite_num + 1):
- test_suite = parse_test_suite(lines, i)
- if test_suite:
- test_suites.append(test_suite)
- else:
- print_with_timestamp(
- red('[ERROR] ') + ' expected ' +
- str(expected_test_suite_num) +
- ' test suites, but got ' + str(i - 2))
- break
- test_suite = parse_test_suite(lines, -1)
- if test_suite:
- print_with_timestamp(red('[ERROR] ') +
- 'got unexpected test suite: ' + test_suite.name)
- if test_suites:
- return TestResult(bubble_up_suite_errors(test_suites), test_suites, lines)
- else:
- return TestResult(TestStatus.NO_TESTS, [], lines)
+def green(text: str) -> str:
+ """Returns inputted string with green color code."""
+ return '\033[1;32m' + text + RESET
-class TestCounts:
- passed: int
- failed: int
- crashed: int
- skipped: int
+ANSI_LEN = len(red(''))
- def __init__(self):
- self.passed = 0
- self.failed = 0
- self.crashed = 0
- self.skipped = 0
+def print_with_timestamp(message: str) -> None:
+ """Prints message with timestamp at beginning."""
+ print('[%s] %s' % (datetime.now().strftime('%H:%M:%S'), message))
- def total(self) -> int:
- return self.passed + self.failed + self.crashed + self.skipped
-
-def print_and_count_results(test_result: TestResult) -> TestCounts:
- counts = TestCounts()
- for test_suite in test_result.suites:
- if test_suite.status == TestStatus.SUCCESS:
- print_suite_divider(green('[PASSED] ') + test_suite.name)
- elif test_suite.status == TestStatus.SKIPPED:
- print_suite_divider(yellow('[SKIPPED] ') + test_suite.name)
- elif test_suite.status == TestStatus.TEST_CRASHED:
- print_suite_divider(red('[CRASHED] ' + test_suite.name))
+def format_test_divider(message: str, len_message: int) -> str:
+ """
+ Returns string with message centered in fixed width divider.
+
+ Example:
+ '===================== message example ====================='
+
+ Parameters:
+ message - message to be centered in divider line
+ len_message - length of the message to be printed such that
+ any characters of the color codes are not counted
+
+ Return:
+ String containing message centered in fixed width divider
+ """
+ default_count = 3 # default number of dashes
+ len_1 = default_count
+ len_2 = default_count
+ difference = len(DIVIDER) - len_message - 2 # 2 spaces added
+ if difference > 0:
+ # calculate number of dashes for each side of the divider
+ len_1 = int(difference / 2)
+ len_2 = difference - len_1
+ return ('=' * len_1) + ' ' + message + ' ' + ('=' * len_2)
+
+def print_test_header(test: Test) -> None:
+ """
+ Prints test header with test name and optionally the expected number
+ of subtests.
+
+ Example:
+ '=================== example (2 subtests) ==================='
+
+ Parameters:
+ test - Test object representing current test being printed
+ """
+ message = test.name
+ if test.expected_count:
+ message += ' (' + str(test.expected_count) + ' subtests)'
+ print_with_timestamp(format_test_divider(message, len(message)))
+
+def print_log(log: Iterable[str]) -> None:
+ """
+ Prints all strings in saved log for test in yellow.
+
+ Parameters:
+ log - Iterable object with all strings saved in log for test
+ """
+ for m in log:
+ print_with_timestamp(yellow(m))
+ print_with_timestamp('')
+
+def format_test_result(test: Test) -> str:
+ """
+	Returns string containing formatted test result with colored status
+	and test name.
+
+ Example:
+ '[PASSED] example'
+
+ Parameters:
+ test - Test object representing current test being printed
+
+ Return:
+ String containing formatted test result
+ """
+ if test.status == TestStatus.SUCCESS:
+ return (green('[PASSED] ') + test.name)
+ elif test.status == TestStatus.SKIPPED:
+ return (yellow('[SKIPPED] ') + test.name)
+ elif test.status == TestStatus.TEST_CRASHED:
+ print_log(test.log)
+ return (red('[CRASHED] ') + test.name)
+ else:
+ print_log(test.log)
+ return (red('[FAILED] ') + test.name)
+
+def print_test_result(test: Test) -> None:
+ """
+ Prints result line with status of test.
+
+ Example:
+ '[PASSED] example'
+
+ Parameters:
+ test - Test object representing current test being printed
+ """
+ print_with_timestamp(format_test_result(test))
+
+def print_test_footer(test: Test) -> None:
+ """
+ Prints test footer with status of test.
+
+ Example:
+ '===================== [PASSED] example ====================='
+
+ Parameters:
+ test - Test object representing current test being printed
+ """
+ message = format_test_result(test)
+ print_with_timestamp(format_test_divider(message,
+ len(message) - ANSI_LEN))
+
+def print_summary_line(test: Test) -> None:
+ """
+ Prints summary line of test object. Color of line is dependent on
+ status of test. Color is green if test passes, yellow if test is
+	skipped, and red if the test fails or crashes. The summary line
+	contains counts of the statuses of the test's subtests, or of the
+	test itself if it has no subtests.
+
+ Example:
+ "Testing complete. Passed: 2, Failed: 0, Crashed: 0, Skipped: 0,
+ Errors: 0"
+
+	Parameters:
+	test - Test object representing current test being printed
+ """
+ if test.status == TestStatus.SUCCESS or \
+ test.status == TestStatus.NO_TESTS:
+ color = green
+ elif test.status == TestStatus.SKIPPED:
+ color = yellow
+ else:
+ color = red
+ counts = test.counts
+ print_with_timestamp(color('Testing complete. ' + str(counts)))
+
+def print_error(error_message: str) -> None:
+ """
+ Prints error message with error format.
+
+ Example:
+ "[ERROR] Test example: missing test plan!"
+
+ Parameters:
+ error_message - message describing error
+ """
+ print_with_timestamp(red('[ERROR] ') + error_message)
+
+# Other methods:
+
+def bubble_up_test_results(test: Test) -> None:
+ """
+	If the test has subtests, add the counts of the subtests to the test
+	and, if any of the subtests crashed, set the test status to crashed.
+	Otherwise, if the test has no subtests, add the status of the test
+	to its own counts.
+
+ Parameters:
+ test - Test object for current test being parsed
+ """
+ parse_crash_in_log(test)
+ subtests = test.subtests
+ counts = test.counts
+ status = test.status
+ for t in subtests:
+ counts.add_subtest_counts(t.counts)
+ if counts.total() == 0:
+ counts.add_status(status)
+ elif test.counts.get_status() == TestStatus.TEST_CRASHED:
+ test.status = TestStatus.TEST_CRASHED
+
+def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
+ """
+ Finds next test to parse in LineStream, creates new Test object,
+ parses any subtests of the test, populates Test object with all
+ information (status, name) about the test and the Test objects for
+ any subtests, and then returns the Test object. The method accepts
+ three formats of tests:
+
+ Accepted test formats:
+
+ - Main KTAP/TAP header
+
+ Example:
+
+ KTAP version 1
+ 1..4
+ [subtests]
+
+ - Subtest header line
+
+ Example:
+
+ # Subtest: name
+ 1..3
+ [subtests]
+ ok 1 name
+
+ - Test result line
+
+ Example:
+
+ ok 1 - test
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+ expected_num - expected test number for test to be parsed
+ log - list of strings containing any preceding diagnostic lines
+ corresponding to the current test
+
+ Return:
+ Test object populated with characteristics and any subtests
+ """
+ test = Test()
+ test.log.extend(log)
+ parent_test = False
+ main = parse_ktap_header(lines, test)
+ if main:
+ # If KTAP/TAP header is found, attempt to parse
+ # test plan
+ test.name = "main"
+ parse_test_plan(lines, test)
+ else:
+		# If KTAP/TAP header is not found, test must be a subtest
+		# header or a test result line, so attempt to parse a
+		# subtest header
+ parent_test = parse_test_header(lines, test)
+ if parent_test:
+ # If subtest header is found, attempt to parse
+ # test plan and print header
+ parse_test_plan(lines, test)
+ print_test_header(test)
+ expected_count = test.expected_count
+ subtests = []
+ test_num = 1
+ while main or expected_count is None or test_num <= expected_count:
+ # Loop to parse any subtests.
+		# If test is the main test, do not break until no lines are left.
+		# Otherwise, break after parsing the expected number of tests or,
+		# if the expected number of tests is unknown, break when a test
+		# result line with a name matching the subtest header is found.
+ if not lines:
+ if expected_count and test_num <= expected_count:
+ test.add_error('missing expected subtests!')
+ break
+ sub_log = parse_diagnostic(lines)
+ if not expected_count and not main and \
+ peek_test_name_match(lines, test):
+ test.log.extend(sub_log)
+ break
+ subtests.append(parse_test(lines, test_num, sub_log))
+ test_num += 1
+ test.subtests = subtests
+ if not main:
+ # If not main test, look for test result line
+ test.log.extend(parse_diagnostic(lines))
+ if (parent_test and peek_test_name_match(lines, test)) or \
+ not parent_test:
+ parse_test_result(lines, test, expected_num)
else:
- print_suite_divider(red('[FAILED] ') + test_suite.name)
- for test_case in test_suite.cases:
- if test_case.status == TestStatus.SUCCESS:
- counts.passed += 1
- print_with_timestamp(green('[PASSED] ') + test_case.name)
- elif test_case.status == TestStatus.SKIPPED:
- counts.skipped += 1
- print_with_timestamp(yellow('[SKIPPED] ') + test_case.name)
- elif test_case.status == TestStatus.TEST_CRASHED:
- counts.crashed += 1
- print_with_timestamp(red('[CRASHED] ' + test_case.name))
- print_log(map(yellow, test_case.log))
- print_with_timestamp('')
- else:
- counts.failed += 1
- print_with_timestamp(red('[FAILED] ') + test_case.name)
- print_log(map(yellow, test_case.log))
- print_with_timestamp('')
- return counts
+ test.add_error('missing subtest result line!')
+ # Add statuses to TestCounts attribute in Test object
+ bubble_up_test_results(test)
+ if parent_test:
+ # If test has subtests and is not the main test object, print
+ # footer.
+ print_test_footer(test)
+ elif not main:
+ print_test_result(test)
+ return test
def parse_run_tests(kernel_output: Iterable[str]) -> TestResult:
- counts = TestCounts()
+ """
+ Using kernel output, extract KTAP lines, parse the lines for test
+	results, and print condensed test results and a summary line.
+
+	Parameters:
+	kernel_output - Iterable object containing lines of kernel output
+
+	Return:
+	TestResult - Tuple containing status of main test object, main test
+ object with all subtests, and log of all KTAP lines.
+ """
+ print_with_timestamp(DIVIDER)
lines = extract_tap_lines(kernel_output)
- test_result = parse_test_result(lines)
- if test_result.status == TestStatus.NO_TESTS:
- print(red('[ERROR] ') + yellow('no tests run!'))
- elif test_result.status == TestStatus.FAILURE_TO_PARSE_TESTS:
- print(red('[ERROR] ') + yellow('could not parse test results!'))
+ test = Test()
+ if not lines:
+ test.add_error('invalid KTAP input!')
+ test.status = TestStatus.FAILURE_TO_PARSE_TESTS
else:
- counts = print_and_count_results(test_result)
+ test = parse_test(lines, 0, [])
+ if test.status != TestStatus.NO_TESTS:
+ test.status = test.counts.get_status()
print_with_timestamp(DIVIDER)
- if test_result.status == TestStatus.SUCCESS:
- fmt = green
- elif test_result.status == TestStatus.SKIPPED:
- fmt = yellow
- else:
-		fmt = red
- print_with_timestamp(
- fmt('Testing complete. %d tests run. %d failed. %d crashed. %d skipped.' %
- (counts.total(), counts.failed, counts.crashed, counts.skipped)))
- return test_result
+ print_summary_line(test)
+ return TestResult(test.status, test, lines)
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index 3251cef25b76..50c4d3e74115 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -106,10 +106,10 @@ class KUnitParserTest(unittest.TestCase):
with open(log_path) as file:
result = kunit_parser.extract_tap_lines(file.readlines())
self.assertContains('TAP version 14', result)
- self.assertContains(' # Subtest: example', result)
- self.assertContains(' 1..2', result)
- self.assertContains(' ok 1 - example_simple_test', result)
- self.assertContains(' ok 2 - example_mock_test', result)
+ self.assertContains('# Subtest: example', result)
+ self.assertContains('1..2', result)
+ self.assertContains('ok 1 - example_simple_test', result)
+ self.assertContains('ok 2 - example_mock_test', result)
self.assertContains('ok 1 - example', result)
def test_output_with_prefix_isolated_correctly(self):
@@ -117,28 +117,28 @@ class KUnitParserTest(unittest.TestCase):
with open(log_path) as file:
result = kunit_parser.extract_tap_lines(file.readlines())
self.assertContains('TAP version 14', result)
- self.assertContains(' # Subtest: kunit-resource-test', result)
- self.assertContains(' 1..5', result)
- self.assertContains(' ok 1 - kunit_resource_test_init_resources', result)
- self.assertContains(' ok 2 - kunit_resource_test_alloc_resource', result)
- self.assertContains(' ok 3 - kunit_resource_test_destroy_resource', result)
- self.assertContains(' foo bar #', result)
- self.assertContains(' ok 4 - kunit_resource_test_cleanup_resources', result)
- self.assertContains(' ok 5 - kunit_resource_test_proper_free_ordering', result)
+ self.assertContains('# Subtest: kunit-resource-test', result)
+ self.assertContains('1..5', result)
+ self.assertContains('ok 1 - kunit_resource_test_init_resources', result)
+ self.assertContains('ok 2 - kunit_resource_test_alloc_resource', result)
+ self.assertContains('ok 3 - kunit_resource_test_destroy_resource', result)
+ self.assertContains('foo bar #', result)
+ self.assertContains('ok 4 - kunit_resource_test_cleanup_resources', result)
+ self.assertContains('ok 5 - kunit_resource_test_proper_free_ordering', result)
self.assertContains('ok 1 - kunit-resource-test', result)
- self.assertContains(' foo bar # non-kunit output', result)
- self.assertContains(' # Subtest: kunit-try-catch-test', result)
- self.assertContains(' 1..2', result)
- self.assertContains(' ok 1 - kunit_test_try_catch_successful_try_no_catch',
+ self.assertContains('foo bar # non-kunit output', result)
+ self.assertContains('# Subtest: kunit-try-catch-test', result)
+ self.assertContains('1..2', result)
+ self.assertContains('ok 1 - kunit_test_try_catch_successful_try_no_catch',
result)
- self.assertContains(' ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch',
+ self.assertContains('ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch',
result)
self.assertContains('ok 2 - kunit-try-catch-test', result)
- self.assertContains(' # Subtest: string-stream-test', result)
- self.assertContains(' 1..3', result)
- self.assertContains(' ok 1 - string_stream_test_empty_on_creation', result)
- self.assertContains(' ok 2 - string_stream_test_not_empty_after_add', result)
- self.assertContains(' ok 3 - string_stream_test_get_string', result)
+ self.assertContains('# Subtest: string-stream-test', result)
+ self.assertContains('1..3', result)
+ self.assertContains('ok 1 - string_stream_test_empty_on_creation', result)
+ self.assertContains('ok 2 - string_stream_test_not_empty_after_add', result)
+ self.assertContains('ok 3 - string_stream_test_get_string', result)
self.assertContains('ok 3 - string-stream-test', result)
def test_parse_successful_test_log(self):
@@ -148,6 +148,13 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
+ def test_parse_successful_nested_tests_log(self):
+ all_passed_log = test_data_path('test_is_test_passed-all_passed_nested.log')
+ with open(all_passed_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines())
+ self.assertEqual(
+ kunit_parser.TestStatus.SUCCESS,
+ result.status)
def test_parse_failed_test_log(self):
failed_log = test_data_path('test_is_test_passed-failure.log')
@@ -162,17 +169,31 @@ class KUnitParserTest(unittest.TestCase):
with open(empty_log) as file:
result = kunit_parser.parse_run_tests(
kunit_parser.extract_tap_lines(file.readlines()))
- self.assertEqual(0, len(result.suites))
+ self.assertEqual(0, len(result.test.subtests))
self.assertEqual(
kunit_parser.TestStatus.FAILURE_TO_PARSE_TESTS,
result.status)
+ def test_missing_test_plan(self):
+ missing_plan_log = test_data_path('test_is_test_passed-'
+ 'missing_plan.log')
+ with open(missing_plan_log) as file:
+ result = kunit_parser.parse_run_tests(
+ kunit_parser.extract_tap_lines(
+ file.readlines()))
+ self.assertEqual(2, result.test.counts.errors)
+ self.assertEqual(
+ kunit_parser.TestStatus.SUCCESS,
+ result.status)
+
def test_no_tests(self):
- empty_log = test_data_path('test_is_test_passed-no_tests_run_with_header.log')
- with open(empty_log) as file:
+ header_log = test_data_path('test_is_test_passed-'
+ 'no_tests_run_with_header.log')
+ with open(header_log) as file:
result = kunit_parser.parse_run_tests(
- kunit_parser.extract_tap_lines(file.readlines()))
- self.assertEqual(0, len(result.suites))
+ kunit_parser.extract_tap_lines(
+ file.readlines()))
+ self.assertEqual(0, len(result.test.subtests))
self.assertEqual(
kunit_parser.TestStatus.NO_TESTS,
result.status)
@@ -182,15 +203,17 @@ class KUnitParserTest(unittest.TestCase):
print_mock = mock.patch('builtins.print').start()
with open(crash_log) as file:
result = kunit_parser.parse_run_tests(
- kunit_parser.extract_tap_lines(file.readlines()))
- print_mock.assert_any_call(StrContains('could not parse test results!'))
+ kunit_parser.extract_tap_lines(
+ file.readlines()))
+ print_mock.assert_any_call(StrContains('invalid KTAP input!'))
print_mock.stop()
file.close()
def test_crashed_test(self):
crashed_log = test_data_path('test_is_test_passed-crash.log')
with open(crashed_log) as file:
- result = kunit_parser.parse_run_tests(file.readlines())
+ result = kunit_parser.parse_run_tests(
+ file.readlines())
self.assertEqual(
kunit_parser.TestStatus.TEST_CRASHED,
result.status)
@@ -216,6 +239,23 @@ class KUnitParserTest(unittest.TestCase):
result.status)
file.close()
+ def test_ignores_hyphen(self):
+ hyphen_log = test_data_path('test_strip_hyphen.log')
+ file = open(hyphen_log)
+ result = kunit_parser.parse_run_tests(file.readlines())
+
+		# The optional hyphen in result lines should be stripped from names.
+ self.assertEqual(
+ kunit_parser.TestStatus.SUCCESS,
+ result.status)
+ self.assertEqual(
+ "sysctl_test",
+ result.test.subtests[0].name)
+ self.assertEqual(
+ "example",
+ result.test.subtests[1].name)
+ file.close()
+
def test_ignores_prefix_printk_time(self):
prefix_log = test_data_path('test_config_printk_time.log')
@@ -224,7 +264,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
def test_ignores_multiple_prefixes(self):
prefix_log = test_data_path('test_multiple_prefixes.log')
@@ -233,7 +273,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
def test_prefix_mixed_kernel_output(self):
mixed_prefix_log = test_data_path('test_interrupted_tap_output.log')
@@ -242,7 +282,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
def test_prefix_poundsign(self):
pound_log = test_data_path('test_pound_sign.log')
@@ -251,16 +291,16 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
def test_kernel_panic_end(self):
panic_log = test_data_path('test_kernel_panic_interrupt.log')
with open(panic_log) as file:
result = kunit_parser.parse_run_tests(file.readlines())
self.assertEqual(
- kunit_parser.TestStatus.TEST_CRASHED,
+ kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
def test_pound_no_prefix(self):
pound_log = test_data_path('test_pound_no_prefix.log')
@@ -269,7 +309,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
class LinuxSourceTreeTest(unittest.TestCase):
@@ -291,6 +331,14 @@ class LinuxSourceTreeTest(unittest.TestCase):
pass
tree = kunit_kernel.LinuxSourceTree('', kunitconfig_path=dir)
+ def test_kselftest_nested(self):
+ kselftest_log = test_data_path('test_is_test_passed-kselftest.log')
+ with open(kselftest_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines())
+ self.assertEqual(
+ kunit_parser.TestStatus.SUCCESS,
+ result.status)
+
# TODO: add more test cases.
@@ -322,6 +370,12 @@ class KUnitJsonTest(unittest.TestCase):
result = self._json_for('test_is_test_passed-no_tests_run_with_header.log')
self.assertEqual(0, len(result['sub_groups']))
+ def test_nested_json(self):
+ result = self._json_for('test_is_test_passed-all_passed_nested.log')
+ self.assertEqual(
+ {'name': 'example_simple_test', 'status': 'PASS'},
+ result["sub_groups"][0]["sub_groups"][0]["test_cases"][0])
+
class StrContains(str):
def __eq__(self, other):
return self in other
@@ -380,7 +434,7 @@ class KUnitMainTest(unittest.TestCase):
self.assertEqual(e.exception.code, 1)
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
- self.print_mock.assert_any_call(StrContains(' 0 tests run'))
+ self.print_mock.assert_any_call(StrContains('invalid KTAP input!'))
def test_exec_raw_output(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
@@ -388,7 +442,7 @@ class KUnitMainTest(unittest.TestCase):
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
for call in self.print_mock.call_args_list:
self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
- self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))
+ self.assertNotEqual(call, mock.call(StrContains(' 0 tests run!')))
def test_run_raw_output(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
@@ -397,7 +451,7 @@ class KUnitMainTest(unittest.TestCase):
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
for call in self.print_mock.call_args_list:
self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
- self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))
+ self.assertNotEqual(call, mock.call(StrContains(' 0 tests run!')))
def test_run_raw_output_kunit(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log b/tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log
new file mode 100644
index 000000000000..9d5b04fe43a6
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log
@@ -0,0 +1,34 @@
+TAP version 14
+1..2
+ # Subtest: sysctl_test
+ 1..4
+ # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed
+ ok 1 - sysctl_test_dointvec_null_tbl_data
+ # Subtest: example
+ 1..2
+ init_suite
+ # example_simple_test: initializing
+ # example_simple_test: example_simple_test passed
+ ok 1 - example_simple_test
+ # example_mock_test: initializing
+ # example_mock_test: example_mock_test passed
+ ok 2 - example_mock_test
+ kunit example: all tests passed
+ ok 2 - example
+ # sysctl_test_dointvec_table_len_is_zero: sysctl_test_dointvec_table_len_is_zero passed
+ ok 3 - sysctl_test_dointvec_table_len_is_zero
+ # sysctl_test_dointvec_table_read_but_position_set: sysctl_test_dointvec_table_read_but_position_set passed
+ ok 4 - sysctl_test_dointvec_table_read_but_position_set
+kunit sysctl_test: all tests passed
+ok 1 - sysctl_test
+ # Subtest: example
+ 1..2
+init_suite
+ # example_simple_test: initializing
+ # example_simple_test: example_simple_test passed
+ ok 1 - example_simple_test
+ # example_mock_test: initializing
+ # example_mock_test: example_mock_test passed
+ ok 2 - example_mock_test
+kunit example: all tests passed
+ok 2 - example
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-kselftest.log b/tools/testing/kunit/test_data/test_is_test_passed-kselftest.log
new file mode 100644
index 000000000000..65d3f27feaf2
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-kselftest.log
@@ -0,0 +1,14 @@
+TAP version 13
+1..2
+# selftests: membarrier: membarrier_test_single_thread
+# TAP version 13
+# 1..2
+# ok 1 sys_membarrier available
+# ok 2 sys membarrier invalid command test: command = -1, flags = 0, errno = 22. Failed as expected
+ok 1 selftests: membarrier: membarrier_test_single_thread
+# selftests: membarrier: membarrier_test_multi_thread
+# TAP version 13
+# 1..2
+# ok 1 sys_membarrier available
+# ok 2 sys membarrier invalid command test: command = -1, flags = 0, errno = 22. Failed as expected
+ok 2 selftests: membarrier: membarrier_test_multi_thread
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log b/tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log
new file mode 100644
index 000000000000..5cd17b7f818a
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log
@@ -0,0 +1,31 @@
+KTAP version 1
+ # Subtest: sysctl_test
+ # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed
+ ok 1 - sysctl_test_dointvec_null_tbl_data
+ # sysctl_test_dointvec_table_maxlen_unset: sysctl_test_dointvec_table_maxlen_unset passed
+ ok 2 - sysctl_test_dointvec_table_maxlen_unset
+ # sysctl_test_dointvec_table_len_is_zero: sysctl_test_dointvec_table_len_is_zero passed
+ ok 3 - sysctl_test_dointvec_table_len_is_zero
+ # sysctl_test_dointvec_table_read_but_position_set: sysctl_test_dointvec_table_read_but_position_set passed
+ ok 4 - sysctl_test_dointvec_table_read_but_position_set
+ # sysctl_test_dointvec_happy_single_positive: sysctl_test_dointvec_happy_single_positive passed
+ ok 5 - sysctl_test_dointvec_happy_single_positive
+ # sysctl_test_dointvec_happy_single_negative: sysctl_test_dointvec_happy_single_negative passed
+ ok 6 - sysctl_test_dointvec_happy_single_negative
+ # sysctl_test_dointvec_single_less_int_min: sysctl_test_dointvec_single_less_int_min passed
+ ok 7 - sysctl_test_dointvec_single_less_int_min
+ # sysctl_test_dointvec_single_greater_int_max: sysctl_test_dointvec_single_greater_int_max passed
+ ok 8 - sysctl_test_dointvec_single_greater_int_max
+kunit sysctl_test: all tests passed
+ok 1 - sysctl_test
+ # Subtest: example
+ 1..2
+init_suite
+ # example_simple_test: initializing
+ # example_simple_test: example_simple_test passed
+ ok 1 - example_simple_test
+ # example_mock_test: initializing
+ # example_mock_test: example_mock_test passed
+ ok 2 - example_mock_test
+kunit example: all tests passed
+ok 2 - example
diff --git a/tools/testing/kunit/test_data/test_strip_hyphen.log b/tools/testing/kunit/test_data/test_strip_hyphen.log
new file mode 100644
index 000000000000..92ac7c24b374
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_strip_hyphen.log
@@ -0,0 +1,16 @@
+KTAP version 1
+1..2
+ # Subtest: sysctl_test
+ 1..1
+ # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed
+ ok 1 - sysctl_test_dointvec_null_tbl_data
+kunit sysctl_test: all tests passed
+ok 1 - sysctl_test
+ # Subtest: example
+ 1..1
+init_suite
+ # example_simple_test: initializing
+ # example_simple_test: example_simple_test passed
+ ok 1 example_simple_test
+kunit example: all tests passed
+ok 2 example
base-commit: 28a8b037484044d79f3b8d409a076a925ac75f8d
--
2.33.0.800.g4c38ced690-goog
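For illustration, a minimal sketch of driving the reworked parser
(`kunit_parser` and `parse_run_tests()` are the names from the diff
above; the log path is just an example):

    import kunit_parser

    # Feed raw kernel output to the parser; it extracts the KTAP lines,
    # prints condensed results, and returns a TestResult tuple.
    with open('.kunit/test.log') as f:
        result = kunit_parser.parse_run_tests(f.readlines())

    # The result now carries a nested Test tree instead of flat suites.
    print(result.status)                # e.g. TestStatus.SUCCESS
    for suite in result.test.subtests:  # suites are subtests of "main"
        print(suite.name, suite.status)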
Currently, `run_kernel()` dumps all the kernel output to a file
(.kunit/test.log) and then opens the file and yields it to callers.
This made it easier to respect the requested timeout, if any.
But it means that we can't yield the results in real time, either to the
parser or to stdout (if --raw_output is set).
This change spins up a background thread to enforce the timeout, which
allows us to yield the kernel output in real time, while also copying it
to the .kunit/test.log file.
It's also careful to ensure that the .kunit/test.log file is complete,
even if the kunit_parser throws an exception/otherwise doesn't consume
every line, see the new `finally` block and unit test.
For example:
$ ./tools/testing/kunit/kunit.py run --arch=x86_64 --raw_output
<configure + build steps>
...
<can now see output from QEMU in real time>
This does not currently have a visible effect when --raw_output is not
passed, as kunit_parser.py currently only outputs everything at the end.
But that could change, and this patch is a necessary step towards
showing parsed test results in real time.
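The core of the change is a tee loop roughly like the following
(simplified sketch, not the exact code from the diff below; the
timeout value and process cleanup details are elided):

    import subprocess, threading

    def stream_and_log(process, log_path, timeout=300):
        # Enforce the timeout from a background thread so the main
        # thread is free to yield output as it arrives.
        def wait_or_kill():
            try:
                process.wait(timeout=timeout)
            except subprocess.TimeoutExpired:
                process.terminate()
        waiter = threading.Thread(target=wait_or_kill)
        waiter.start()
        output = open(log_path, 'w')
        try:
            for line in process.stdout:  # assumes Popen(..., text=True)
                output.write(line)       # tee into .kunit/test.log ...
                yield line               # ... while yielding in real time
        finally:
            # Runs even if the caller stops consuming lines early,
            # so the log file is always complete.
            output.write(process.stdout.read())
            output.close()
            waiter.join()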
Signed-off-by: Daniel Latypov <dlatypov(a)google.com>
---
tools/testing/kunit/kunit_kernel.py | 73 +++++++++++++++-----------
tools/testing/kunit/kunit_tool_test.py | 17 ++++++
2 files changed, 60 insertions(+), 30 deletions(-)
diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
index 2c6f916ccbaf..b8cba8123aa3 100644
--- a/tools/testing/kunit/kunit_kernel.py
+++ b/tools/testing/kunit/kunit_kernel.py
@@ -12,7 +12,8 @@ import subprocess
import os
import shutil
import signal
-from typing import Iterator, Optional, Tuple
+import threading
+from typing import Iterator, List, Optional, Tuple
from contextlib import ExitStack
@@ -103,8 +104,8 @@ class LinuxSourceTreeOperations(object):
if stderr: # likely only due to build warnings
print(stderr.decode())
- def run(self, params, timeout, build_dir, outfile) -> None:
- pass
+ def start(self, params: List[str], build_dir: str) -> subprocess.Popen:
+ raise RuntimeError('not implemented!')
class LinuxSourceTreeOperationsQemu(LinuxSourceTreeOperations):
@@ -123,7 +124,7 @@ class LinuxSourceTreeOperationsQemu(LinuxSourceTreeOperations):
kconfig.parse_from_string(self._kconfig)
base_kunitconfig.merge_in_entries(kconfig)
- def run(self, params, timeout, build_dir, outfile):
+ def start(self, params: List[str], build_dir: str) -> subprocess.Popen:
kernel_path = os.path.join(build_dir, self._kernel_path)
qemu_command = ['qemu-system-' + self._qemu_arch,
'-nodefaults',
@@ -134,18 +135,10 @@ class LinuxSourceTreeOperationsQemu(LinuxSourceTreeOperations):
'-nographic',
'-serial stdio'] + self._extra_qemu_params
print('Running tests with:\n$', ' '.join(qemu_command))
- with open(outfile, 'w') as output:
- process = subprocess.Popen(' '.join(qemu_command),
- stdin=subprocess.PIPE,
- stdout=output,
- stderr=subprocess.STDOUT,
- text=True, shell=True)
- try:
- process.wait(timeout=timeout)
- except Exception as e:
- print(e)
- process.terminate()
- return process
+ return subprocess.Popen(' '.join(qemu_command),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ text=True, shell=True)
class LinuxSourceTreeOperationsUml(LinuxSourceTreeOperations):
"""An abstraction over command line operations performed on a source tree."""
@@ -175,17 +168,13 @@ class LinuxSourceTreeOperationsUml(LinuxSourceTreeOperations):
kunit_parser.print_with_timestamp(
'Starting Kernel with all configs takes a few minutes...')
- def run(self, params, timeout, build_dir, outfile):
+ def start(self, params: List[str], build_dir: str) -> subprocess.Popen:
"""Runs the Linux UML binary. Must be named 'linux'."""
linux_bin = get_file_path(build_dir, 'linux')
- outfile = get_outfile_path(build_dir)
- with open(outfile, 'w') as output:
- process = subprocess.Popen([linux_bin] + params,
- stdin=subprocess.PIPE,
- stdout=output,
- stderr=subprocess.STDOUT,
- text=True)
- process.wait(timeout)
+ return subprocess.Popen([linux_bin] + params,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ text=True)
def get_kconfig_path(build_dir) -> str:
return get_file_path(build_dir, KCONFIG_PATH)
@@ -330,12 +319,36 @@ class LinuxSourceTree(object):
args.extend(['mem=1G', 'console=tty', 'kunit_shutdown=halt'])
if filter_glob:
args.append('kunit.filter_glob='+filter_glob)
- outfile = get_outfile_path(build_dir)
- self._ops.run(args, timeout, build_dir, outfile)
- subprocess.call(['stty', 'sane'])
- with open(outfile, 'r') as file:
- for line in file:
+
+ process = self._ops.start(args, build_dir)
+ assert process.stdout is not None # tell mypy it's set
+
+ # Enforce the timeout in a background thread.
+ def _wait_proc():
+ try:
+ process.wait(timeout=timeout)
+ except Exception as e:
+ print(e)
+ process.terminate()
+ process.wait()
+ waiter = threading.Thread(target=_wait_proc)
+ waiter.start()
+
+ output = open(get_outfile_path(build_dir), 'w')
+ try:
+ # Tee the output to the file and to our caller in real time.
+ for line in process.stdout:
+ output.write(line)
yield line
+ # This runs even if our caller doesn't consume every line.
+ finally:
+ # Flush any leftover output to the file
+ output.write(process.stdout.read())
+ output.close()
+ process.stdout.close()
+
+ waiter.join()
+ subprocess.call(['stty', 'sane'])
def signal_handler(self, sig, frame) -> None:
logging.error('Build interruption occurred. Cleaning console.')
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index 619c4554cbff..f9a7398a9584 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -14,6 +14,7 @@ import tempfile, shutil # Handling test_tmpdir
import itertools
import json
import signal
+import subprocess
import os
import kunit_config
@@ -291,6 +292,22 @@ class LinuxSourceTreeTest(unittest.TestCase):
pass
tree = kunit_kernel.LinuxSourceTree('', kunitconfig_path=dir)
+ def test_run_kernel_hits_exception(self):
+ def fake_start(unused_args, unused_build_dir):
+ return subprocess.Popen(['echo "hi\nbye"'], shell=True, text=True, stdout=subprocess.PIPE)
+
+ with tempfile.TemporaryDirectory('') as build_dir:
+ tree = kunit_kernel.LinuxSourceTree(build_dir, load_config=False)
+ mock.patch.object(tree._ops, 'start', side_effect=fake_start).start()
+
+ with self.assertRaises(ValueError):
+ for line in tree.run_kernel(build_dir=build_dir):
+ self.assertEqual(line, 'hi\n')
+ raise ValueError('uh oh, did not read all output')
+
+ with open(kunit_kernel.get_outfile_path(build_dir), 'rt') as outfile:
+ self.assertEqual(outfile.read(), 'hi\nbye\n', msg='Missing some output')
+
# TODO: add more test cases.
base-commit: 3b29021ddd10cfb6b2565c623595bd3b02036f33
--
2.33.0.800.g4c38ced690-goog
Drop some variables in unit tests that were unused and/or add assertions
based on them.
For ExitStack, it was imported, but the `es` variable wasn't used so it
didn't do anything, and we were leaking the file objects.
Refactor it to just use nested `with` statements to properly close them.
And drop the direct use of .close() on file objects in the kunit tool
unit test, as these can be leaked if test assertions fail.
Signed-off-by: Daniel Latypov <dlatypov(a)google.com>
---
tools/testing/kunit/kunit.py | 1 -
tools/testing/kunit/kunit_kernel.py | 12 ++++--------
tools/testing/kunit/kunit_tool_test.py | 18 ++++++++----------
3 files changed, 12 insertions(+), 19 deletions(-)
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index 66f67af97971..1b2b7f06bb8c 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -18,7 +18,6 @@ from collections import namedtuple
from enum import Enum, auto
from typing import Iterable
-import kunit_config
import kunit_json
import kunit_kernel
import kunit_parser
diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
index 2c6f916ccbaf..1870e75ff153 100644
--- a/tools/testing/kunit/kunit_kernel.py
+++ b/tools/testing/kunit/kunit_kernel.py
@@ -14,10 +14,6 @@ import shutil
import signal
from typing import Iterator, Optional, Tuple
-from contextlib import ExitStack
-
-from collections import namedtuple
-
import kunit_config
import kunit_parser
import qemu_config
@@ -168,10 +164,10 @@ class LinuxSourceTreeOperationsUml(LinuxSourceTreeOperations):
process.wait()
kunit_parser.print_with_timestamp(
'Disabling broken configs to run KUnit tests...')
- with ExitStack() as es:
- config = open(get_kconfig_path(build_dir), 'a')
- disable = open(BROKEN_ALLCONFIG_PATH, 'r').read()
- config.write(disable)
+
+ with open(get_kconfig_path(build_dir), 'a') as config:
+ with open(BROKEN_ALLCONFIG_PATH, 'r') as disable:
+ config.write(disable.read())
kunit_parser.print_with_timestamp(
'Starting Kernel with all configs takes a few minutes...')
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index 619c4554cbff..cad37a98e599 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -185,7 +185,7 @@ class KUnitParserTest(unittest.TestCase):
kunit_parser.extract_tap_lines(file.readlines()))
print_mock.assert_any_call(StrContains('could not parse test results!'))
print_mock.stop()
- file.close()
+ self.assertEqual(0, len(result.suites))
def test_crashed_test(self):
crashed_log = test_data_path('test_is_test_passed-crash.log')
@@ -197,24 +197,22 @@ class KUnitParserTest(unittest.TestCase):
def test_skipped_test(self):
skipped_log = test_data_path('test_skip_tests.log')
- file = open(skipped_log)
- result = kunit_parser.parse_run_tests(file.readlines())
+ with open(skipped_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines())
# A skipped test does not fail the whole suite.
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- file.close()
def test_skipped_all_tests(self):
skipped_log = test_data_path('test_skip_all_tests.log')
- file = open(skipped_log)
- result = kunit_parser.parse_run_tests(file.readlines())
+ with open(skipped_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines())
self.assertEqual(
kunit_parser.TestStatus.SKIPPED,
result.status)
- file.close()
def test_ignores_prefix_printk_time(self):
@@ -283,13 +281,13 @@ class LinuxSourceTreeTest(unittest.TestCase):
def test_valid_kunitconfig(self):
with tempfile.NamedTemporaryFile('wt') as kunitconfig:
- tree = kunit_kernel.LinuxSourceTree('', kunitconfig_path=kunitconfig.name)
+ kunit_kernel.LinuxSourceTree('', kunitconfig_path=kunitconfig.name)
def test_dir_kunitconfig(self):
with tempfile.TemporaryDirectory('') as dir:
- with open(os.path.join(dir, '.kunitconfig'), 'w') as f:
+ with open(os.path.join(dir, '.kunitconfig'), 'w'):
pass
- tree = kunit_kernel.LinuxSourceTree('', kunitconfig_path=dir)
+ kunit_kernel.LinuxSourceTree('', kunitconfig_path=dir)
# TODO: add more test cases.
base-commit: 3b29021ddd10cfb6b2565c623595bd3b02036f33
--
2.33.0.685.g46640cef36-goog
Hi Linus,
Please pull the following Kselftest fixes update for Linux 5.15-rc5.
This Kselftest fixes update for Linux 5.15-rc5 consists of a fix
to implicit declaration warns in drivers/dma-buf test.
I have been seeing this for a while and managed to fix it.
diff is attached.
thanks,
-- Shuah
----------------------------------------------------------------
The following changes since commit f5013d412a43662b63f3d5f3a804d63213acd471:
selftests: kvm: fix get_run_delay() ignoring fscanf() return warn (2021-09-16 12:57:32 -0600)
are available in the Git repository at:
git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest tags/linux-kselftest-fixes-5.15-rc5
for you to fetch changes up to 2f9602870886af74d97bac23ee6db5f5466d0a49:
selftests: drivers/dma-buf: Fix implicit declaration warns (2021-09-27 09:52:29 -0600)
----------------------------------------------------------------
linux-kselftest-fixes-5.15-rc5
This Kselftest fixes update for Linux 5.15-rc5 consists of a fix
to implicit declaration warns in drivers/dma-buf test.
----------------------------------------------------------------
Shuah Khan (1):
selftests: drivers/dma-buf: Fix implicit declaration warns
tools/testing/selftests/drivers/dma-buf/udmabuf.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------
When a user filters by a suite and not a test, e.g.
$ ./tools/testing/kunit/kunit.py run 'suite_name'
it hits this code
const int len = strlen(filter_glob);
...
parsed->suite_glob = kmalloc(len, GFP_KERNEL);
which fails to allocate space for the terminating NULL.
Somehow, it seems like we can't easily reproduce this under UML, so the
existing `parse_filter_test()` didn't catch this.
Fix this by allocating `len + 1` and switching to kzalloc() just to be a
bit more defensive. We're only going to run this code once per kernel
boot, and it should never be very long.
Also update the unit tests to be a bit more cautious.
This bug showed up as a NULL pointer dereference here:
> KUNIT_EXPECT_STREQ(test, (const char *)filtered.start[0][0]->name, "suite0");
`filtered.start[0][0]` was NULL, and `name` is at offset 0 in the struct,
so `...->name` was also NULL.
Fixes: 3b29021ddd10 ("kunit: tool: allow filtering test cases via glob")
Reported-by: kernel test robot <oliver.sang(a)intel.com>
Signed-off-by: Daniel Latypov <dlatypov(a)google.com>
---
lib/kunit/executor.c | 2 +-
lib/kunit/executor_test.c | 1 +
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index bab3ab940acc..1d7fecd33261 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -33,7 +33,7 @@ static void kunit_parse_filter_glob(struct kunit_test_filter *parsed,
const char *period = strchr(filter_glob, '.');
if (!period) {
- parsed->suite_glob = kmalloc(len, GFP_KERNEL);
+ parsed->suite_glob = kzalloc(len + 1, GFP_KERNEL);
parsed->test_glob = NULL;
strcpy(parsed->suite_glob, filter_glob);
return;
diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c
index e6323f398dfa..7d2b8dc668b1 100644
--- a/lib/kunit/executor_test.c
+++ b/lib/kunit/executor_test.c
@@ -149,6 +149,7 @@ static void filter_suites_test(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start[0]);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start[0][0]);
KUNIT_EXPECT_STREQ(test, (const char *)filtered.start[0][0]->name, "suite0");
}
base-commit: 3b29021ddd10cfb6b2565c623595bd3b02036f33
--
2.33.0.800.g4c38ced690-goog
This change adds some initial kunit tests for the MCTP core. We'll
expand the coverage in a future series, and augment with a few
selftests, but this establishes a baseline set of tests for now.
Thanks to the kunit folks for the framework!
Cheers,
Jeremy
---
v2:
- fix MCTP=m, KUNIT={y,m} breakage
- fix mctp test netdev initialisation
- strict route reference count checking
---
Jeremy Kerr (5):
mctp: Add initial test structure and fragmentation test
mctp: Add test utils
mctp: Add packet rx tests
mctp: Add route input to socket tests
mctp: Add input reassembly tests
net/mctp/Kconfig | 5 +
net/mctp/Makefile | 3 +
net/mctp/route.c | 5 +
net/mctp/test/route-test.c | 544 +++++++++++++++++++++++++++++++++++++
net/mctp/test/utils.c | 67 +++++
net/mctp/test/utils.h | 20 ++
6 files changed, 644 insertions(+)
create mode 100644 net/mctp/test/route-test.c
create mode 100644 net/mctp/test/utils.c
create mode 100644 net/mctp/test/utils.h
--
2.33.0
This change adds some initial kunit tests for the MCTP core. We'll
expand the coverage in a future series, and augment with a few
selftests, but this establishes a baseline set of tests for now.
Thanks to the kunit folks for the framework!
Cheers,
Jeremy
---
Jeremy Kerr (5):
mctp: Add initial test structure and fragmentation test
mctp: Add test utils
mctp: Add packet rx tests
mctp: Add route input to socket tests
mctp: Add input reassembly tests
net/mctp/Kconfig | 5 +
net/mctp/Makefile | 3 +
net/mctp/route.c | 5 +
net/mctp/test/route-test.c | 532 +++++++++++++++++++++++++++++++++++++
net/mctp/test/utils.c | 67 +++++
net/mctp/test/utils.h | 20 ++
6 files changed, 632 insertions(+)
create mode 100644 net/mctp/test/route-test.c
create mode 100644 net/mctp/test/utils.c
create mode 100644 net/mctp/test/utils.h
--
2.33.0
Allow running each suite or each test case alone per kernel boot.
The motivation for this is to debug "test hermeticity" issues.
This new --run_isolated flag would be a good first step to try and
narrow down root causes.
Context: sometimes tests pass/fail depending on what ran before them.
Memory corruption errors in particular might only cause noticeable
issues later on. But you can also have the opposite, where "fixing" one
test causes another to start failing.
Usage:
$ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit --run_isolated=suite
$ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit --run_isolated=test
$ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit --run_isolated=test example
The last one would provide output like
======== [PASSED] example ========
[PASSED] example_simple_test
============================================================
Testing complete. 1 tests run. 0 failed. 0 crashed. 0 skipped.
Starting KUnit Kernel (2/3)...
============================================================
======== [SKIPPED] example ========
[SKIPPED] example_skip_test # SKIP this test should be skipped
============================================================
Testing complete. 1 tests run. 0 failed. 0 crashed. 1 skipped.
Starting KUnit Kernel (3/3)...
============================================================
======== [SKIPPED] example ========
[SKIPPED] example_mark_skipped_test # SKIP this test should be skipped
============================================================
Testing complete. 1 tests run. 0 failed. 0 crashed. 1 skipped.
See the last patch's description for a bit more detail.
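Conceptually, the tool runs a loop like the following per isolated unit
(a hypothetical sketch, not the actual kunit.py code; get_testsuites()
and run_and_parse() are stand-ins for the helpers the series adds):

    # --run_isolated=suite: one kernel boot per suite.
    suites = get_testsuites()  # e.g. listed via the new kunit.action param
    for i, suite in enumerate(suites, start=1):
        print('Starting KUnit Kernel (%d/%d)...' % (i, len(suites)))
        run_and_parse(filter_glob=suite + '.*')
    # --run_isolated=test expands each suite into its test cases and
    # boots once per 'suite.test' glob instead.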
Meta:
The first patch is from another series with just a reworded commit
message, https://lore.kernel.org/linux-kselftest/20210805235145.2528054-2-dlatypov@g…
This patch series is based on Shuah's kunit branch:
https://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git/?…
Changes:
v1 -> v2: rebase onto Shuah's kunit branch, fix missing code in patch 1.
v2 -> v3: fix mypy errors, drop test plan from output, fix pre-existing
bug where kunit was not actually tracking test execution time (new patch 3).
Daniel Latypov (4):
kunit: add 'kunit.action' param to allow listing out tests
kunit: tool: factor exec + parse steps into a function
kunit: tool: actually track how long it took to run tests
kunit: tool: support running each suite/test separately
lib/kunit/executor.c | 45 ++++++++-
tools/testing/kunit/kunit.py | 129 +++++++++++++++++--------
tools/testing/kunit/kunit_tool_test.py | 40 ++++++++
3 files changed, 169 insertions(+), 45 deletions(-)
base-commit: 3b29021ddd10cfb6b2565c623595bd3b02036f33
--
2.33.0.685.g46640cef36-goog
The structleak plugin causes the stack frame size to grow immensely when
used with KUnit; this is because KUnit allocates lots of
moderately sized structs on the stack as part of its assertion macro
implementation. For most tests with small to moderately sized test
cases there are never enough KUnit assertions to be an issue at all;
even when a single test case has many KUnit assertions, the compiler
should never put all these struct allocations on the stack at the same
time since the scope of the structs is so limited; however, the
structleak plugin does not seem to respect the compiler doing the right
thing and will still warn of excessive stack size in some cases.
These patches are not a permanent solution since new tests can be added
with huge test cases, but this serves as a stop gap to stop structleak
from being used on KUnit tests which will currently result in excessive
stack size.
Please see the discussion thread here[1] for more context.
Changes since last revision:
- Dropped mmc: sdhci-of-aspeed patch since it was not a pure test and I
could not reproduce the stack size warning anyway.
- Removed Wframe-larger-than=10240 warning from the bitfield kunit
test.
- All other patches are the same except with updated
reviewers/contributor commit footers.
[1] https://lore.kernel.org/linux-arm-kernel/CAFd5g44udqkDiYBWh+VeDVJ=ELXeoXwun…
Arnd Bergmann (1):
bitfield: build kunit tests without structleak plugin
Brendan Higgins (4):
gcc-plugins/structleak: add makefile var for disabling structleak
iio/test-format: build kunit tests without structleak plugin
device property: build kunit tests without structleak plugin
thunderbolt: build kunit tests without structleak plugin
drivers/base/test/Makefile | 2 +-
drivers/iio/test/Makefile | 1 +
drivers/thunderbolt/Makefile | 1 +
lib/Makefile | 2 +-
scripts/Makefile.gcc-plugins | 4 ++++
5 files changed, 8 insertions(+), 2 deletions(-)
base-commit: 02d5e016800d082058b3d3b7c3ede136cdc6ddcb
--
2.33.0.685.g46640cef36-goog
The structleak plugin causes the stack frame size to grow immensely when
used with KUnit; this is because KUnit allocates lots of
moderately sized structs on the stack as part of its assertion macro
implementation. For most tests with small to moderately sized test
cases there are never enough KUnit assertions to be an issue at all;
even when a single test case has many KUnit assertions, the compiler
should never put all these struct allocations on the stack at the same
time since the scope of the structs is so limited; however, the
structleak plugin does not seem to respect the compiler doing the right
thing and will still warn of excessive stack size in some cases.
These patches are not a permanent solution since new tests can be added
with huge test cases, but this serves as a stop gap to stop structleak
from being used on KUnit tests which will currently result in excessive
stack size.
Of the following patches, I think the thunderbolt patch may be
unnecessary since Linus already fixed that test. Additionally, I was not
able to reproduce the error on the sdhci-of-aspeed test. Nevertheless, I
included these tests cases for completeness. Please see my discussion
with Arnd for more context[1].
NOTE: Arnd did the legwork for most of these patches, but did not
actually share code for some of them, so I left his Signed-off-by off of
those patches as I don't want to misrepresent him. Arnd, please sign off
on those patches at your soonest convenience.
[1] https://lore.kernel.org/linux-arm-kernel/CAFd5g44udqkDiYBWh+VeDVJ=ELXeoXwun…
Arnd Bergmann (1):
bitfield: build kunit tests without structleak plugin
Brendan Higgins (5):
gcc-plugins/structleak: add makefile var for disabling structleak
iio/test-format: build kunit tests without structleak plugin
device property: build kunit tests without structleak plugin
thunderbolt: build kunit tests without structleak plugin
mmc: sdhci-of-aspeed: build kunit tests without structleak plugin
drivers/base/test/Makefile | 2 +-
drivers/iio/test/Makefile | 1 +
drivers/mmc/host/Makefile | 1 +
drivers/thunderbolt/Makefile | 1 +
lib/Makefile | 2 +-
scripts/Makefile.gcc-plugins | 4 ++++
6 files changed, 9 insertions(+), 2 deletions(-)
base-commit: 316346243be6df12799c0b64b788e06bad97c30b
--
2.33.0.464.g1972c5931b-goog
Allow running each suite or each test case alone per kernel boot.
The motivation for this is to debug "test hermeticity" issues.
This new --run_isolated flag would be a good first step to try and
narrow down root causes.
Context: sometimes tests pass/fail depending on what ran before them.
Memory corruption errors in particular might only cause noticeable
issues later on. But you can also have the opposite, where "fixing" one
test causes another to start failing.
Usage:
$ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit --run_isolated=suite
$ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit --run_isolated=test
$ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit --run_isolated=test example
The last one would provide output like
======== [PASSED] example ========
[PASSED] example_simple_test
============================================================
Testing complete. 1 tests run. 0 failed. 0 crashed. 0 skipped.
Starting KUnit Kernel (2/3)...
============================================================
======== [SKIPPED] example ========
[SKIPPED] example_skip_test # SKIP this test should be skipped
============================================================
Testing complete. 1 tests run. 0 failed. 0 crashed. 1 skipped.
Starting KUnit Kernel (3/3)...
============================================================
======== [SKIPPED] example ========
[SKIPPED] example_mark_skipped_test # SKIP this test should be skipped
============================================================
Testing complete. 1 tests run. 0 failed. 0 crashed. 1 skipped.
See the last patch's description for a bit more detail.
Meta:
The first patch is from another series with just a reworded commit
message, https://lore.kernel.org/linux-kselftest/20210805235145.2528054-2-dlatypov@g…
This patch series is based on Shuah's kunit branch:
https://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git/?…
Changes:
v1 -> v2: rebase onto Shuah's kunit branch, fix missing code in patch 1.
Daniel Latypov (3):
kunit: add 'kunit.action' param to allow listing out tests
kunit: tool: factor exec + parse steps into a function
kunit: tool: support running each suite/test separately
lib/kunit/executor.c | 45 ++++++++-
tools/testing/kunit/kunit.py | 127 +++++++++++++++++--------
tools/testing/kunit/kunit_tool_test.py | 40 ++++++++
3 files changed, 167 insertions(+), 45 deletions(-)
base-commit: 3b29021ddd10cfb6b2565c623595bd3b02036f33
--
2.33.0.685.g46640cef36-goog
nr_cpus = CPU_COUNT(&possible_mask) is the number of available CPUs in
possible_mask. As a result, "cpu = i % nr_cpus" may always return a CPU
that is not available in possible_mask.
Suppose the server has 8 CPUs. The failure below is encountered immediately
if the task is bound to CPUs 5 and 6.
==== Test Assertion Failure ====
rseq_test.c:228: i > (NR_TASK_MIGRATIONS / 2)
pid=10127 tid=10127 errno=4 - Interrupted system call
1 0x00000000004018e5: main at rseq_test.c:227
2 0x00007fcc8fc66bf6: ?? ??:0
3 0x0000000000401959: _start at ??:?
Only performed 4 KVM_RUNs, task stalled too much?
Signed-off-by: Dongli Zhang <dongli.zhang(a)oracle.com>
---
tools/testing/selftests/kvm/rseq_test.c | 18 +++++++++++++++++-
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/tools/testing/selftests/kvm/rseq_test.c b/tools/testing/selftests/kvm/rseq_test.c
index c5e0dd664a7b..41df5173970c 100644
--- a/tools/testing/selftests/kvm/rseq_test.c
+++ b/tools/testing/selftests/kvm/rseq_test.c
@@ -10,6 +10,7 @@
#include <signal.h>
#include <syscall.h>
#include <sys/ioctl.h>
+#include <sys/sysinfo.h>
#include <asm/barrier.h>
#include <linux/atomic.h>
#include <linux/rseq.h>
@@ -43,6 +44,18 @@ static bool done;
static atomic_t seq_cnt;
+static int get_max_cpu_idx(void)
+{
+ int nproc = get_nprocs_conf();
+ int i, max = -ENOENT;
+
+ for (i = 0; i < nproc; i++)
+ if (CPU_ISSET(i, &possible_mask))
+ max = i;
+
+ return max;
+}
+
static void guest_code(void)
{
for (;;)
@@ -61,10 +74,13 @@ static void *migration_worker(void *ign)
{
cpu_set_t allowed_mask;
int r, i, nr_cpus, cpu;
+ int max_cpu_idx;
CPU_ZERO(&allowed_mask);
- nr_cpus = CPU_COUNT(&possible_mask);
+ max_cpu_idx = get_max_cpu_idx();
+ TEST_ASSERT(max_cpu_idx >= 0, "Invalid possible_mask");
+ nr_cpus = max_cpu_idx + 1;
for (i = 0; i < NR_TASK_MIGRATIONS; i++) {
cpu = i % nr_cpus;
--
2.17.1
This series fixes up a few issues introduced into vec-syscfg during
refactoring in the review process, then adds a new test which ensures
that the behaviour when we attempt to set a vector length which is not
supported by the current system matches what is documented in the SVE
ABI documentation.
v2:
- Fix handling of missing VLs when checking that vector length setting
works as expected.
Mark Brown (4):
selftests: arm64: Fix printf() format mismatch in vec-syscfg
selftests: arm64: Remove bogus error check on writing to files
selftests: arm64: Fix and enable test for setting current VL in
vec-syscfg
selftests: arm64: Verify that all possible vector lengths are handled
tools/testing/selftests/arm64/fp/vec-syscfg.c | 89 ++++++++++++++++---
1 file changed, 76 insertions(+), 13 deletions(-)
base-commit: 6880fa6c56601bb8ed59df6c30fd390cc5f6dd8f
--
2.20.1
This series fixes up a few issues introduced into vec-syscfg during
refactoring in the review process, then adds a new test which ensures
that, when we attempt to set a vector length that is not supported by
the current system, the behaviour matches what is documented in the SVE
ABI documentation.
v3:
- Rebased onto v5.14-rc3.
- Check to see if we discovered the system vector lengths before trying
to set all possible vector lengths since we need that information to
validate the results.
v2:
- Fix handling of missing VLs when checking that vector length setting
works as expected.
Mark Brown (4):
selftests: arm64: Fix printf() format mismatch in vec-syscfg
selftests: arm64: Remove bogus error check on writing to files
selftests: arm64: Fix and enable test for setting current VL in
vec-syscfg
selftests: arm64: Verify that all possible vector lengths are handled
tools/testing/selftests/arm64/fp/vec-syscfg.c | 95 ++++++++++++++++---
1 file changed, 82 insertions(+), 13 deletions(-)
base-commit: 5816b3e6577eaa676ceb00a848f0fd65fe2adc29
--
2.20.1
This series overhauls the selftests we have for the SVE ptrace interface
to make them much more comprehensive than they are currently, making the
coverage of the data read and written more complete. The new coverage
for setting data on all vector lengths showed the issue with using the
wrong buffer size with ptrace reported and fixed by:
https://lore.kernel.org/linux-arm-kernel/20210909165356.10675-1-broonie@ker…
(arm64/sve: Use correct size when reinitialising SVE state).
Mark Brown (8):
selftests: arm64: Use a define for the number of SVE ptrace tests to
be run
selftests: arm64: Don't log child creation as a test in SVE ptrace
test
selftests: arm64: Remove extraneous register setting code
selftests: arm64: Document what the SVE ptrace test is doing
selftests: arm64: Clarify output when verifying SVE register set
selftests: arm64: Verify interoperation of SVE and FPSIMD register
sets
selftests: arm64: More comprehensively test the SVE ptrace interface
selftests: arm64: Move FPSIMD in SVE ptrace test into a function
tools/testing/selftests/arm64/fp/Makefile | 2 +-
tools/testing/selftests/arm64/fp/TODO | 9 +-
.../selftests/arm64/fp/sve-ptrace-asm.S | 33 --
tools/testing/selftests/arm64/fp/sve-ptrace.c | 460 ++++++++++++------
4 files changed, 321 insertions(+), 183 deletions(-)
delete mode 100644 tools/testing/selftests/arm64/fp/sve-ptrace-asm.S
base-commit: 6880fa6c56601bb8ed59df6c30fd390cc5f6dd8f
--
2.20.1
RFC: https://lkml.org/lkml/2021/6/4/791
PATCH v1: https://lkml.org/lkml/2021/6/16/805
PATCH v2: https://lkml.org/lkml/2021/7/6/138
PATCH v3: https://lkml.org/lkml/2021/7/12/2799
PATCH v4: https://lkml.org/lkml/2021/7/16/532
PATCH v5: https://lkml.org/lkml/2021/7/19/247
PATCH v6: https://lkml.org/lkml/2021/7/20/36
PATCH v7: https://lkml.org/lkml/2021/7/23/26
Changelog v7-->v8
1. Rebased and tested against 5.15
2. Added a selftest to check that the energy and frequency attributes
exist and that their files are populated
Also implemented a POC using this interface for the powerpc-utils'
ppc64_cpu --frequency command-line tool to utilize this information
in userspace.
The POC for the new interface has been sent to the powerpc-utils mailing
list for early review: https://groups.google.com/g/powerpc-utils-devel/c/r4i7JnlyQ8s
Sample output from the powerpc-utils tool is as follows:
# ppc64_cpu --frequency
Power and Performance Mode: XXXX
Idle Power Saver Status : XXXX
Processor Folding Status : XXXX --> Printed if Idle power save status is supported
Platform reported frequencies --> Frequencies reported from the platform's H_CALL i.e PAPR interface
min : NNNN GHz
max : NNNN GHz
static : NNNN GHz
Tool Computed frequencies
min : NNNN GHz (cpu XX)
max : NNNN GHz (cpu XX)
avg : NNNN GHz
Pratik R. Sampat (2):
powerpc/pseries: Interface to represent PAPR firmware attributes
selftest/powerpc: Add PAPR sysfs attributes sniff test
.../sysfs-firmware-papr-energy-scale-info | 26 ++
arch/powerpc/include/asm/hvcall.h | 24 +-
arch/powerpc/kvm/trace_hv.h | 1 +
arch/powerpc/platforms/pseries/Makefile | 3 +-
.../pseries/papr_platform_attributes.c | 312 ++++++++++++++++++
tools/testing/selftests/powerpc/Makefile | 1 +
.../powerpc/papr_attributes/.gitignore | 2 +
.../powerpc/papr_attributes/Makefile | 7 +
.../powerpc/papr_attributes/attr_test.c | 107 ++++++
9 files changed, 481 insertions(+), 2 deletions(-)
create mode 100644 Documentation/ABI/testing/sysfs-firmware-papr-energy-scale-info
create mode 100644 arch/powerpc/platforms/pseries/papr_platform_attributes.c
create mode 100644 tools/testing/selftests/powerpc/papr_attributes/.gitignore
create mode 100644 tools/testing/selftests/powerpc/papr_attributes/Makefile
create mode 100644 tools/testing/selftests/powerpc/papr_attributes/attr_test.c
--
2.31.1
The phc.sh script in the ptp directory is still using exit 0 when
the test has been skipped due to unmet requirements.
Use the kselftest framework skip code instead so it can help us
distinguish the return status.
Criterion used to find what should be fixed in the ptp directory:
grep -r "exit 0" -B1 | grep -i skip
This change might cause some false positives if people are running
these test scripts directly and only checking their return codes,
which will change from 0 to 4. However I think the impact should be
small as most of our scripts here are already using this skip code.
And there will be no such issue if running them with the kselftest
framework.
Note that some SKIP messages exit with 1; I leave those
unchanged.
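For reference, C-based selftests express the same convention through the
shared kselftest.h helpers. A minimal sketch, assuming the stock
tools/testing/selftests/kselftest.h:

#include <unistd.h>
#include "../kselftest.h"	/* defines KSFT_SKIP (4) and ksft_exit_skip() */

int main(void)
{
	if (getuid() != 0)
		ksft_exit_skip("need root privileges\n");	/* exits with 4 */

	/* ... actual test body ... */
	ksft_exit_pass();
}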
Signed-off-by: Po-Hsu Lin <po-hsu.lin(a)canonical.com>
---
tools/testing/selftests/ptp/phc.sh | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/tools/testing/selftests/ptp/phc.sh b/tools/testing/selftests/ptp/phc.sh
index ac6e5a6..0820544 100755
--- a/tools/testing/selftests/ptp/phc.sh
+++ b/tools/testing/selftests/ptp/phc.sh
@@ -1,6 +1,9 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+# Kselftest framework requirement - SKIP code is 4.
+readonly KSFT_SKIP=4
+
ALL_TESTS="
settime
adjtime
@@ -13,12 +16,12 @@ DEV=$1
if [[ "$(id -u)" -ne 0 ]]; then
echo "SKIP: need root privileges"
- exit 0
+ exit $KSFT_SKIP
fi
if [[ "$DEV" == "" ]]; then
echo "SKIP: PTP device not provided"
- exit 0
+ exit $KSFT_SKIP
fi
require_command()
--
2.7.4
There are several test cases in the bpf directory that are still using
exit 0 when they need to be skipped. Use the kselftest framework skip
code instead so it can help us distinguish the return status.
Criterion used to find what should be fixed in the bpf directory:
grep -r "exit 0" -B1 | grep -i skip
This change might cause some false positives if people are running
these test scripts directly and only checking their return codes,
which will change from 0 to 4. However I think the impact should be
small as most of our scripts here are already using this skip code.
And there will be no such issue if running them with the kselftest
framework.
Signed-off-by: Po-Hsu Lin <po-hsu.lin(a)canonical.com>
---
tools/testing/selftests/bpf/test_bpftool_build.sh | 5 ++++-
tools/testing/selftests/bpf/test_xdp_meta.sh | 5 ++++-
tools/testing/selftests/bpf/test_xdp_vlan.sh | 7 +++++--
3 files changed, 13 insertions(+), 4 deletions(-)
diff --git a/tools/testing/selftests/bpf/test_bpftool_build.sh b/tools/testing/selftests/bpf/test_bpftool_build.sh
index ac349a5..b6fab1e 100755
--- a/tools/testing/selftests/bpf/test_bpftool_build.sh
+++ b/tools/testing/selftests/bpf/test_bpftool_build.sh
@@ -1,6 +1,9 @@
#!/bin/bash
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
case $1 in
-h|--help)
echo -e "$0 [-j <n>]"
@@ -22,7 +25,7 @@ KDIR_ROOT_DIR=$(realpath $PWD/$SCRIPT_REL_DIR/../../../../)
cd $KDIR_ROOT_DIR
if [ ! -e tools/bpf/bpftool/Makefile ]; then
echo -e "skip: bpftool files not found!\n"
- exit 0
+ exit $ksft_skip
fi
ERROR=0
diff --git a/tools/testing/selftests/bpf/test_xdp_meta.sh b/tools/testing/selftests/bpf/test_xdp_meta.sh
index 637fcf4..fd3f218 100755
--- a/tools/testing/selftests/bpf/test_xdp_meta.sh
+++ b/tools/testing/selftests/bpf/test_xdp_meta.sh
@@ -1,5 +1,8 @@
#!/bin/sh
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
cleanup()
{
if [ "$?" = "0" ]; then
@@ -17,7 +20,7 @@ cleanup()
ip link set dev lo xdp off 2>/dev/null > /dev/null
if [ $? -ne 0 ];then
echo "selftests: [SKIP] Could not run test without the ip xdp support"
- exit 0
+ exit $ksft_skip
fi
set -e
diff --git a/tools/testing/selftests/bpf/test_xdp_vlan.sh b/tools/testing/selftests/bpf/test_xdp_vlan.sh
index bb8b0da..1aa7404 100755
--- a/tools/testing/selftests/bpf/test_xdp_vlan.sh
+++ b/tools/testing/selftests/bpf/test_xdp_vlan.sh
@@ -2,6 +2,9 @@
# SPDX-License-Identifier: GPL-2.0
# Author: Jesper Dangaard Brouer <hawk(a)kernel.org>
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
# Allow wrapper scripts to name test
if [ -z "$TESTNAME" ]; then
TESTNAME=xdp_vlan
@@ -94,7 +97,7 @@ while true; do
-h | --help )
usage;
echo "selftests: $TESTNAME [SKIP] usage help info requested"
- exit 0
+ exit $ksft_skip
;;
* )
shift
@@ -117,7 +120,7 @@ fi
ip link set dev lo xdpgeneric off 2>/dev/null > /dev/null
if [ $? -ne 0 ]; then
echo "selftests: $TESTNAME [SKIP] need ip xdp support"
- exit 0
+ exit $ksft_skip
fi
# Interactive mode likely require us to cleanup netns
--
2.7.4
There are several test cases in the bpf directory that are still using
exit 0 when they need to be skipped. Use the kselftest framework skip
code instead so it can help us distinguish the return status.
Criterion used to find what should be fixed in the bpf directory:
grep -r "exit 0" -B1 | grep -i skip
This change might cause some false positives if people are running
these test scripts directly and only checking their return codes,
which will change from 0 to 4. However I think the impact should be
small as most of our scripts here are already using this skip code.
And there will be no such issue if running them with the kselftest
framework.
v1 -> v2:
- Ignore bpf/test_bpftool_build.sh as similar changes has been made.
- Make KSFT_SKIP readonly as suggested by Jakub Sitnicki.
Signed-off-by: Po-Hsu Lin <po-hsu.lin(a)canonical.com>
---
tools/testing/selftests/bpf/test_xdp_meta.sh | 5 ++++-
tools/testing/selftests/bpf/test_xdp_vlan.sh | 7 +++++--
2 files changed, 9 insertions(+), 3 deletions(-)
diff --git a/tools/testing/selftests/bpf/test_xdp_meta.sh b/tools/testing/selftests/bpf/test_xdp_meta.sh
index 637fcf4..d10cefd 100755
--- a/tools/testing/selftests/bpf/test_xdp_meta.sh
+++ b/tools/testing/selftests/bpf/test_xdp_meta.sh
@@ -1,5 +1,8 @@
#!/bin/sh
+# Kselftest framework requirement - SKIP code is 4.
+readonly KSFT_SKIP=4
+
cleanup()
{
if [ "$?" = "0" ]; then
@@ -17,7 +20,7 @@ cleanup()
ip link set dev lo xdp off 2>/dev/null > /dev/null
if [ $? -ne 0 ];then
echo "selftests: [SKIP] Could not run test without the ip xdp support"
- exit 0
+ exit $KSFT_SKIP
fi
set -e
diff --git a/tools/testing/selftests/bpf/test_xdp_vlan.sh b/tools/testing/selftests/bpf/test_xdp_vlan.sh
index bb8b0da..0cbc760 100755
--- a/tools/testing/selftests/bpf/test_xdp_vlan.sh
+++ b/tools/testing/selftests/bpf/test_xdp_vlan.sh
@@ -2,6 +2,9 @@
# SPDX-License-Identifier: GPL-2.0
# Author: Jesper Dangaard Brouer <hawk(a)kernel.org>
+# Kselftest framework requirement - SKIP code is 4.
+readonly KSFT_SKIP=4
+
# Allow wrapper scripts to name test
if [ -z "$TESTNAME" ]; then
TESTNAME=xdp_vlan
@@ -94,7 +97,7 @@ while true; do
-h | --help )
usage;
echo "selftests: $TESTNAME [SKIP] usage help info requested"
- exit 0
+ exit $KSFT_SKIP
;;
* )
shift
@@ -117,7 +120,7 @@ fi
ip link set dev lo xdpgeneric off 2>/dev/null > /dev/null
if [ $? -ne 0 ]; then
echo "selftests: $TESTNAME [SKIP] need ip xdp support"
- exit 0
+ exit $KSFT_SKIP
fi
# Interactive mode likely require us to cleanup netns
--
2.7.4
Problem:
What does this do?
$ kunit.py run --json
Well, it runs all the tests and prints test results out as JSON.
And next is
$ kunit.py run my-test-suite --json
This runs just `my-test-suite` and prints results out as JSON.
But what about?
$ kunit.py run --json my-test-suite
This runs all the tests and stores the json results in a "my-test-suite"
file.
Why:
--json, and now --raw_output are actually string flags. They just have a
default value. --json in particular takes the name of an output file.
It was intended that you'd do
$ kunit.py run --json=my_output_file my-test-suite
if you ever wanted to specify the value.
Workaround:
It doesn't seem like there's a way to make argparse
(https://docs.python.org/3/library/argparse.html) only accept arg values
after a '='.
I believe that `--json` should "just work" regardless of where it is.
So this patch automatically rewrites a bare `--json` to `--json=stdout`.
That makes the examples above work the same way.
Add a regression test that can catch this for --raw_output.
Fixes: 6a499c9c42d0 ("kunit: tool: make --raw_output support only showing kunit output")
Signed-off-by: Daniel Latypov <dlatypov(a)google.com>
Tested-by: David Gow <davidgow(a)google.com>
---
v1 -> v2: fix mypy error by converting mapped argv to a list.
---
tools/testing/kunit/kunit.py | 24 ++++++++++++++++++++++--
tools/testing/kunit/kunit_tool_test.py | 8 ++++++++
2 files changed, 30 insertions(+), 2 deletions(-)
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index 5a931456e718..ac35c61f65f5 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -16,7 +16,7 @@ assert sys.version_info >= (3, 7), "Python version is too old"
from collections import namedtuple
from enum import Enum, auto
-from typing import Iterable
+from typing import Iterable, Sequence
import kunit_config
import kunit_json
@@ -186,6 +186,26 @@ def run_tests(linux: kunit_kernel.LinuxSourceTree,
exec_result.elapsed_time))
return parse_result
+# Problem:
+# $ kunit.py run --json
+# works as one would expect and prints the parsed test results as JSON.
+# $ kunit.py run --json suite_name
+# would *not* pass suite_name as the filter_glob and print as json.
+# argparse will consider it to be another way of writing
+# $ kunit.py run --json=suite_name
+# i.e. it would run all tests, and dump the json to a `suite_name` file.
+# So we hackily automatically rewrite --json => --json=stdout
+pseudo_bool_flag_defaults = {
+ '--json': 'stdout',
+ '--raw_output': 'kunit',
+}
+def massage_argv(argv: Sequence[str]) -> Sequence[str]:
+ def massage_arg(arg: str) -> str:
+ if arg not in pseudo_bool_flag_defaults:
+ return arg
+ return f'{arg}={pseudo_bool_flag_defaults[arg]}'
+ return list(map(massage_arg, argv))
+
def add_common_opts(parser) -> None:
parser.add_argument('--build_dir',
help='As in the make command, it specifies the build '
@@ -303,7 +323,7 @@ def main(argv, linux=None):
help='Specifies the file to read results from.',
type=str, nargs='?', metavar='input_file')
- cli_args = parser.parse_args(argv)
+ cli_args = parser.parse_args(massage_argv(argv))
if get_kernel_root_path():
os.chdir(get_kernel_root_path())
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index 619c4554cbff..1edcc8373b4e 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -408,6 +408,14 @@ class KUnitMainTest(unittest.TestCase):
self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))
+ def test_run_raw_output_does_not_take_positional_args(self):
+ # --raw_output is a string flag, but we don't want it to consume
+ # any positional arguments, only ones after an '='
+ self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
+ kunit.main(['run', '--raw_output', 'filter_glob'], self.linux_source_mock)
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+ args=None, build_dir='.kunit', filter_glob='filter_glob', timeout=300)
+
def test_exec_timeout(self):
timeout = 3453
kunit.main(['exec', '--timeout', str(timeout)], self.linux_source_mock)
base-commit: 4c17ca27923c16fd73bbb9ad033c7d749c3bcfcc
--
2.33.0.464.g1972c5931b-goog
From: Li Zhijian <lizhijian(a)cn.fujitsu.com>
[ Upstream commit 8914a7a247e065438a0ec86a58c1c359223d2c9e ]
LKP/0Day reported some build errors about kvm, and the error messages
are not always the same:
- lib/x86_64/processor.c:1083:31: error: ‘KVM_CAP_NESTED_STATE’ undeclared
(first use in this function); did you mean ‘KVM_CAP_PIT_STATE2’?
- lib/test_util.c:189:30: error: ‘MAP_HUGE_16KB’ undeclared (first use
in this function); did you mean ‘MAP_HUGE_16GB’?
Although kvm relies on khdr, the two are still built in parallel when -j
is specified, which causes these compile errors.
Mark the khdr target as .NOTPARALLEL so that it is always built first.
CC: Philip Li <philip.li(a)intel.com>
Reported-by: kernel test robot <lkp(a)intel.com>
Signed-off-by: Li Zhijian <lizhijian(a)cn.fujitsu.com>
Signed-off-by: Shuah Khan <skhan(a)linuxfoundation.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
tools/testing/selftests/lib.mk | 1 +
1 file changed, 1 insertion(+)
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index a5d40653a921..9700281bee4c 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -26,6 +26,7 @@ include $(top_srcdir)/scripts/subarch.include
ARCH ?= $(SUBARCH)
.PHONY: khdr
+.NOTPARALLEL:
khdr:
make ARCH=$(ARCH) -C $(top_srcdir) headers_install
--
2.33.0
From: Li Zhijian <lizhijian(a)cn.fujitsu.com>
[ Upstream commit 8914a7a247e065438a0ec86a58c1c359223d2c9e ]
LKP/0Day reported some build errors about kvm, and the error messages
are not always the same:
- lib/x86_64/processor.c:1083:31: error: ‘KVM_CAP_NESTED_STATE’ undeclared
(first use in this function); did you mean ‘KVM_CAP_PIT_STATE2’?
- lib/test_util.c:189:30: error: ‘MAP_HUGE_16KB’ undeclared (first use
in this function); did you mean ‘MAP_HUGE_16GB’?
Although kvm relies on khdr, the two are still built in parallel when -j
is specified, which causes these compile errors.
Mark the khdr target as .NOTPARALLEL so that it is always built first.
CC: Philip Li <philip.li(a)intel.com>
Reported-by: kernel test robot <lkp(a)intel.com>
Signed-off-by: Li Zhijian <lizhijian(a)cn.fujitsu.com>
Signed-off-by: Shuah Khan <skhan(a)linuxfoundation.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
tools/testing/selftests/lib.mk | 1 +
1 file changed, 1 insertion(+)
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 67386aa3f31d..8794ce382bf5 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -48,6 +48,7 @@ ARCH ?= $(SUBARCH)
# When local build is done, headers are installed in the default
# INSTALL_HDR_PATH usr/include.
.PHONY: khdr
+.NOTPARALLEL:
khdr:
ifndef KSFT_KHDR_INSTALL_DONE
ifeq (1,$(DEFAULT_INSTALL_HDR_PATH))
--
2.33.0
From: Li Zhijian <lizhijian(a)cn.fujitsu.com>
[ Upstream commit 8914a7a247e065438a0ec86a58c1c359223d2c9e ]
LKP/0Day reported some build errors about kvm, and the error messages
are not always the same:
- lib/x86_64/processor.c:1083:31: error: ‘KVM_CAP_NESTED_STATE’ undeclared
(first use in this function); did you mean ‘KVM_CAP_PIT_STATE2’?
- lib/test_util.c:189:30: error: ‘MAP_HUGE_16KB’ undeclared (first use
in this function); did you mean ‘MAP_HUGE_16GB’?
Although kvm relies on khdr, the two are still built in parallel when -j
is specified, which causes these compile errors.
Mark the khdr target as .NOTPARALLEL so that it is always built first.
CC: Philip Li <philip.li(a)intel.com>
Reported-by: kernel test robot <lkp(a)intel.com>
Signed-off-by: Li Zhijian <lizhijian(a)cn.fujitsu.com>
Signed-off-by: Shuah Khan <skhan(a)linuxfoundation.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
tools/testing/selftests/lib.mk | 1 +
1 file changed, 1 insertion(+)
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 0af84ad48aa7..b7217b5251f5 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -48,6 +48,7 @@ ARCH ?= $(SUBARCH)
# When local build is done, headers are installed in the default
# INSTALL_HDR_PATH usr/include.
.PHONY: khdr
+.NOTPARALLEL:
khdr:
ifndef KSFT_KHDR_INSTALL_DONE
ifeq (1,$(DEFAULT_INSTALL_HDR_PATH))
--
2.33.0
From: Li Zhijian <lizhijian(a)cn.fujitsu.com>
[ Upstream commit 8914a7a247e065438a0ec86a58c1c359223d2c9e ]
LKP/0Day reported some build errors about kvm, and the error messages
are not always the same:
- lib/x86_64/processor.c:1083:31: error: ‘KVM_CAP_NESTED_STATE’ undeclared
(first use in this function); did you mean ‘KVM_CAP_PIT_STATE2’?
- lib/test_util.c:189:30: error: ‘MAP_HUGE_16KB’ undeclared (first use
in this function); did you mean ‘MAP_HUGE_16GB’?
Although kvm relies on khdr, the two are still built in parallel when -j
is specified, which causes these compile errors.
Mark the khdr target as .NOTPARALLEL so that it is always built first.
CC: Philip Li <philip.li(a)intel.com>
Reported-by: kernel test robot <lkp(a)intel.com>
Signed-off-by: Li Zhijian <lizhijian(a)cn.fujitsu.com>
Signed-off-by: Shuah Khan <skhan(a)linuxfoundation.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
tools/testing/selftests/lib.mk | 1 +
1 file changed, 1 insertion(+)
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index fa2ac0e56b43..fe7ee2b0f29c 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -48,6 +48,7 @@ ARCH ?= $(SUBARCH)
# When local build is done, headers are installed in the default
# INSTALL_HDR_PATH usr/include.
.PHONY: khdr
+.NOTPARALLEL:
khdr:
ifndef KSFT_KHDR_INSTALL_DONE
ifeq (1,$(DEFAULT_INSTALL_HDR_PATH))
--
2.33.0
On Fri, Sep 17, 2021 at 10:04:18PM -0700, Luis Chamberlain wrote:
> In this v7 I've decided it is best to merge all the effort together into
> one patch set because communication was being lost when I split the
> patches up. This was not helping in any way to either fix the zram
> issues or come to consensus on a generic solution. The patches are also
> merged now because they are all related now.
Building up all the testing framework is really great. I have no opinions
about the license related stuff but all other changes generally look good to
me.
Thanks.
--
tejun
Hi Linus,
Please pull the following Kselftest fixes update for Linux 5.15-rc3.
This Kselftest fixes update for Linux 5.15-rc3 consists of:
- fix to the Kselftest common framework header install to run before
other targets so that it works correctly in the parallel build case.
- fixes to kvm tests to not ignore fscanf() returns, which could
result in inconsistent test behavior and failures.
diff is attached.
thanks,
-- Shuah
----------------------------------------------------------------
The following changes since commit 6880fa6c56601bb8ed59df6c30fd390cc5f6dd8f:
Linux 5.15-rc1 (2021-09-12 16:28:37 -0700)
are available in the Git repository at:
git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest tags/linux-kselftest-fixes-5.15-rc3
for you to fetch changes up to f5013d412a43662b63f3d5f3a804d63213acd471:
selftests: kvm: fix get_run_delay() ignoring fscanf() return warn (2021-09-16 12:57:32 -0600)
----------------------------------------------------------------
linux-kselftest-fixes-5.15-rc3
This Kselftest fixes update for Linux 5.15-rc3 consists of:
- fix to the Kselftest common framework header install to run before
other targets so that it works correctly in the parallel build case.
- fixes to kvm tests to not ignore fscanf() returns, which could
result in inconsistent test behavior and failures.
----------------------------------------------------------------
Li Zhijian (1):
selftests: be sure to make khdr before other targets
Shuah Khan (4):
selftests:kvm: fix get_warnings_count() ignoring fscanf() return warn
selftests:kvm: fix get_trans_hugepagesz() ignoring fscanf() return warn
selftests: kvm: move get_run_delay() into lib/test_util
selftests: kvm: fix get_run_delay() ignoring fscanf() return warn
tools/testing/selftests/kvm/include/test_util.h | 3 +++
tools/testing/selftests/kvm/lib/test_util.c | 22 +++++++++++++++++++++-
tools/testing/selftests/kvm/steal_time.c | 16 ----------------
.../selftests/kvm/x86_64/mmio_warning_test.c | 3 ++-
.../testing/selftests/kvm/x86_64/xen_shinfo_test.c | 15 ---------------
tools/testing/selftests/lib.mk | 1 +
6 files changed, 27 insertions(+), 33 deletions(-)
----------------------------------------------------------------
This is similar to TCP MD5 in functionality, but it is sufficiently
different that the wire formats are incompatible. Compared to TCP-MD5,
more algorithms are supported and multiple keys can be used on the same
connection, but there is still no negotiation mechanism.
Expected use-case is protecting long-duration BGP/LDP connections
between routers using pre-shared keys. The goal of this series is to
allow routers using the linux TCP stack to interoperate with vendors
such as Cisco and Juniper.
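For a concrete feel of the intended usage, below is a rough sketch of
installing a pre-shared key on a BGP socket. The TCP_AUTHOPT_KEY sockopt
and the send_id/recv_id/addr fields follow this cover letter's
descriptions, but the exact struct layout and constant values are
assumptions for illustration, not a merged kernel ABI:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <linux/types.h>

#ifndef TCP_AUTHOPT_KEY
#define TCP_AUTHOPT_KEY 62			/* placeholder value */
#endif

struct tcp_authopt_key_sketch {			/* hypothetical uapi mirror */
	__u32 flags;
	__u8 send_id;				/* RFC5925 KeyID */
	__u8 recv_id;				/* RFC5925 RNextKeyID */
	__u8 alg;				/* HMAC-SHA-1-96 or AES-128-CMAC-96 */
	__u8 keylen;
	__u8 key[80];
	struct sockaddr_storage addr;		/* optional peer address binding */
};

static int add_bgp_key(int sk, const char *secret)
{
	struct tcp_authopt_key_sketch k = {
		.send_id = 1,
		.recv_id = 1,
	};

	k.keylen = strlen(secret);
	memcpy(k.key, secret, k.keylen);
	return setsockopt(sk, IPPROTO_TCP, TCP_AUTHOPT_KEY, &k, sizeof(k));
}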
Both algorithms described in RFC5926 are implemented but the code is not
very easily extensible beyond that. In particular, there are several code
paths making stack allocations based on the RFC5926 maximum; those would
have to be increased.
This version incorporates previous feedback and expands the handling
of timewait sockets and RST packets. Here are some known flaws and
limits:
* Interaction with TCP-MD5 is not tested in all corners
* Interaction with FASTOPEN is not tested but unlikely to work because
of sequence number assumptions for syn/ack.
* Sequence Number Extension is not implemented so connections will flap
every ~4G of traffic.
* Not clear if crypto_shash_setkey might sleep. If some implementations
do that then maybe they could be excluded through alloc flags.
* Traffic key is not cached (reducing performance)
* User is responsible for ensuring keys do not overlap
I labeled this as [PATCH] because the issues above are not critical.
Test suite was added to tools/selftests/tcp_authopt. Tests are written
in python using pytest and scapy and check the API in some detail and
validate packet captures. Python code is already used in linux and in
kselftests, but virtualenvs are not used very much. This test suite uses `tox` to
create a private virtualenv and hide dependencies. There is no clear
guidance for how to add python-based kselftests so I made it up.
Limited testing support is also included in nettest and fcnal-test.sh.
Coverage is extremely limited, I did not expand it because the tests run
too slowly.
Changes for frr: https://github.com/FRRouting/frr/pull/9442
That PR was made early for ABI feedback, it has many issues.
Changes for yabgp: https://github.com/cdleonard/yabgp/commits/tcp_authopt
This can be use for easy interoperability testing with cisco/juniper/etc.
Changes since RFCv3:
* Implement TCP_AUTHOPT handling for timewait and reset replies. Write
tests to execute these paths by injecting packets with scapy
* Handle combining md5 and authopt: if both are configured use authopt.
* Fix locking issues around send_key, introduced in one of the later
patches.
* Handle IPv4-mapped-IPv6 addresses: it used to be that an ipv4 SYN sent
to an ipv6 socket with TCP-AO triggered a WARN
* Implement an un-namespaced sysctl disabling this feature by default
* Allocate new key before removing any old one in setsockopt (Dmitry)
* Remove tcp_authopt_key_info.local_id because it's no longer used (Dmitry)
* Propagate errors from TCP_AUTHOPT getsockopt (Dmitry)
* Fix no-longer-correct TCP_AUTHOPT_KEY_DEL docs (Dmitry)
* Simplify crypto allocation (Eric)
* Use kzmalloc instead of __GFP_ZERO (Eric)
* Add static_key_false tcp_authopt_needed (Eric)
* Clear authopt_info copied from oldsk in __tcp_authopt_openreq (Eric)
* Replace memcmp in ipv4 and ipv6 addr comparisons (Eric)
* Export symbols for CONFIG_IPV6=m (kernel test robot)
* Mark more functions static (kernel test robot)
* Fix build with CONFIG_PROVE_RCU_LIST=y (kernel test robot)
Link: https://lore.kernel.org/netdev/cover.1629840814.git.cdleonard@gmail.com/
Changes since RFCv2:
* Removed local_id from ABI and match on send_id/recv_id/addr
* Add all relevant out-of-tree tests to tools/testing/selftests
* Return an error instead of ignoring unknown flags, hopefully this makes
it easier to extend.
* Check sk_family before __tcp_authopt_info_get_or_create in tcp_set_authopt_key
* Use sock_owned_by_me instead of WARN_ON(!lockdep_sock_is_held(sk))
* Fix some intermediate build failures reported by kbuild robot
* Improve documentation
Link: https://lore.kernel.org/netdev/cover.1628544649.git.cdleonard@gmail.com/
Changes since RFC:
* Split into per-topic commits for ease of review. The intermediate
commits compile with a few "unused function" warnings and don't do
anything useful by themselves.
* Add ABI documention including kernel-doc on uapi
* Fix lockdep warnings from crypto by creating pools with one shash for
each cpu
* Accept short options to setsockopt by padding with zeros; this
approach allows increasing the size of the structs in the future.
* Support for aes-128-cmac-96
* Support for binding addresses to keys in a way similar to old tcp_md5
* Add support for retrieving the received keyid/rnextkeyid and controlling
the keyid/rnextkeyid being sent.
Link: https://lore.kernel.org/netdev/01383a8751e97ef826ef2adf93bfde3a08195a43.162…
Leonard Crestez (19):
tcp: authopt: Initial support and key management
docs: Add user documentation for tcp_authopt
selftests: Initial tcp_authopt test module
selftests: tcp_authopt: Initial sockopt manipulation
tcp: authopt: Add crypto initialization
tcp: authopt: Compute packet signatures
tcp: authopt: Hook into tcp core
tcp: authopt: Disable via sysctl by default
selftests: tcp_authopt: Test key address binding
tcp: ipv6: Add AO signing for tcp_v6_send_response
tcp: authopt: Add support for signing skb-less replies
tcp: ipv4: Add AO signing for skb-less replies
selftests: tcp_authopt: Add scapy-based packet signing code
selftests: tcp_authopt: Add packet-level tests
selftests: Initial tcp_authopt support for nettest
selftests: Initial tcp_authopt support for fcnal-test
selftests: Add -t tcp_authopt option for fcnal-test.sh
tcp: authopt: Add key selection controls
selftests: tcp_authopt: Add tests for rollover
Documentation/networking/index.rst | 1 +
Documentation/networking/ip-sysctl.rst | 6 +
Documentation/networking/tcp_authopt.rst | 69 +
include/linux/tcp.h | 9 +
include/net/tcp.h | 1 +
include/net/tcp_authopt.h | 200 +++
include/uapi/linux/snmp.h | 1 +
include/uapi/linux/tcp.h | 110 ++
net/ipv4/Kconfig | 14 +
net/ipv4/Makefile | 1 +
net/ipv4/proc.c | 1 +
net/ipv4/sysctl_net_ipv4.c | 10 +
net/ipv4/tcp.c | 30 +
net/ipv4/tcp_authopt.c | 1450 +++++++++++++++++
net/ipv4/tcp_input.c | 17 +
net/ipv4/tcp_ipv4.c | 101 +-
net/ipv4/tcp_minisocks.c | 12 +
net/ipv4/tcp_output.c | 80 +-
net/ipv6/tcp_ipv6.c | 56 +-
tools/testing/selftests/net/fcnal-test.sh | 34 +
tools/testing/selftests/net/nettest.c | 34 +-
tools/testing/selftests/tcp_authopt/Makefile | 5 +
.../testing/selftests/tcp_authopt/README.rst | 15 +
tools/testing/selftests/tcp_authopt/config | 6 +
.../selftests/tcp_authopt/requirements.txt | 40 +
tools/testing/selftests/tcp_authopt/run.sh | 15 +
tools/testing/selftests/tcp_authopt/setup.cfg | 17 +
tools/testing/selftests/tcp_authopt/setup.py | 5 +
.../tcp_authopt/tcp_authopt_test/__init__.py | 0
.../tcp_authopt/tcp_authopt_test/conftest.py | 41 +
.../full_tcp_sniff_session.py | 81 +
.../tcp_authopt_test/linux_tcp_authopt.py | 248 +++
.../tcp_authopt_test/linux_tcp_md5sig.py | 95 ++
.../tcp_authopt_test/netns_fixture.py | 83 +
.../tcp_authopt_test/scapy_conntrack.py | 150 ++
.../tcp_authopt_test/scapy_tcp_authopt.py | 211 +++
.../tcp_authopt_test/scapy_utils.py | 176 ++
.../tcp_authopt/tcp_authopt_test/server.py | 95 ++
.../tcp_authopt/tcp_authopt_test/sockaddr.py | 112 ++
.../tcp_connection_fixture.py | 269 +++
.../tcp_authopt/tcp_authopt_test/test_bind.py | 145 ++
.../tcp_authopt_test/test_rollover.py | 180 ++
.../tcp_authopt_test/test_sockopt.py | 185 +++
.../tcp_authopt_test/test_vectors.py | 359 ++++
.../tcp_authopt_test/test_verify_capture.py | 555 +++++++
.../tcp_authopt/tcp_authopt_test/utils.py | 102 ++
.../tcp_authopt/tcp_authopt_test/validator.py | 127 ++
47 files changed, 5544 insertions(+), 10 deletions(-)
create mode 100644 Documentation/networking/tcp_authopt.rst
create mode 100644 include/net/tcp_authopt.h
create mode 100644 net/ipv4/tcp_authopt.c
create mode 100644 tools/testing/selftests/tcp_authopt/Makefile
create mode 100644 tools/testing/selftests/tcp_authopt/README.rst
create mode 100644 tools/testing/selftests/tcp_authopt/config
create mode 100644 tools/testing/selftests/tcp_authopt/requirements.txt
create mode 100755 tools/testing/selftests/tcp_authopt/run.sh
create mode 100644 tools/testing/selftests/tcp_authopt/setup.cfg
create mode 100644 tools/testing/selftests/tcp_authopt/setup.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/__init__.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/conftest.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/full_tcp_sniff_session.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/linux_tcp_authopt.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/linux_tcp_md5sig.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/netns_fixture.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/scapy_conntrack.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/scapy_tcp_authopt.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/scapy_utils.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/server.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/sockaddr.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/tcp_connection_fixture.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/test_bind.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/test_rollover.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/test_sockopt.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/test_vectors.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/test_verify_capture.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/utils.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/validator.py
base-commit: 07b855628c226511542d0911cba1b180541fbb84
--
2.25.1
Currently, the test decides whether or not to test certain features
(e.g., writeprotect support) essentially by examining command-line
arguments. For example, if we're testing anonymous memory, then we
should test writeprotect support as well (since it generally is
supported for anonymous).
This is broken, however. Take writeprotect support as an example: sure
it's supported for anon, but it also requires that we have
CONFIG_HAVE_ARCH_USERFAULTFD_WP. I.e., it is not supported at all on
aarch64. So, running the test on such an arch fails: it tries to test
writeprotect for anon, but since it isn't *actually* supported, it
fails.
So, instead of checking command-line arguments to the test, check the
features the way the UFFD API intends: when we open a new userfaultfd,
pass in the feature(s) this test case would like to try to exercise. The
kernel reports back a subset of those features which are actually
supported: check these returned flags to see if the features are
*actually* supported.
(For a couple of cases, where *registration* would fail [with -EINVAL]
even though UFFDIO_API reports the feature as supported, we have to
check test_type as well as the feature flag.)
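A minimal sketch of that handshake, following the flow described above
(standard userfaultfd uapi, error handling trimmed):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

/* Open a userfaultfd requesting 'features'; per the description above,
 * the kernel reports back the features it actually supports, which the
 * caller checks before deciding whether to run or skip a test case. */
static int open_uffd(uint64_t features, uint64_t *supported)
{
	struct uffdio_api api = { .api = UFFD_API, .features = features };
	int fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

	if (fd < 0)
		return -1;
	if (ioctl(fd, UFFDIO_API, &api)) {
		close(fd);
		return -1;
	}
	*supported = api.features;
	return fd;
}

A caller can then skip when (supported & features) != features, exactly
as userfaultfd_minor_test() does below.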
In some cases, we check immediately after opening the userfaultfd, and
if the features are missing, we skip the entire test. In some other
cases, we can proceed with "most" of the test, only skipping a few
pieces.
This lets us remove the global test_uffdio_wp and test_uffdio_minor
variables entirely.
Signed-off-by: Axel Rasmussen <axelrasmussen(a)google.com>
---
tools/testing/selftests/vm/userfaultfd.c | 94 +++++++++++-------------
1 file changed, 43 insertions(+), 51 deletions(-)
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index 10ab56c2484a..2366caf90435 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -79,10 +79,6 @@ static int test_type;
#define ALARM_INTERVAL_SECS 10
static volatile bool test_uffdio_copy_eexist = true;
static volatile bool test_uffdio_zeropage_eexist = true;
-/* Whether to test uffd write-protection */
-static bool test_uffdio_wp = false;
-/* Whether to test uffd minor faults */
-static bool test_uffdio_minor = false;
static bool map_shared;
static int shm_fd;
@@ -90,6 +86,7 @@ static int huge_fd;
static char *huge_fd_off0;
static unsigned long long *count_verify;
static int uffd = -1;
+static uint64_t uffd_features;
static int uffd_flags, finished, *pipefd;
static char *area_src, *area_src_alias, *area_dst, *area_dst_alias;
static char *zeropage;
@@ -345,7 +342,7 @@ static struct uffd_test_ops hugetlb_uffd_test_ops = {
static struct uffd_test_ops *uffd_test_ops;
-static void userfaultfd_open(uint64_t *features)
+static void userfaultfd_open(uint64_t features)
{
struct uffdio_api uffdio_api;
@@ -355,14 +352,20 @@ static void userfaultfd_open(uint64_t *features)
uffd_flags = fcntl(uffd, F_GETFD, NULL);
uffdio_api.api = UFFD_API;
- uffdio_api.features = *features;
+ uffdio_api.features = features;
if (ioctl(uffd, UFFDIO_API, &uffdio_api))
err("UFFDIO_API failed.\nPlease make sure to "
"run with either root or ptrace capability.");
if (uffdio_api.api != UFFD_API)
err("UFFDIO_API error: %" PRIu64, (uint64_t)uffdio_api.api);
- *features = uffdio_api.features;
+ uffd_features = uffdio_api.features;
+}
+
+static inline bool uffd_wp_supported(void)
+{
+ return test_type == TEST_ANON &&
+ (uffd_features & UFFD_FEATURE_PAGEFAULT_FLAG_WP);
}
static inline void munmap_area(void **area)
@@ -397,6 +400,7 @@ static void uffd_test_ctx_clear(void)
err("close uffd");
uffd = -1;
}
+ uffd_features = 0;
huge_fd_off0 = NULL;
munmap_area((void **)&area_src);
@@ -405,7 +409,7 @@ static void uffd_test_ctx_clear(void)
munmap_area((void **)&area_dst_alias);
}
-static void uffd_test_ctx_init_ext(uint64_t *features)
+static void uffd_test_ctx_init(uint64_t features)
{
unsigned long nr, cpu;
@@ -445,11 +449,6 @@ static void uffd_test_ctx_init_ext(uint64_t *features)
err("pipe");
}
-static inline void uffd_test_ctx_init(uint64_t features)
-{
- uffd_test_ctx_init_ext(&features);
-}
-
static int my_bcmp(char *str1, char *str2, size_t n)
{
unsigned long i;
@@ -587,7 +586,7 @@ static int __copy_page(int ufd, unsigned long offset, bool retry)
uffdio_copy.dst = (unsigned long) area_dst + offset;
uffdio_copy.src = (unsigned long) area_src + offset;
uffdio_copy.len = page_size;
- if (test_uffdio_wp)
+ if (uffd_wp_supported())
uffdio_copy.mode = UFFDIO_COPY_MODE_WP;
else
uffdio_copy.mode = 0;
@@ -778,7 +777,7 @@ static void *background_thread(void *arg)
* at least the first half of the pages mapped already which
* can be write-protected for testing
*/
- if (test_uffdio_wp)
+ if (uffd_wp_supported())
wp_range(uffd, (unsigned long)area_dst + start_nr * page_size,
nr_pages_per_cpu * page_size, true);
@@ -1062,12 +1061,12 @@ static int userfaultfd_zeropage_test(void)
printf("testing UFFDIO_ZEROPAGE: ");
fflush(stdout);
- uffd_test_ctx_init(0);
+ uffd_test_ctx_init(UFFD_FEATURE_PAGEFAULT_FLAG_WP);
uffdio_register.range.start = (unsigned long) area_dst;
uffdio_register.range.len = nr_pages * page_size;
uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
- if (test_uffdio_wp)
+ if (uffd_wp_supported())
uffdio_register.mode |= UFFDIO_REGISTER_MODE_WP;
if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
err("register failure");
@@ -1089,7 +1088,7 @@ static int userfaultfd_events_test(void)
struct uffdio_register uffdio_register;
unsigned long expected_ioctls;
pthread_t uffd_mon;
- int err, features;
+ int err;
pid_t pid;
char c;
struct uffd_stats stats = { 0 };
@@ -1097,16 +1096,15 @@ static int userfaultfd_events_test(void)
printf("testing events (fork, remap, remove): ");
fflush(stdout);
- features = UFFD_FEATURE_EVENT_FORK | UFFD_FEATURE_EVENT_REMAP |
- UFFD_FEATURE_EVENT_REMOVE;
- uffd_test_ctx_init(features);
+ uffd_test_ctx_init(UFFD_FEATURE_EVENT_FORK | UFFD_FEATURE_EVENT_REMAP |
+ UFFD_FEATURE_EVENT_REMOVE | UFFD_FEATURE_PAGEFAULT_FLAG_WP);
fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
uffdio_register.range.start = (unsigned long) area_dst;
uffdio_register.range.len = nr_pages * page_size;
uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
- if (test_uffdio_wp)
+ if (uffd_wp_supported())
uffdio_register.mode |= UFFDIO_REGISTER_MODE_WP;
if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
err("register failure");
@@ -1144,7 +1142,7 @@ static int userfaultfd_sig_test(void)
unsigned long expected_ioctls;
unsigned long userfaults;
pthread_t uffd_mon;
- int err, features;
+ int err;
pid_t pid;
char c;
struct uffd_stats stats = { 0 };
@@ -1152,15 +1150,15 @@ static int userfaultfd_sig_test(void)
printf("testing signal delivery: ");
fflush(stdout);
- features = UFFD_FEATURE_EVENT_FORK|UFFD_FEATURE_SIGBUS;
- uffd_test_ctx_init(features);
+ uffd_test_ctx_init(UFFD_FEATURE_EVENT_FORK | UFFD_FEATURE_SIGBUS |
+ UFFD_FEATURE_PAGEFAULT_FLAG_WP);
fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
uffdio_register.range.start = (unsigned long) area_dst;
uffdio_register.range.len = nr_pages * page_size;
uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
- if (test_uffdio_wp)
+ if (uffd_wp_supported())
uffdio_register.mode |= UFFDIO_REGISTER_MODE_WP;
if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
err("register failure");
@@ -1209,25 +1207,23 @@ static int userfaultfd_minor_test(void)
void *expected_page;
char c;
struct uffd_stats stats = { 0 };
- uint64_t req_features, features_out;
-
- if (!test_uffdio_minor)
- return 0;
+ uint64_t features;
printf("testing minor faults: ");
fflush(stdout);
- if (test_type == TEST_HUGETLB)
- req_features = UFFD_FEATURE_MINOR_HUGETLBFS;
+ if (test_type == TEST_HUGETLB && map_shared)
+ features = UFFD_FEATURE_MINOR_HUGETLBFS;
else if (test_type == TEST_SHMEM)
- req_features = UFFD_FEATURE_MINOR_SHMEM;
- else
- return 1;
+ features = UFFD_FEATURE_MINOR_SHMEM;
+ else {
+ printf("skipping test due to unsupported memory type\n");
+ return 0;
+ }
- features_out = req_features;
- uffd_test_ctx_init_ext(&features_out);
+ uffd_test_ctx_init(features);
/* If kernel reports required features aren't supported, skip test. */
- if ((features_out & req_features) != req_features) {
+ if ((uffd_features & features) != features) {
printf("skipping test due to lack of feature support\n");
fflush(stdout);
return 0;
@@ -1349,10 +1345,6 @@ static void userfaultfd_pagemap_test(unsigned int test_pgsize)
int pagemap_fd;
uint64_t value;
- /* Pagemap tests uffd-wp only */
- if (!test_uffdio_wp)
- return;
-
/* Not enough memory to test this page size */
if (test_pgsize > nr_pages * page_size)
return;
@@ -1361,7 +1353,12 @@ static void userfaultfd_pagemap_test(unsigned int test_pgsize)
/* Flush so it doesn't flush twice in parent/child later */
fflush(stdout);
- uffd_test_ctx_init(0);
+ uffd_test_ctx_init(UFFD_FEATURE_PAGEFAULT_FLAG_WP);
+ /* Pagemap tests uffd-wp only */
+ if (!uffd_wp_supported()) {
+ printf("skipping test due to lack of feature support\n");
+ return;
+ }
if (test_pgsize > page_size) {
/* This is a thp test */
@@ -1426,7 +1423,7 @@ static int userfaultfd_stress(void)
struct uffdio_register uffdio_register;
struct uffd_stats uffd_stats[nr_cpus];
- uffd_test_ctx_init(0);
+ uffd_test_ctx_init(UFFD_FEATURE_PAGEFAULT_FLAG_WP);
if (posix_memalign(&area, page_size, page_size))
err("out of memory");
@@ -1464,7 +1461,7 @@ static int userfaultfd_stress(void)
uffdio_register.range.start = (unsigned long) area_dst;
uffdio_register.range.len = nr_pages * page_size;
uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
- if (test_uffdio_wp)
+ if (uffd_wp_supported())
uffdio_register.mode |= UFFDIO_REGISTER_MODE_WP;
if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
err("register failure");
@@ -1513,7 +1510,7 @@ static int userfaultfd_stress(void)
return 1;
/* Clear all the write protections if there is any */
- if (test_uffdio_wp)
+ if (uffd_wp_supported())
wp_range(uffd, (unsigned long)area_dst,
nr_pages * page_size, false);
@@ -1595,8 +1592,6 @@ static void set_test_type(const char *type)
if (!strcmp(type, "anon")) {
test_type = TEST_ANON;
uffd_test_ops = &anon_uffd_test_ops;
- /* Only enable write-protect test for anonymous test */
- test_uffdio_wp = true;
} else if (!strcmp(type, "hugetlb")) {
test_type = TEST_HUGETLB;
uffd_test_ops = &hugetlb_uffd_test_ops;
@@ -1604,13 +1599,10 @@ static void set_test_type(const char *type)
map_shared = true;
test_type = TEST_HUGETLB;
uffd_test_ops = &hugetlb_uffd_test_ops;
- /* Minor faults require shared hugetlb; only enable here. */
- test_uffdio_minor = true;
} else if (!strcmp(type, "shmem")) {
map_shared = true;
test_type = TEST_SHMEM;
uffd_test_ops = &shmem_uffd_test_ops;
- test_uffdio_minor = true;
} else {
err("Unknown test type: %s", type);
}
--
2.33.0.464.g1972c5931b-goog
From: Colin Ian King <colin.king(a)canonical.com>
There is a spelling mistake in an error message. Fix it.
Signed-off-by: Colin Ian King <colin.king(a)canonical.com>
---
tools/testing/selftests/kvm/lib/sparsebit.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/testing/selftests/kvm/lib/sparsebit.c b/tools/testing/selftests/kvm/lib/sparsebit.c
index a0d0c83d83de..50e0cf41a7dd 100644
--- a/tools/testing/selftests/kvm/lib/sparsebit.c
+++ b/tools/testing/selftests/kvm/lib/sparsebit.c
@@ -1866,7 +1866,7 @@ void sparsebit_validate_internal(struct sparsebit *s)
* of total bits set.
*/
if (s->num_set != total_bits_set) {
- fprintf(stderr, "Number of bits set missmatch,\n"
+ fprintf(stderr, "Number of bits set mismatch,\n"
" s->num_set: 0x%lx total_bits_set: 0x%lx",
s->num_set, total_bits_set);
--
2.32.0
The kvm_vm_free() statement here is currently dead code, since the loop
in front of it can only be left with the "goto done" that jumps right
after the kvm_vm_free(). Fix it by swapping the locations of the "done"
label and the kvm_vm_free().
Signed-off-by: Thomas Huth <thuth(a)redhat.com>
---
tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c | 3 +--
tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c | 2 +-
2 files changed, 2 insertions(+), 3 deletions(-)
diff --git a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
index f40fd097cb35..6f6fd189dda3 100644
--- a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
+++ b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
@@ -109,8 +109,7 @@ int main(int argc, char *argv[])
}
}
- kvm_vm_free(vm);
-
done:
+ kvm_vm_free(vm);
return 0;
}
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
index 7e33a350b053..e683d0ac3e45 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
@@ -161,7 +161,7 @@ int main(int argc, char *argv[])
}
}
- kvm_vm_free(vm);
done:
+ kvm_vm_free(vm);
return 0;
}
--
2.27.0
Patch 1 fixes a KVM+rseq bug where KVM's handling of TIF_NOTIFY_RESUME,
e.g. for task migration, clears the flag without informing rseq and leads
to stale data in userspace's rseq struct.
Patch 2 is a cleanup to try and make future bugs less likely. It's also
a baby step towards moving and renaming tracehook_notify_resume() since
it has nothing to do with tracing. It kills me to not do the move/rename
as part of this series, but having a dedicated series/discussion seems
more appropriate given the sheer number of architectures that call
tracehook_notify_resume() and the lack of an obvious home for the code.
Patch 3 is a fix/cleanup to stop overriding x86's unistd_{32,64}.h when
the include path (intentionally) omits tools' uapi headers. KVM's
selftests do exactly that so that they can pick up the uapi headers from
the installed kernel headers, and still use various tools/ headers that
mirror kernel code, e.g. linux/types.h. This allows the new test in
patch 4 to reference __NR_rseq without having to manually define it.
Patch 4 is a regression test for the KVM+rseq bug.
Patch 5 is a cleanup made possible by patch 3.
Sean Christopherson (5):
KVM: rseq: Update rseq when processing NOTIFY_RESUME on xfer to KVM
guest
entry: rseq: Call rseq_handle_notify_resume() in
tracehook_notify_resume()
tools: Move x86 syscall number fallbacks to .../uapi/
KVM: selftests: Add a test for KVM_RUN+rseq to detect task migration
bugs
KVM: selftests: Remove __NR_userfaultfd syscall fallback
arch/arm/kernel/signal.c | 1 -
arch/arm64/kernel/signal.c | 1 -
arch/csky/kernel/signal.c | 4 +-
arch/mips/kernel/signal.c | 4 +-
arch/powerpc/kernel/signal.c | 4 +-
arch/s390/kernel/signal.c | 1 -
include/linux/tracehook.h | 2 +
kernel/entry/common.c | 4 +-
kernel/rseq.c | 4 +-
.../x86/include/{ => uapi}/asm/unistd_32.h | 0
.../x86/include/{ => uapi}/asm/unistd_64.h | 3 -
tools/testing/selftests/kvm/.gitignore | 1 +
tools/testing/selftests/kvm/Makefile | 3 +
tools/testing/selftests/kvm/rseq_test.c | 131 ++++++++++++++++++
14 files changed, 143 insertions(+), 20 deletions(-)
rename tools/arch/x86/include/{ => uapi}/asm/unistd_32.h (100%)
rename tools/arch/x86/include/{ => uapi}/asm/unistd_64.h (83%)
create mode 100644 tools/testing/selftests/kvm/rseq_test.c
--
2.33.0.rc1.237.g0d66db33f3-goog
Synchronous Ethernet networks use a physical layer clock to syntonize
the frequency across different network elements.
A basic SyncE node as defined in ITU-T G.8264 consists of an Ethernet
Equipment Clock (EEC) and has the ability to recover synchronization
from the synchronization inputs - either traffic interfaces or external
frequency sources.
The EEC can synchronize its frequency (syntonize) to any of those sources.
It is also able to select the synchronization source through priority tables
and synchronization status messaging, and it provides the necessary
filtering and holdover capabilities.
This patch series introduces a basic interface for reading the Ethernet
Equipment Clock (EEC) state on a SyncE-capable device. This state gives
information about the source of the syntonization signal (either the local
port or an external one) and the state of the EEC. This interface is required
to implement Synchronization Status Messaging on upper layers.
Next steps:
- add interface to enable source clocks and get information about them
- properly return the EEC_SRC_PORT flag depending on the port recovered
clock being enabled and locked
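As a sketch of how user space might consume the new message, an
RTM_GETEECSTATE query could look roughly as follows; the message number
and payload struct are assumptions for illustration while the interface
is still under review:

#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/types.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

#ifndef RTM_GETEECSTATE
#define RTM_GETEECSTATE 124		/* placeholder value */
#endif

struct if_eec_state_msg {		/* hypothetical payload */
	__u32 ifindex;
};

/* Send an RTM_GETEECSTATE request for one interface on an
 * AF_NETLINK/NETLINK_ROUTE socket; the kernel would reply with the
 * EEC state attributes described above. */
static int query_eec_state(int nlsk, int ifindex)
{
	struct {
		struct nlmsghdr nh;
		struct if_eec_state_msg state;
	} req;

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.state));
	req.nh.nlmsg_type = RTM_GETEECSTATE;
	req.nh.nlmsg_flags = NLM_F_REQUEST;
	req.state.ifindex = ifindex;
	return send(nlsk, &req, req.nh.nlmsg_len, 0);
}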
v2:
- removed whitespace changes
- fix issues reported by test robot
v3:
- Changed naming from SyncE to EEC
- Clarify cover letter and commit message for patch 1
v4:
- Removed sync_source and pin_idx info
- Changed one structure to attributes
- Added EEC_SRC_PORT flag to indicate that the EEC is synchronized
to the recovered clock of a port that returns the state
Maciej Machnikowski (2):
rtnetlink: Add new RTM_GETEECSTATE message to get SyncE status
ice: add support for reading SyncE DPLL state
drivers/net/ethernet/intel/ice/ice.h | 5 ++
.../net/ethernet/intel/ice/ice_adminq_cmd.h | 34 +++++++++
drivers/net/ethernet/intel/ice/ice_common.c | 62 ++++++++++++++++
drivers/net/ethernet/intel/ice/ice_common.h | 4 ++
drivers/net/ethernet/intel/ice/ice_devids.h | 3 +
drivers/net/ethernet/intel/ice/ice_main.c | 29 ++++++++
drivers/net/ethernet/intel/ice/ice_ptp.c | 35 +++++++++
drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 44 ++++++++++++
drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 22 ++++++
include/linux/netdevice.h | 6 ++
include/uapi/linux/if_link.h | 31 ++++++++
include/uapi/linux/rtnetlink.h | 3 +
net/core/rtnetlink.c | 71 +++++++++++++++++++
security/selinux/nlmsgtab.c | 3 +-
14 files changed, 351 insertions(+), 1 deletion(-)
--
2.26.3
During initialization of a signal testcase, features declared as required
are properly checked against the running system but no action is then taken
to effectively skip such a testcase.
Fix core signals test logic to abort initialization and report such a
testcase as skipped to the KSelfTest framework.
Fixes: f96bf4340316 ("kselftest: arm64: mangle_pstate_invalid_compat_toggle and common utils")
Signed-off-by: Cristian Marussi <cristian.marussi(a)arm.com>
---
As a consequence, KSelfTest TAP results will now report this when a signal-SVE
testcase runs on a system missing SVE:
\# selftests: arm64: fake_sigreturn_sve_change_vl
\# Registered handlers for all signals.
\# Detected MINSTKSIGSZ:4720
\# Required Features: [ SVE ] NOT supported
\# ==>> completed. SKIP.
\# # FAKE_SIGRETURN_SVE_CHANGE :: Attempt to change SVE VL
\# ok 7 selftests: arm64: fake_sigreturn_sve_change_vl # SKIP
---
tools/testing/selftests/arm64/signal/test_signals_utils.c | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/tools/testing/selftests/arm64/signal/test_signals_utils.c b/tools/testing/selftests/arm64/signal/test_signals_utils.c
index 6836510a522f..22722abc9dfa 100644
--- a/tools/testing/selftests/arm64/signal/test_signals_utils.c
+++ b/tools/testing/selftests/arm64/signal/test_signals_utils.c
@@ -266,16 +266,19 @@ int test_init(struct tdescr *td)
td->feats_supported |= FEAT_SSBS;
if (getauxval(AT_HWCAP) & HWCAP_SVE)
td->feats_supported |= FEAT_SVE;
- if (feats_ok(td))
+ if (feats_ok(td)) {
fprintf(stderr,
"Required Features: [%s] supported\n",
feats_to_string(td->feats_required &
td->feats_supported));
- else
+ } else {
fprintf(stderr,
"Required Features: [%s] NOT supported\n",
feats_to_string(td->feats_required &
~td->feats_supported));
+ td->result = KSFT_SKIP;
+ return 0;
+ }
}
/* Perform test specific additional initialization */
--
2.17.1
Commit 6a499c9c42d0 ("kunit: tool: make --raw_output support only
showing kunit output") made --raw_output a string-typed argument.
Passing --raw_output=kunit would make it only show KUnit-related output
and not everything.
However, converting it to a string-typed argument had side effects.
These calls used to work:
$ kunit.py run --raw_output
$ kunit.py run --raw_output suite_filter
$ kunit.py run suite_filter --raw_output
But now the second is actually parsed as
$ kunit.py run --raw_output=suite_filter
So where --raw_output appears now matters, and command lines that
used to work might not anymore.
Change --raw_output back to a boolean flag, but change its behavior to
match that of the former --raw_output=kunit.
The assumption is that this is what most people wanted to see anyway.
To get the old behavior, users can simply do:
$ kunit.py run >/dev/null; cat .kunit/test.log
By contrast, there is no similarly easy way to get the --raw_output=kunit
behavior by hand.
Signed-off-by: Daniel Latypov <dlatypov(a)google.com>
---
Meta: this is an alternative to
https://lore.kernel.org/linux-kselftest/20210903161405.1861312-1-dlatypov@g…
I'd slightly prefer that approach, but if we're fine with giving up the
old --raw_output semantics entirely, this would be cleaner.
I'd also assume that most people would prefer the new semantics, but I'm
not sure of that.
---
Documentation/dev-tools/kunit/kunit-tool.rst | 7 -------
tools/testing/kunit/kunit.py | 12 +++---------
tools/testing/kunit/kunit_tool_test.py | 13 ++++++-------
3 files changed, 9 insertions(+), 23 deletions(-)
diff --git a/Documentation/dev-tools/kunit/kunit-tool.rst b/Documentation/dev-tools/kunit/kunit-tool.rst
index ae52e0f489f9..03404746f1f6 100644
--- a/Documentation/dev-tools/kunit/kunit-tool.rst
+++ b/Documentation/dev-tools/kunit/kunit-tool.rst
@@ -114,13 +114,6 @@ results in TAP format, you can pass the ``--raw_output`` argument.
./tools/testing/kunit/kunit.py run --raw_output
-The raw output from test runs may contain other, non-KUnit kernel log
-lines. You can see just KUnit output with ``--raw_output=kunit``:
-
-.. code-block:: bash
-
- ./tools/testing/kunit/kunit.py run --raw_output=kunit
-
If you have KUnit results in their raw TAP format, you can parse them and print
the human-readable summary with the ``parse`` command for kunit_tool. This
accepts a filename for an argument, or will read from standard input.
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index 5a931456e718..3626a56472b5 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -115,13 +115,7 @@ def parse_tests(request: KunitParseRequest) -> KunitResult:
'Tests not Parsed.')
if request.raw_output:
- output: Iterable[str] = request.input_data
- if request.raw_output == 'all':
- pass
- elif request.raw_output == 'kunit':
- output = kunit_parser.extract_tap_lines(output)
- else:
- print(f'Unknown --raw_output option "{request.raw_output}"', file=sys.stderr)
+ output = kunit_parser.extract_tap_lines(request.input_data)
for line in output:
print(line.rstrip())
@@ -256,8 +250,8 @@ def add_exec_opts(parser) -> None:
def add_parse_opts(parser) -> None:
parser.add_argument('--raw_output', help='If set don\'t format output from kernel. '
- 'If set to --raw_output=kunit, filters to just KUnit output.',
- type=str, nargs='?', const='all', default=None)
+ 'It will only show output from KUnit.',
+ action='store_true')
parser.add_argument('--json',
nargs='?',
help='Stores test results in a JSON, and either '
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index 619c4554cbff..55ed3dac31ee 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -399,14 +399,13 @@ class KUnitMainTest(unittest.TestCase):
self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))
- def test_run_raw_output_kunit(self):
+ def test_run_raw_output_does_not_take_positional_args(self):
+ # --raw_output might eventually support an argument, but we don't want it
+ # to consume any positional arguments, only ones after an '='.
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
- kunit.main(['run', '--raw_output=kunit'], self.linux_source_mock)
- self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
- self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
- for call in self.print_mock.call_args_list:
- self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
- self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))
+ kunit.main(['run', '--raw_output', 'filter_glob'], self.linux_source_mock)
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+ args=None, build_dir='.kunit', filter_glob='filter_glob', timeout=300)
def test_exec_timeout(self):
timeout = 3453
base-commit: 316346243be6df12799c0b64b788e06bad97c30b
--
2.33.0.464.g1972c5931b-goog
Problem:
What does this do?
$ kunit.py run --json
Well, it runs all the tests and prints test results out as JSON.
And next is
$ kunit.py run my-test-suite --json
This runs just `my-test-suite` and prints results out as JSON.
But what about this?
$ kunit.py run --json my-test-suite
This runs all the tests and stores the json results in a "my-test-suite"
file.
Why:
--json, and now --raw_output are actually string flags. They just have a
default value. --json in particular takes the name of an output file.
It was intended that you'd do
$ kunit.py run --json=my_output_file my-test-suite
if you ever wanted to specify the value.
Workaround:
It doesn't seem like there's a way to make
https://docs.python.org/3/library/argparse.html only accept arg values
after a '='.
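As a standalone sketch of the underlying argparse gotcha (illustrative
only, not part of this patch; the option setup mirrors kunit.py's):

  import argparse

  parser = argparse.ArgumentParser()
  # nargs='?' is what makes a bare --json legal (its value is optional)
  parser.add_argument('--json', nargs='?', const='stdout', default=None)
  parser.add_argument('filter_glob', nargs='?')

  # argparse consumes the next token as the option's value:
  print(parser.parse_args(['--json', 'my-test-suite']))
  # => Namespace(filter_glob=None, json='my-test-suite')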
I believe that `--json` should "just work" regardless of where it is.
So this patch automatically rewrites a bare `--json` to `--json=stdout`.
That makes the examples above work the same way.
Add a regression test that can catch this for --raw_output.
Fixes: 6a499c9c42d0 ("kunit: tool: make --raw_output support only showing kunit output")
Signed-off-by: Daniel Latypov <dlatypov(a)google.com>
---
tools/testing/kunit/kunit.py | 24 ++++++++++++++++++++++--
tools/testing/kunit/kunit_tool_test.py | 8 ++++++++
2 files changed, 30 insertions(+), 2 deletions(-)
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index 5a931456e718..95d62020e4f2 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -16,7 +16,7 @@ assert sys.version_info >= (3, 7), "Python version is too old"
from collections import namedtuple
from enum import Enum, auto
-from typing import Iterable
+from typing import Iterable, Sequence
import kunit_config
import kunit_json
@@ -186,6 +186,26 @@ def run_tests(linux: kunit_kernel.LinuxSourceTree,
exec_result.elapsed_time))
return parse_result
+# Problem:
+# $ kunit.py run --json
+# works as one would expect and prints the parsed test results as JSON.
+# $ kunit.py run --json suite_name
+# would *not* pass suite_name as the filter_glob and print as json.
+# argparse will consider it to be another way of writing
+# $ kunit.py run --json=suite_name
+# i.e. it would run all tests, and dump the json to a `suite_name` file.
+# So we hackily automatically rewrite --json => --json=stdout
+pseudo_bool_flag_defaults = {
+ '--json': 'stdout',
+ '--raw_output': 'kunit',
+}
+def massage_argv(argv: Sequence[str]) -> Sequence[str]:
+ def massage_arg(arg: str) -> str:
+ if arg not in pseudo_bool_flag_defaults:
+ return arg
+ return f'{arg}={pseudo_bool_flag_defaults[arg]}'
+ return map(massage_arg, argv)
+
def add_common_opts(parser) -> None:
parser.add_argument('--build_dir',
help='As in the make command, it specifies the build '
@@ -303,7 +323,7 @@ def main(argv, linux=None):
help='Specifies the file to read results from.',
type=str, nargs='?', metavar='input_file')
- cli_args = parser.parse_args(argv)
+ cli_args = parser.parse_args(massage_argv(argv))
if get_kernel_root_path():
os.chdir(get_kernel_root_path())
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index 619c4554cbff..1edcc8373b4e 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -408,6 +408,14 @@ class KUnitMainTest(unittest.TestCase):
self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))
+ def test_run_raw_output_does_not_take_positional_args(self):
+ # --raw_output is a string flag, but we don't want it to consume
+ # any positional arguments, only ones after an '='
+ self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
+ kunit.main(['run', '--raw_output', 'filter_glob'], self.linux_source_mock)
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+ args=None, build_dir='.kunit', filter_glob='filter_glob', timeout=300)
+
def test_exec_timeout(self):
timeout = 3453
kunit.main(['exec', '--timeout', str(timeout)], self.linux_source_mock)
base-commit: a9c9a6f741cdaa2fa9ba24a790db8d07295761e3
--
2.33.0.153.gba50c8fa24-goog
This test assumes that the declared kunit_suite object is the exact one
which is being executed, which KUnit will not guarantee [1].
Specifically, `suite->log` is not initialized until a suite object is
executed. So if KUnit makes a copy of the suite and runs that instead,
this test dereferences an invalid pointer and (hopefully) segfaults.
N.B. since we no longer assume this, we can no longer verify that
`suite->log` is *not* allocated during normal execution.
An alternative to this patch that would allow us to test that would
require exposing an API for the current test to get its current suite.
Exposing that for one internal kunit test seems like overkill, and
grants users more footguns (e.g. reusing a test case in multiple suites
and changing behavior based on the suite name, dynamically modifying the
setup/cleanup funcs, storing/reading stuff out of the suite->log, etc.).
[1] In a subsequent patch, KUnit will allow running subsets of test
cases within a suite by making a copy of the suite w/ the filtered test
list. But there are other reasons KUnit might execute a copy, e.g. if it
ever wants to support parallel execution of different suites, recovering
from errors and restarting suites.
Signed-off-by: Daniel Latypov <dlatypov(a)google.com>
Reviewed-by: Brendan Higgins <brendanhiggins(a)google.com>
---
lib/kunit/kunit-test.c | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index d69efcbed624..555601d17f79 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -415,12 +415,15 @@ static struct kunit_suite kunit_log_test_suite = {
static void kunit_log_test(struct kunit *test)
{
- struct kunit_suite *suite = &kunit_log_test_suite;
+ struct kunit_suite suite;
+
+ suite.log = kunit_kzalloc(test, KUNIT_LOG_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, suite.log);
kunit_log(KERN_INFO, test, "put this in log.");
kunit_log(KERN_INFO, test, "this too.");
- kunit_log(KERN_INFO, suite, "add to suite log.");
- kunit_log(KERN_INFO, suite, "along with this.");
+ kunit_log(KERN_INFO, &suite, "add to suite log.");
+ kunit_log(KERN_INFO, &suite, "along with this.");
#ifdef CONFIG_KUNIT_DEBUGFS
KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
@@ -428,12 +431,11 @@ static void kunit_log_test(struct kunit *test)
KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
strstr(test->log, "this too."));
KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
- strstr(suite->log, "add to suite log."));
+ strstr(suite.log, "add to suite log."));
KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
- strstr(suite->log, "along with this."));
+ strstr(suite.log, "along with this."));
#else
KUNIT_EXPECT_PTR_EQ(test, test->log, (char *)NULL);
- KUNIT_EXPECT_PTR_EQ(test, suite->log, (char *)NULL);
#endif
}
base-commit: 316346243be6df12799c0b64b788e06bad97c30b
--
2.33.0.309.g3052b89438-goog
On Tue, Sep 21, 2021 at 1:24 AM David Laight <David.Laight(a)aculab.com> wrote:
>
> From: Luis Chamberlain
> > Sent: 17 September 2021 20:47
> >
> > When sysfs attributes use a lock also used on module removal we can
> > race to deadlock. This happens when for instance a sysfs file on
> > a driver is used, then at the same time we have module removal call
> > trigger. The module removal call code holds a lock, and then the sysfs
> > file entry waits for the same lock. While holding the lock the module
> > removal tries to remove the sysfs entries, but these cannot be removed
> > yet as one is waiting for a lock. This won't complete as the lock is
> > already held. Likewise module removal cannot complete, and so we deadlock.
>
> Isn't the real problem the race between a sysfs file action and the
> removal of the sysfs node?
Nope, that is taken care of by kernfs.
> This isn't really related to module unload - except that may
> well remove some sysfs nodes.
Nope, the issue is a deadlock that can happen due to a shared lock on
module removal and a driver sysfs operation.
> This is the same problem as removing any other kind of driver callback.
> There are three basic solutions:
> 1) Use a global lock - not usually useful.
> 2) Have the remove call sleep until any callbacks are complete.
> 3) Have the remove just request removal and have a final
> callback (from a different context).
Kernfs already does a sort of combination of 1) and 2) but 1) is using
atomic reference counts.
> If the remove can sleep (as in 2) then there is a requirement
> on the driver code to not hold any locks across the 'remove'
> that can be acquired during the callbacks.
And this is the part that kernfs has no control over since the removal
and sysfs operation are implementation specific.
> Now, for sysfs, you probably only want to sleep the remove code
> while a read/write is in progress - not just because the node
> is open.
> That probably requires marking an open node 'invalid' and
> deferring delete to close.
This is already done by kernfs.
> None of this requires a reference count on the module.
You are missing the other aspect of try_module_get(): it also lets you
check whether module exit has begun. By using try_module_get() you let
module exit trump whatever operation was about to proceed, which also
prevents any potential deadlock between a lock shared by module exit
and the driver-specific sysfs operation.
Luis
Makefile uses TEST_PROGS instead of TEST_GEN_PROGS to define
executables. TEST_PROGS is for shell scripts that need to be
installed and run by the common lib.mk framework. The common
framework doesn't touch TEST_PROGS when it does build and clean.
As a result "make kselftest-clean" and "make clean" fail to remove
executables. Run and install work because the common framework runs
and installs TEST_PROGS. Build works because the Makefile defines an
"all" rule, which is unnecessary if TEST_GEN_PROGS is used.
Use TEST_GEN_PROGS so the common framework can handle build/run/
install/clean properly.
Signed-off-by: Shuah Khan <skhan(a)linuxfoundation.org>
---
tools/testing/selftests/net/af_unix/Makefile | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/tools/testing/selftests/net/af_unix/Makefile b/tools/testing/selftests/net/af_unix/Makefile
index cfc7f4f97fd1..df341648f818 100644
--- a/tools/testing/selftests/net/af_unix/Makefile
+++ b/tools/testing/selftests/net/af_unix/Makefile
@@ -1,5 +1,2 @@
-##TEST_GEN_FILES := test_unix_oob
-TEST_PROGS := test_unix_oob
+TEST_GEN_PROGS := test_unix_oob
include ../../lib.mk
-
-all: $(TEST_PROGS)
--
2.30.2
The ATTRIBUTE_GROUPS() macro is typically used to avoid boilerplate
code repeated in many drivers. Embracing ATTRIBUTE_GROUPS was
long overdue in the zram driver; what is more, a recent fix for sysfs
allows users of ATTRIBUTE_GROUPS to also associate a module with the
group attributes.
In zram's case this also means it allows us to fix a race which triggers
a deadlock on the zram driver. This deadlock happens when a sysfs
attribute uses a lock also used on module removal, for instance when a
sysfs file on a driver is used while, at the same time, module removal
is triggered. The module removal code holds a lock, and then the sysfs
file entry waits for the same lock. While holding the lock the module
removal tries to remove the sysfs entries, but these cannot be removed
yet as one is waiting for a lock. This won't complete as the lock is
already held. Likewise module removal cannot complete, and so we
deadlock.
Sysfs fixes this when the group attributes have a module associated with
them: sysfs will *try* to get a refcount on the module, prior to mucking
with a sysfs attribute, whenever a shared lock is involved. If this
fails we just give up right away.
This deadlock was first reported with the zram driver, a sketch of how
this can happen follows:
CPU A CPU B
whatever_store()
module_unload
mutex_lock(foo)
mutex_lock(foo)
del_gendisk(zram->disk);
device_del()
device_remove_groups()
In this situation whatever_store() is waiting for the mutex foo to
become unlocked, but that won't happen until module removal is complete.
But module removal won't complete until the sysfs file being poked
completes, and that in turn is waiting for a lock already held.
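A rough sketch of the two sides involved (hypothetical code; foo stands
in for the shared lock from the CPU A / CPU B diagram above):

  static DEFINE_MUTEX(foo);        /* the shared lock */

  /* sysfs store op, e.g. whatever_store() */
  static ssize_t whatever_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t len)
  {
          mutex_lock(&foo);        /* blocks: module exit already holds foo */
          /* ... */
          mutex_unlock(&foo);
          return len;
  }

  /* module exit path */
  static void __exit whatever_exit(void)
  {
          mutex_lock(&foo);
          del_gendisk(zram->disk); /* waits for the active sysfs op above */
          mutex_unlock(&foo);
  }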
This issue can be reproduced easily on the zram driver as follows:
Loop 1 on one terminal:
while true;
do modprobe zram;
modprobe -r zram;
done
Loop 2 on a second terminal:
while true; do
echo 1024 > /sys/block/zram0/disksize;
echo 1 > /sys/block/zram0/reset;
done
Without this patch we end up in a deadlock, and the following
stack trace is produced which hints to us what the issue was:
INFO: task bash:888 blocked for more than 120 seconds.
Tainted: G E 5.12.0-rc1-next-20210304+ #4
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
task:bash state:D stack: 0 pid: 888 ppid: 887 flags:<etc>
Call Trace:
__schedule+0x2e4/0x900
schedule+0x46/0xb0
schedule_preempt_disabled+0xa/0x10
__mutex_lock.constprop.0+0x2c3/0x490
? _kstrtoull+0x35/0xd0
reset_store+0x6c/0x160 [zram]
kernfs_fop_write_iter+0x124/0x1b0
new_sync_write+0x11c/0x1b0
vfs_write+0x1c2/0x260
ksys_write+0x5f/0xe0
do_syscall_64+0x33/0x80
entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x7f34f2c3df33
RSP: 002b:00007ffe751df6e8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001
RAX: ffffffffffffffda RBX: 0000000000000002 RCX: 00007f34f2c3df33
RDX: 0000000000000002 RSI: 0000561ccb06ec10 RDI: 0000000000000001
RBP: 0000561ccb06ec10 R08: 000000000000000a R09: 0000000000000001
R10: 0000561ccb157590 R11: 0000000000000246 R12: 0000000000000002
R13: 00007f34f2d0e6a0 R14: 0000000000000002 R15: 00007f34f2d0e8a0
INFO: task modprobe:1104 can't die for more than 120 seconds.
task:modprobe state:D stack: 0 pid: 1104 ppid: 916 flags:<etc>
Call Trace:
__schedule+0x2e4/0x900
schedule+0x46/0xb0
__kernfs_remove.part.0+0x228/0x2b0
? finish_wait+0x80/0x80
kernfs_remove_by_name_ns+0x50/0x90
remove_files+0x2b/0x60
sysfs_remove_group+0x38/0x80
sysfs_remove_groups+0x29/0x40
device_remove_attrs+0x4a/0x80
device_del+0x183/0x3e0
? mutex_lock+0xe/0x30
del_gendisk+0x27a/0x2d0
zram_remove+0x8a/0xb0 [zram]
? hot_remove_store+0xf0/0xf0 [zram]
zram_remove_cb+0xd/0x10 [zram]
idr_for_each+0x5e/0xd0
destroy_devices+0x39/0x6f [zram]
__do_sys_delete_module+0x190/0x2a0
do_syscall_64+0x33/0x80
entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x7f32adf727d7
RSP: 002b:00007ffc08bb38a8 EFLAGS: 00000206 ORIG_RAX: 00000000000000b0
RAX: ffffffffffffffda RBX: 000055eea23cbb10 RCX: 00007f32adf727d7
RDX: 0000000000000000 RSI: 0000000000000800 RDI: 000055eea23cbb78
RBP: 000055eea23cbb10 R08: 0000000000000000 R09: 0000000000000000
R10: 00007f32adfe5ac0 R11: 0000000000000206 R12: 000055eea23cbb78
R13: 0000000000000000 R14: 0000000000000000 R15: 000055eea23cbc20
[0] https://lkml.kernel.org/r/20210401235925.GR4332@42.do-not-panic.com
Signed-off-by: Luis Chamberlain <mcgrof(a)kernel.org>
---
drivers/block/zram/zram_drv.c | 11 ++---------
1 file changed, 2 insertions(+), 9 deletions(-)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index b26abcb955cc..60a55ae8cd91 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1902,14 +1902,7 @@ static struct attribute *zram_disk_attrs[] = {
NULL,
};
-static const struct attribute_group zram_disk_attr_group = {
- .attrs = zram_disk_attrs,
-};
-
-static const struct attribute_group *zram_disk_attr_groups[] = {
- &zram_disk_attr_group,
- NULL,
-};
+ATTRIBUTE_GROUPS(zram_disk);
/*
* Allocate and initialize new zram device. the function returns
@@ -1981,7 +1974,7 @@ static int zram_add(void)
blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
- device_add_disk(NULL, zram->disk, zram_disk_attr_groups);
+ device_add_disk(NULL, zram->disk, zram_disk_groups);
strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
--
2.30.2
Provide a simple state machine to fix races between driver exit, where
we remove the CPU multistate callbacks, and the re-initialization /
creation of new per-CPU instances which should be managed by these
callbacks.
The zram driver makes use of cpu hotplug multistate support, whereby it
associates a struct zcomp per CPU. Each struct zcomp represents a
compression algorithm in charge of managing compression streams per
CPU. Although a compiled zram driver only supports a fixed set of
compression algorithms, each zram device gets a struct zcomp allocated
per CPU. The "multi" in CPU hotplug multistate refers to these per-CPU
struct zcomp instances. Each of these will have the CPU hotplug
callback called for it on CPU plug / unplug. The kernel's CPU hotplug
multistate keeps a linked list of these different structures so that
it will iterate over them on CPU transitions.
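For reference, the multistate pattern looks roughly like this (a sketch
only; zcomp's actual callbacks and error handling are elided):

  /* driver init: register the callbacks once */
  ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE,
                                "block/zram/zcomp:prepare",
                                zcomp_cpu_up_prepare, zcomp_cpu_dead);

  /* per struct zcomp: link one instance into the state's list */
  ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);

  /* teardown: every instance must be removed first ... */
  cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
  /* ... or cpuhp_remove_multi_state() warns about instances left */
  cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);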
By default at driver initialization we will create just one zram device
(num_devices=1), and a zcomp structure is then set up for the
now-default lzo-rle compression algorithm. At driver removal we first
remove each
zram device, and so we destroy the associated struct zcomp per CPU. But
since we expose sysfs attributes to create new devices or reset /
initialize existing zram devices, we can easily end up re-initializing
a struct zcomp for a zram device before the exit routine of the module
removes the cpu hotplug callback. When this happens the kernel's CPU
hotplug will detect that at least one instance (struct zcomp for us)
exists. This can happen in the following situation:
CPU 1 CPU 2
disksize_store(...);
class_unregister(...);
idr_for_each(...);
zram_debugfs_destroy();
idr_destroy(...);
unregister_blkdev(...);
cpuhp_remove_multi_state(...);
The warning comes up on cpuhp_remove_multi_state() when it sees that the
state for CPUHP_ZCOMP_PREPARE does not have an empty instance linked list.
In this case a struct zcomp still exists: the driver allowed its
re-creation per CPU even though a call on another CPU could have just
freed them, and we are then later trying to remove the hotplug callback.
Fix all this by providing a zram initialization boolean, protected by
the driver's shared zram_index_mutex, which we can use to annotate when
sysfs attributes are safe to use or not -- that is, once the driver is
properly initialized. When the driver is going down we also make sure
not to let userspace muck with attributes which may affect each per-CPU
struct zcomp.
This also fixes a series of possible memory leaks. The
crashes and memory leaks can easily be caused by issuing
the zram02.sh script from the LTP project [0] in a loop
in two separate windows:
cd testcases/kernel/device-drivers/zram
while true; do PATH=$PATH:$PWD:$PWD/../../../lib/ ./zram02.sh; done
You end up with a splat as follows:
kernel: zram: Removed device: zram0
kernel: zram: Added device: zram0
kernel: zram0: detected capacity change from 0 to 209715200
kernel: Adding 104857596k swap on /dev/zram0. <etc>
kernel: zram0: detected capacity change from 209715200 to 0
kernel: zram0: detected capacity change from 0 to 209715200
kernel: ------------[ cut here ]------------
kernel: Error: Removing state 63 which has instances left.
kernel: WARNING: CPU: 7 PID: 70457 at \
kernel/cpu.c:2069 __cpuhp_remove_state_cpuslocked+0xf9/0x100
kernel: Modules linked in: zram(E-) zsmalloc(E) <etc>
kernel: CPU: 7 PID: 70457 Comm: rmmod Tainted: G \
E 5.12.0-rc1-next-20210304 #3
kernel: Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), \
BIOS 1.14.0-2 04/01/2014
kernel: RIP: 0010:__cpuhp_remove_state_cpuslocked+0xf9/0x100
kernel: Code: <etc>
kernel: RSP: 0018:ffffa800c139be98 EFLAGS: 00010282
kernel: RAX: 0000000000000000 RBX: ffffffff9083db58 RCX: ffff9609f7dd86d8
kernel: RDX: 00000000ffffffd8 RSI: 0000000000000027 RDI: ffff9609f7dd86d0
kernel: RBP: 0000000000000000 R08: 0000000000000000 R09: ffffa800c139bcb8
kernel: R10: ffffa800c139bcb0 R11: ffffffff908bea40 R12: 000000000000003f
kernel: R13: 00000000000009d8 R14: 0000000000000000 R15: 0000000000000000
kernel: FS: 00007f1b075a7540(0000) GS:ffff9609f7dc0000(0000) knlGS:<etc>
kernel: CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
kernel: CR2: 00007f1b07610490 CR3: 00000001bd04e000 CR4: 0000000000350ee0
kernel: Call Trace:
kernel: __cpuhp_remove_state+0x2e/0x80
kernel: __do_sys_delete_module+0x190/0x2a0
kernel: do_syscall_64+0x33/0x80
kernel: entry_SYSCALL_64_after_hwframe+0x44/0xae
The "Error: Removing state 63 which has instances left" refers
to the zram per CPU struct zcomp instances left.
[0] https://github.com/linux-test-project/ltp.git
Acked-by: Minchan Kim <minchan(a)kernel.org>
Signed-off-by: Luis Chamberlain <mcgrof(a)kernel.org>
---
drivers/block/zram/zram_drv.c | 63 ++++++++++++++++++++++++++++++-----
1 file changed, 55 insertions(+), 8 deletions(-)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index f61910c65f0f..b26abcb955cc 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -44,6 +44,8 @@ static DEFINE_MUTEX(zram_index_mutex);
static int zram_major;
static const char *default_compressor = CONFIG_ZRAM_DEF_COMP;
+static bool zram_up;
+
/* Module params (documentation at end) */
static unsigned int num_devices = 1;
/*
@@ -1704,6 +1706,7 @@ static void zram_reset_device(struct zram *zram)
comp = zram->comp;
disksize = zram->disksize;
zram->disksize = 0;
+ zram->comp = NULL;
set_capacity_and_notify(zram->disk, 0);
part_stat_set_all(zram->disk->part0, 0);
@@ -1724,9 +1727,18 @@ static ssize_t disksize_store(struct device *dev,
struct zram *zram = dev_to_zram(dev);
int err;
+ mutex_lock(&zram_index_mutex);
+
+ if (!zram_up) {
+ err = -ENODEV;
+ goto out;
+ }
+
disksize = memparse(buf, NULL);
- if (!disksize)
- return -EINVAL;
+ if (!disksize) {
+ err = -EINVAL;
+ goto out;
+ }
down_write(&zram->init_lock);
if (init_done(zram)) {
@@ -1754,12 +1766,16 @@ static ssize_t disksize_store(struct device *dev,
set_capacity_and_notify(zram->disk, zram->disksize >> SECTOR_SHIFT);
up_write(&zram->init_lock);
+ mutex_unlock(&zram_index_mutex);
+
return len;
out_free_meta:
zram_meta_free(zram, disksize);
out_unlock:
up_write(&zram->init_lock);
+out:
+ mutex_unlock(&zram_index_mutex);
return err;
}
@@ -1775,8 +1791,17 @@ static ssize_t reset_store(struct device *dev,
if (ret)
return ret;
- if (!do_reset)
- return -EINVAL;
+ mutex_lock(&zram_index_mutex);
+
+ if (!zram_up) {
+ len = -ENODEV;
+ goto out;
+ }
+
+ if (!do_reset) {
+ len = -EINVAL;
+ goto out;
+ }
zram = dev_to_zram(dev);
bdev = zram->disk->part0;
@@ -1785,7 +1810,8 @@ static ssize_t reset_store(struct device *dev,
/* Do not reset an active device or claimed device */
if (bdev->bd_openers || zram->claim) {
mutex_unlock(&bdev->bd_disk->open_mutex);
- return -EBUSY;
+ len = -EBUSY;
+ goto out;
}
/* From now on, anyone can't open /dev/zram[0-9] */
@@ -1800,6 +1826,8 @@ static ssize_t reset_store(struct device *dev,
zram->claim = false;
mutex_unlock(&bdev->bd_disk->open_mutex);
+out:
+ mutex_unlock(&zram_index_mutex);
return len;
}
@@ -2010,6 +2038,10 @@ static ssize_t hot_add_show(struct class *class,
int ret;
mutex_lock(&zram_index_mutex);
+ if (!zram_up) {
+ mutex_unlock(&zram_index_mutex);
+ return -ENODEV;
+ }
ret = zram_add();
mutex_unlock(&zram_index_mutex);
@@ -2037,6 +2069,11 @@ static ssize_t hot_remove_store(struct class *class,
mutex_lock(&zram_index_mutex);
+ if (!zram_up) {
+ ret = -ENODEV;
+ goto out;
+ }
+
zram = idr_find(&zram_index_idr, dev_id);
if (zram) {
ret = zram_remove(zram);
@@ -2046,6 +2083,7 @@ static ssize_t hot_remove_store(struct class *class,
ret = -ENODEV;
}
+out:
mutex_unlock(&zram_index_mutex);
return ret ? ret : count;
}
@@ -2072,12 +2110,15 @@ static int zram_remove_cb(int id, void *ptr, void *data)
static void destroy_devices(void)
{
+ mutex_lock(&zram_index_mutex);
+ zram_up = false;
class_unregister(&zram_control_class);
idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
zram_debugfs_destroy();
idr_destroy(&zram_index_idr);
unregister_blkdev(zram_major, "zram");
cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
+ mutex_unlock(&zram_index_mutex);
}
static int __init zram_init(void)
@@ -2105,15 +2146,21 @@ static int __init zram_init(void)
return -EBUSY;
}
+ mutex_lock(&zram_index_mutex);
+
while (num_devices != 0) {
- mutex_lock(&zram_index_mutex);
ret = zram_add();
- mutex_unlock(&zram_index_mutex);
- if (ret < 0)
+ if (ret < 0) {
+ mutex_unlock(&zram_index_mutex);
goto out_error;
+ }
num_devices--;
}
+ zram_up = true;
+
+ mutex_unlock(&zram_index_mutex);
+
return 0;
out_error:
--
2.30.2
Now that sysfs has the deadlock race with module removal fixed, enable
the module removal deadlock tests. They were left disabled by default
as otherwise you would deadlock your system:
./tools/testing/selftests/sysfs/sysfs.sh -t 0027
Running test: sysfs_test_0027 - run #0
Test for possible rmmod deadlock while writing x ... ok
./tools/testing/selftests/sysfs/sysfs.sh -t 0028
Running test: sysfs_test_0028 - run #0
Test for possible rmmod deadlock using rtnl_lock while writing x ... ok
Signed-off-by: Luis Chamberlain <mcgrof(a)kernel.org>
---
tools/testing/selftests/sysfs/sysfs.sh | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tools/testing/selftests/sysfs/sysfs.sh b/tools/testing/selftests/sysfs/sysfs.sh
index f928635d0e35..4047ac48e764 100755
--- a/tools/testing/selftests/sysfs/sysfs.sh
+++ b/tools/testing/selftests/sysfs/sysfs.sh
@@ -60,8 +60,8 @@ ALL_TESTS="$ALL_TESTS 0023:1:1:test_dev_y:block"
ALL_TESTS="$ALL_TESTS 0024:1:1:test_dev_x:block"
ALL_TESTS="$ALL_TESTS 0025:1:1:test_dev_y:block"
ALL_TESTS="$ALL_TESTS 0026:1:1:test_dev_y:block"
-ALL_TESTS="$ALL_TESTS 0027:1:0:test_dev_x:block" # deadlock test
-ALL_TESTS="$ALL_TESTS 0028:1:0:test_dev_x:block" # deadlock test with rntl_lock
+ALL_TESTS="$ALL_TESTS 0027:1:1:test_dev_x:block" # deadlock test
+ALL_TESTS="$ALL_TESTS 0028:1:1:test_dev_x:block" # deadlock test with rtnl_lock
ALL_TESTS="$ALL_TESTS 0029:1:1:test_dev_x:block" # kernfs race removal of store
ALL_TESTS="$ALL_TESTS 0030:1:1:test_dev_x:block" # kernfs race removal before mutex
ALL_TESTS="$ALL_TESTS 0031:1:1:test_dev_x:block" # kernfs race removal after mutex
--
2.30.2
If one ends up expanding on this line, checkpatch will complain that the
combination S_IRWXU|S_IRUGO|S_IXUGO should just be replaced with the
octal 0755. Do that.
This makes no functional changes.
Signed-off-by: Luis Chamberlain <mcgrof(a)kernel.org>
---
fs/sysfs/dir.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 59dffd5ca517..b6b6796e1616 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -56,8 +56,7 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
kobject_get_ownership(kobj, &uid, &gid);
- kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
- S_IRWXU | S_IRUGO | S_IXUGO, uid, gid,
+ kn = kernfs_create_dir_ns(parent, kobject_name(kobj), 0755, uid, gid,
kobj, ns);
if (IS_ERR(kn)) {
if (PTR_ERR(kn) == -EEXIST)
--
2.30.2
If one ends up extending this line, checkpatch will complain about the
use of S_IRWXUGO, suggesting it is not preferred and that 0777
should be used instead. Take the tip from checkpatch and make that
change before we do our subsequent changes.
This makes no functional changes.
Signed-off-by: Luis Chamberlain <mcgrof(a)kernel.org>
---
fs/kernfs/symlink.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
index c8f8e41b8411..19a6c71c6ff5 100644
--- a/fs/kernfs/symlink.c
+++ b/fs/kernfs/symlink.c
@@ -36,8 +36,7 @@ struct kernfs_node *kernfs_create_link(struct kernfs_node *parent,
gid = target->iattr->ia_gid;
}
- kn = kernfs_new_node(parent, name, S_IFLNK|S_IRWXUGO, uid, gid,
- KERNFS_LINK);
+ kn = kernfs_new_node(parent, name, S_IFLNK|0777, uid, gid, KERNFS_LINK);
if (!kn)
return ERR_PTR(-ENOMEM);
--
2.30.2
There is quite a bit of tribal knowledge around proper use of
try_module_get() and that it must be used only in a context which
can ensure the module won't be gone during the operation. Document
this little bit of tribal knowledge.
I'm extending this tribal knowledge with new developments which it
seems some folks do not yet believe to be true: we can be sure a
module will exist during the lifetime of a sysfs file operation.
For proof, refer to test_sysfs test #32:
./tools/testing/selftests/sysfs/sysfs.sh -t 0032
Without this being true, the write would fail or worse,
a crash would happen, in this test. It does not.
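A minimal sketch of the pattern being documented (hypothetical driver
code, not from this patch):

  static ssize_t foo_store(struct device *dev,
                           struct device_attribute *attr,
                           const char *buf, size_t len)
  {
          /* Safe: the kernfs active reference pins this sysfs op, and
           * hence the module that created the attribute, while it runs. */
          if (!try_module_get(THIS_MODULE))
                  return -ENODEV; /* module exit in progress, yield to it */
          /* ... work that must not race with module exit ... */
          module_put(THIS_MODULE);
          return len;
  }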
Signed-off-by: Luis Chamberlain <mcgrof(a)kernel.org>
---
include/linux/module.h | 34 ++++++++++++++++++++++++++++++++--
1 file changed, 32 insertions(+), 2 deletions(-)
diff --git a/include/linux/module.h b/include/linux/module.h
index c9f1200b2312..22eacd5e1e85 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -609,10 +609,40 @@ void symbol_put_addr(void *addr);
to handle the error case (which only happens with rmmod --wait). */
extern void __module_get(struct module *module);
-/* This is the Right Way to get a module: if it fails, it's being removed,
- * so pretend it's not there. */
+/**
+ * try_module_get() - yields to module removal and bumps refcnt otherwise
+ * @module: the module we should check for
+ *
+ * This can be used to try to bump the reference count of a module, so to
+ * prevent module removal. The reference count of a module is not allowed
+ * to be incremented if the module is already being removed.
+ *
+ * Care must be taken to ensure the module cannot be removed during the call to
+ * try_module_get(). This can be done by having another entity other than the
+ * module itself increment the module reference count, or through some other
+ * means which guarantees the module could not be removed during an operation.
+ * An example of this latter case is using try_module_get() in a sysfs file
+ * which the module created. The sysfs store / read file operations are
+ * guaranteed to exist through the use of kernfs's active reference (see
+ * kernfs_active()). If a sysfs file operation is being run, the module which
+ * created it must still exist, as the module is in charge of removing the
+ * same sysfs file being read. Also, a sysfs / kernfs file cannot be removed
+ * while that file is active.
+ *
+ * One of the real values of try_module_get() is its module_is_live() check,
+ * which ensures that the caller of try_module_get() can yield to userspace
+ * module removal requests and fail whatever it was about to process.
+ */
extern bool try_module_get(struct module *module);
+/**
+ * module_put() - release a reference count to a module
+ * @module: the module we should release a reference count for
+ *
+ * If you successfully bump a reference count to a module with try_module_get(),
+ * when you are finished you must call module_put() to release that reference
+ * count.
+ */
extern void module_put(struct module *module);
#else /*!CONFIG_MODULE_UNLOAD*/
--
2.30.2
This extends test_sysfs with support for using the failure injection
wait completion and knobs to force a few race conditions which
demonstrate that kernfs active reference protection is sufficient
for kobject / device protection at higher layers.
This adds 4 new tests which try to remove the device attribute
store operation in 4 different situations:
1) at the start of kernfs_fop_write_iter()
2) before the of->mutex is held in kernfs_fop_write_iter()
3) after the of->mutex is held in kernfs_fop_write_iter()
4) after the kernfs node active reference is taken
A write fails in all cases except the last one, test number #32. There
is a good explanation for this: *once* kernfs_get_active() gets called
we have a guarantee that the kernfs entry cannot be removed. If
kernfs_get_active() succeeds, that entry cannot be removed and so
anything trying to remove that entry will have to wait. It is perhaps
not obvious, but since a sysfs write will eventually trigger a
kernfs_get_active() call, and *only* if this succeeds will the sysfs
op be called, this -- together with the fact that you cannot remove a
kernfs entry while it is active -- implies that a module which created
the respective sysfs / kernfs entry *cannot* possibly be removed during
a sysfs operation. Test number 32 provides us with proof of this; if it
were not true, test #32 should crash.
No null dereferences are reproduced, even though such crashes have been
observed in some complex testing cases [0]. If this issue really exists
we should now have enough tools in the test_sysfs toolbox to try to
reproduce it easily without having to poke around other drivers. It is
very likely that the issue reported in [0] was a side issue following
the first bug, which was zram specific. This is why it is important to
isolate the issue and try to reproduce it in a generic form using the
test_sysfs driver.
[0] https://lkml.kernel.org/r/20210623215007.862787-1-mcgrof@kernel.org
Signed-off-by: Luis Chamberlain <mcgrof(a)kernel.org>
---
lib/Kconfig.debug | 3 +
lib/test_sysfs.c | 31 +++++
tools/testing/selftests/sysfs/config | 3 +
tools/testing/selftests/sysfs/sysfs.sh | 175 +++++++++++++++++++++++++
4 files changed, 212 insertions(+)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index a29b7d398c4e..176b822654e5 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2358,6 +2358,9 @@ config TEST_SYSFS
depends on SYSFS
depends on NET
depends on BLOCK
+ select FAULT_INJECTION
+ select FAULT_INJECTION_DEBUG_FS
+ select FAIL_KERNFS_KNOBS
help
This builds the "test_sysfs" module. This driver enables to test the
sysfs file system safely without affecting production knobs which
diff --git a/lib/test_sysfs.c b/lib/test_sysfs.c
index 273fc3f39740..391e0af2864a 100644
--- a/lib/test_sysfs.c
+++ b/lib/test_sysfs.c
@@ -38,6 +38,11 @@
#include <linux/rtnetlink.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
+#include <linux/kernfs.h>
+
+#ifdef CONFIG_FAIL_KERNFS_KNOBS
+MODULE_IMPORT_NS(KERNFS_DEBUG_PRIVATE);
+#endif
static bool enable_lock;
module_param(enable_lock, bool_enable_only, 0644);
@@ -82,6 +87,13 @@ static bool enable_verbose_rmmod;
module_param(enable_verbose_rmmod, bool_enable_only, 0644);
MODULE_PARM_DESC(enable_verbose_rmmod, "enable verbose print messages on rmmod");
+#ifdef CONFIG_FAIL_KERNFS_KNOBS
+static bool enable_completion_on_rmmod;
+module_param(enable_completion_on_rmmod, bool_enable_only, 0644);
+MODULE_PARM_DESC(enable_completion_on_rmmod,
+ "enable sending a kernfs completion on rmmod");
+#endif
+
static int sysfs_test_major;
/**
@@ -289,6 +301,12 @@ static ssize_t config_show(struct device *dev,
"enable_verbose_writes:\t%s\n",
enable_verbose_writes ? "true" : "false");
+#ifdef CONFIG_FAIL_KERNFS_KNOBS
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "enable_completion_on_rmmod:\t%s\n",
+ enable_completion_on_rmmod ? "true" : "false");
+#endif
+
test_dev_config_unlock(test_dev);
return len;
@@ -926,10 +944,23 @@ static int __init test_sysfs_init(void)
}
module_init(test_sysfs_init);
+#ifdef CONFIG_FAIL_KERNFS_KNOBS
+/* The goal is to race our device removal with a pending kernfs -> store call */
+static void test_sysfs_kernfs_send_completion_rmmod(void)
+{
+ if (!enable_completion_on_rmmod)
+ return;
+ complete(&kernfs_debug_wait_completion);
+}
+#else
+static inline void test_sysfs_kernfs_send_completion_rmmod(void) {}
+#endif
+
static void __exit test_sysfs_exit(void)
{
if (enable_debugfs)
debugfs_remove(debugfs_dir);
+ test_sysfs_kernfs_send_completion_rmmod();
if (delay_rmmod_ms)
msleep(delay_rmmod_ms);
unregister_test_dev_sysfs(first_test_dev);
diff --git a/tools/testing/selftests/sysfs/config b/tools/testing/selftests/sysfs/config
index 9196f452ecd5..2876a229f95b 100644
--- a/tools/testing/selftests/sysfs/config
+++ b/tools/testing/selftests/sysfs/config
@@ -1,2 +1,5 @@
CONFIG_SYSFS=m
CONFIG_TEST_SYSFS=m
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAIL_KERNFS_KNOBS=y
diff --git a/tools/testing/selftests/sysfs/sysfs.sh b/tools/testing/selftests/sysfs/sysfs.sh
index b3f4c2236c7f..f928635d0e35 100755
--- a/tools/testing/selftests/sysfs/sysfs.sh
+++ b/tools/testing/selftests/sysfs/sysfs.sh
@@ -62,6 +62,10 @@ ALL_TESTS="$ALL_TESTS 0025:1:1:test_dev_y:block"
ALL_TESTS="$ALL_TESTS 0026:1:1:test_dev_y:block"
ALL_TESTS="$ALL_TESTS 0027:1:0:test_dev_x:block" # deadlock test
ALL_TESTS="$ALL_TESTS 0028:1:0:test_dev_x:block" # deadlock test with rntl_lock
+ALL_TESTS="$ALL_TESTS 0029:1:1:test_dev_x:block" # kernfs race removal of store
+ALL_TESTS="$ALL_TESTS 0030:1:1:test_dev_x:block" # kernfs race removal before mutex
+ALL_TESTS="$ALL_TESTS 0031:1:1:test_dev_x:block" # kernfs race removal after mutex
+ALL_TESTS="$ALL_TESTS 0032:1:1:test_dev_x:block" # kernfs race removal after active
allow_user_defaults()
{
@@ -92,6 +96,9 @@ allow_user_defaults()
if [ -z $SYSFS_DEBUGFS_DIR ]; then
SYSFS_DEBUGFS_DIR="/sys/kernel/debug/test_sysfs"
fi
+ if [ -z $KERNFS_DEBUGFS_DIR ]; then
+ KERNFS_DEBUGFS_DIR="/sys/kernel/debug/kernfs"
+ fi
if [ -z $PAGE_SIZE ]; then
PAGE_SIZE=$(getconf PAGESIZE)
fi
@@ -167,6 +174,14 @@ modprobe_reset_enable_rtnl_lock_on_rmmod()
unset FIRST_MODPROBE_ARGS
}
+modprobe_reset_enable_completion()
+{
+ FIRST_MODPROBE_ARGS="enable_completion_on_rmmod=1 enable_verbose_writes=1"
+ FIRST_MODPROBE_ARGS="$FIRST_MODPROBE_ARGS enable_verbose_rmmod=1 delay_rmmod_ms=0"
+ modprobe_reset
+ unset FIRST_MODPROBE_ARGS
+}
+
load_req_mod()
{
modprobe_reset
@@ -197,6 +212,63 @@ debugfs_reset_first_test_dev_ignore_errors()
echo -n "1" >"$SYSFS_DEBUGFS_DIR"/reset_first_test_dev
}
+debugfs_kernfs_kernfs_fop_write_iter_exists()
+{
+ KNOB_DIR="${KERNFS_DEBUGFS_DIR}/config_fail_kernfs_fop_write_iter"
+ if [[ ! -d $KNOB_DIR ]]; then
+ echo "kernfs debugfs does not exist $KNOB_DIR"
+ return 0;
+ fi
+ KNOB_DEBUGFS="${KERNFS_DEBUGFS_DIR}/fail_kernfs_fop_write_iter"
+ if [[ ! -d $KNOB_DEBUGFS ]]; then
+ echo -n "kernfs debugfs for configuring fail_kernfs_fop_write_iter "
+ echo "does not exist $KNOB_DEBUGFS"
+ return 0;
+ fi
+ return 1
+}
+
+debugfs_kernfs_kernfs_fop_write_iter_set_fail_once()
+{
+ KNOB_DEBUGFS="${KERNFS_DEBUGFS_DIR}/fail_kernfs_fop_write_iter"
+ echo 1 > $KNOB_DEBUGFS/interval
+ echo 100 > $KNOB_DEBUGFS/probability
+ echo 0 > $KNOB_DEBUGFS/space
+ # Disable verbose messages on the kernel ring buffer which may
+ # confuse developers with a kernel panic.
+ echo 0 > $KNOB_DEBUGFS/verbose
+
+ # Fail only once
+ echo 1 > $KNOB_DEBUGFS/times
+}
+
+debugfs_kernfs_kernfs_fop_write_iter_set_fail_never()
+{
+ KNOB_DEBUGFS="${KERNFS_DEBUGFS_DIR}/fail_kernfs_fop_write_iter"
+ echo 0 > $KNOB_DEBUGFS/times
+}
+
+debugfs_kernfs_set_wait_ms()
+{
+ SLEEP_AFTER_WAIT_MS="${KERNFS_DEBUGFS_DIR}/sleep_after_wait_ms"
+ echo $1 > $SLEEP_AFTER_WAIT_MS
+}
+
+debugfs_kernfs_disable_wait_kernfs_fop_write_iter()
+{
+ ENABLE_WAIT_KNOB="${KERNFS_DEBUGFS_DIR}/config_fail_kernfs_fop_write_iter/wait_"
+ for KNOB in ${ENABLE_WAIT_KNOB}*; do
+ echo 0 > $KNOB
+ done
+}
+
+debugfs_kernfs_enable_wait_kernfs_fop_write_iter()
+{
+ ENABLE_WAIT_KNOB="${KERNFS_DEBUGFS_DIR}/config_fail_kernfs_fop_write_iter/wait_$1"
+ echo -n "1" > $ENABLE_WAIT_KNOB
+ return $?
+}
+
set_orig()
{
if [[ ! -z $TARGET ]] && [[ ! -z $ORIG ]]; then
@@ -972,6 +1044,105 @@ sysfs_test_0028()
fi
}
+sysfs_race_kernfs_kernfs_fop_write_iter()
+{
+ TARGET="${DIR}/$(get_test_target $1)"
+ WAIT_AT=$2
+ EXPECT_WRITE_RETURNS=$3
+ MSDELAY=$4
+
+ modprobe_reset_enable_completion
+ ORIG=$(cat "${TARGET}")
+ TEST_STR=$(( $ORIG + 1 ))
+
+ echo -n "Test racing removal of sysfs store op with kernfs $WAIT_AT ... "
+
+ if debugfs_kernfs_kernfs_fop_write_iter_exists; then
+ echo -n "skipping test as CONFIG_FAIL_KERNFS_KNOBS "
+ echo " or CONFIG_FAULT_INJECTION_DEBUG_FS is disabled"
+ return $ksft_skip
+ fi
+
+ # Allow for failing the kernfs_kernfs_fop_write_iter call once,
+ # we'll provide exact context shortly afterwards.
+ debugfs_kernfs_kernfs_fop_write_iter_set_fail_once
+
+ # First disable all waits
+ debugfs_kernfs_disable_wait_kernfs_fop_write_iter
+
+ # Enable a wait_for_completion(&kernfs_debug_wait_completion) at the
+ # specified location inside the kernfs_fop_write_iter() routine
+ debugfs_kernfs_enable_wait_kernfs_fop_write_iter $WAIT_AT
+
+ # Configure kernfs so that after its wait_for_completion() it
+ # will msleep() this amount of time and schedule(). We figure this
+ # will be sufficient time to allow for our module removal to complete.
+ debugfs_kernfs_set_wait_ms $MSDELAY
+
+ # Now we trigger a kernfs write op, which will run kernfs_fop_write_iter,
+ # but will wait until our driver sends a respective completion
+ set_test_ignore_errors &
+ write_pid=$!
+
+ # At this point kernfs_fop_write_iter() hasn't run our op, it's
+ # waiting for our completion at the specified location $WAIT_AT.
+ # We now remove our module which will send a
+ # complete(&kernfs_debug_wait_completion) right before we deregister
+ # our device and the sysfs device attributes are removed.
+ #
+ # After the completion is sent, the test_sysfs driver races with
+ # kernfs to do the device deregistration with the kernfs msleep
+ # and schedule(). This should mean we've forced trying to remove the
+ # module prior to allowing kernfs to run our store operation. If the
+ # race did happen we'll panic with a null dereference on the store op.
+ #
+ # If no race happens we should see no write operation triggered.
+ modprobe -r $TEST_DRIVER > /dev/null 2>&1
+
+ debugfs_kernfs_kernfs_fop_write_iter_set_fail_never
+
+ wait $write_pid
+ if [[ $? -eq $EXPECT_WRITE_RETURNS ]]; then
+ echo "ok"
+ else
+ echo "FAIL" >&2
+ fi
+}
+
+sysfs_test_0029()
+{
+ for delay in 0 2 4 8 16 32 64 128 246 512 1024; do
+ echo "Using delay-after-completion: $delay"
+ sysfs_race_kernfs_kernfs_fop_write_iter 0029 at_start 1 $delay
+ done
+}
+
+sysfs_test_0030()
+{
+ for delay in 0 2 4 8 16 32 64 128 246 512 1024; do
+ echo "Using delay-after-completion: $delay"
+ sysfs_race_kernfs_kernfs_fop_write_iter 0030 before_mutex 1 $delay
+ done
+}
+
+sysfs_test_0031()
+{
+ for delay in 0 2 4 8 16 32 64 128 246 512 1024; do
+ echo "Using delay-after-completion: $delay"
+ sysfs_race_kernfs_kernfs_fop_write_iter 0031 after_mutex 1 $delay
+ done
+}
+
+# A write succeeds if and only if the module removal happens *after* the
+# kernfs active reference is obtained with kernfs_get_active().
+sysfs_test_0032()
+{
+ for delay in 0 2 4 8 16 32 64 128 246 512 1024; do
+ echo "Using delay-after-completion: $delay"
+ sysfs_race_kernfs_kernfs_fop_write_iter 0032 after_active 0 $delay
+ done
+}
+
test_gen_desc()
{
echo -n "$1 x $(get_test_count $1)"
@@ -1013,6 +1184,10 @@ list_tests()
echo "$(test_gen_desc 0026) - block test writing y larger delay and resetting device"
echo "$(test_gen_desc 0027) - test rmmod deadlock while writing x ... "
echo "$(test_gen_desc 0028) - test rmmod deadlock using rtnl_lock while writing x ..."
+ echo "$(test_gen_desc 0029) - racing removal of store op with kernfs at start"
+ echo "$(test_gen_desc 0030) - racing removal of store op with kernfs before mutex"
+ echo "$(test_gen_desc 0031) - racing removal of store op with kernfs after mutex"
+ echo "$(test_gen_desc 0032) - racing removal of store op with kernfs after active"
}
usage()
--
2.30.2
This adds initial failure injection support to kernfs. We start off
with debug knobs which, when enabled, allow test drivers such as
test_sysfs to force certain difficult races to take place with a
high degree of certainty.
This only adds runtime code *iff* the new bool CONFIG_FAIL_KERNFS_KNOBS is
enabled in your kernel. If you don't have this enabled, this provides no
new functionality. When CONFIG_FAIL_KERNFS_KNOBS is disabled the new
routine kernfs_debug_should_wait() ends up being transformed to
if (false), and so the compiler should optimize it out as dead code,
producing no effective binary changes.
We start off by enabling failure injection in kernfs through altering
the way kernfs_fop_write_iter() behaves. We allow the routine
kernfs_fop_write_iter() to wait for a certain condition in the kernel
to occur, after which it will sleep a predefined amount of time. This
lets kernfs users time exactly when they want kernfs_fop_write_iter()
to complete, allowing race conditions to be constructed deliberately
and kernfs correctness to be tested.
You'd boot with this enabled on your kernel command line:
fail_kernfs_fop_write_iter=1,100,0,1
The values are <interval,probability,size,times>; we don't care about
size, so for now we ignore it. The above ensures a failure will trigger
only once.
*How* we allow for this routine to change behaviour is left to knobs we
expose under debugfs:
# ls -1 /sys/kernel/debug/kernfs/config_fail_kernfs_fop_write_iter/
wait_after_active
wait_after_mutex
wait_at_start
wait_before_mutex
A debugfs entry also exists to allow us to sleep a configurable amount
of time after the completion:
/sys/kernel/debug/kernfs/sleep_after_wait_ms
These two sets of knobs allow us to construct races and demonstrate
how the kernfs active reference should suffice to protect against
them.
Enabling CONFIG_FAULT_INJECTION_DEBUG_FS lets us configure the
different fault injection parameters for the new
fail_kernfs_fop_write_iter fault injection at run time:
ls -1 /sys/kernel/debug/kernfs/fail_kernfs_fop_write_iter/
interval
probability
space
task-filter
times
verbose
verbose_ratelimit_burst
verbose_ratelimit_interval_ms
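For instance, to make the next write to a sysfs file wait at the
after_mutex point and then sleep 250 ms once the completion arrives
(using the knobs created by this patch), one would do something like:

  echo 100 > /sys/kernel/debug/kernfs/fail_kernfs_fop_write_iter/probability
  echo 1 > /sys/kernel/debug/kernfs/fail_kernfs_fop_write_iter/interval
  echo 1 > /sys/kernel/debug/kernfs/fail_kernfs_fop_write_iter/times
  echo 1 > /sys/kernel/debug/kernfs/config_fail_kernfs_fop_write_iter/wait_after_mutex
  echo 250 > /sys/kernel/debug/kernfs/sleep_after_wait_ms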
Signed-off-by: Luis Chamberlain <mcgrof(a)kernel.org>
---
.../fault-injection/fault-injection.rst | 22 +++++
MAINTAINERS | 2 +-
fs/kernfs/Makefile | 1 +
fs/kernfs/failure-injection.c | 88 ++++++++++++++++++
fs/kernfs/file.c | 13 +++
fs/kernfs/kernfs-internal.h | 72 +++++++++++++++
include/linux/kernfs.h | 5 +
lib/Kconfig.debug | 10 ++
8 files changed, 212 insertions(+), 1 deletion(-)
create mode 100644 fs/kernfs/failure-injection.c
diff --git a/Documentation/fault-injection/fault-injection.rst b/Documentation/fault-injection/fault-injection.rst
index 4a25c5eb6f07..d4d34b082f47 100644
--- a/Documentation/fault-injection/fault-injection.rst
+++ b/Documentation/fault-injection/fault-injection.rst
@@ -28,6 +28,28 @@ Available fault injection capabilities
injects kernel RPC client and server failures.
+- fail_kernfs_fop_write_iter
+
+ Allows for failures to be enabled inside kernfs_fop_write_iter(). Enabling
+ this does not immediately enable any errors to occur. You must configure
+ how you want this routine to fail or change behaviour by using the debugfs
+ knobs for it:
+
+ # ls -1 /sys/kernel/debug/kernfs/config_fail_kernfs_fop_write_iter/
+ wait_after_active
+ wait_after_mutex
+ wait_at_start
+ wait_before_mutex
+
+ You can also configure how long to sleep after a wait under
+
+ /sys/kernel/debug/kernfs/sleep_after_wait_ms
+
+ If you enable CONFIG_FAULT_INJECTION_DEBUG_FS the fail_kernfs_fop_write_iter
+ failure injection parameters are placed under:
+
+ /sys/kernel/debug/kernfs/fail_kernfs_fop_write_iter/
+
- fail_make_request
injects disk IO errors on devices permitted by setting
diff --git a/MAINTAINERS b/MAINTAINERS
index 28a34384f541..acdbf91058d5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10341,7 +10341,7 @@ M: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
M: Tejun Heo <tj(a)kernel.org>
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
-F: fs/kernfs/
+F: fs/kernfs/*
F: include/linux/kernfs.h
KEXEC
diff --git a/fs/kernfs/Makefile b/fs/kernfs/Makefile
index 4ca54ff54c98..bc5b32ca39f9 100644
--- a/fs/kernfs/Makefile
+++ b/fs/kernfs/Makefile
@@ -4,3 +4,4 @@
#
obj-y := mount.o inode.o dir.o file.o symlink.o
+obj-$(CONFIG_FAIL_KERNFS_KNOBS) += failure-injection.o
diff --git a/fs/kernfs/failure-injection.c b/fs/kernfs/failure-injection.c
new file mode 100644
index 000000000000..4130d202c13b
--- /dev/null
+++ b/fs/kernfs/failure-injection.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/fault-inject.h>
+#include <linux/delay.h>
+
+#include "kernfs-internal.h"
+
+static DECLARE_FAULT_ATTR(fail_kernfs_fop_write_iter);
+struct kernfs_config_fail kernfs_config_fail;
+
+#define kernfs_config_fail(when) \
+ kernfs_config_fail.kernfs_fop_write_iter_fail.wait_ ## when
+
+static int __init setup_fail_kernfs_fop_write_iter(char *str)
+{
+ return setup_fault_attr(&fail_kernfs_fop_write_iter, str);
+}
+
+__setup("fail_kernfs_fop_write_iter=", setup_fail_kernfs_fop_write_iter);
+
+struct dentry *kernfs_debugfs_root;
+struct dentry *config_fail_kernfs_fop_write_iter;
+
+static int __init kernfs_init_failure_injection(void)
+{
+ kernfs_config_fail.sleep_after_wait_ms = 100;
+ kernfs_debugfs_root = debugfs_create_dir("kernfs", NULL);
+
+ fault_create_debugfs_attr("fail_kernfs_fop_write_iter",
+ kernfs_debugfs_root, &fail_kernfs_fop_write_iter);
+
+ config_fail_kernfs_fop_write_iter =
+ debugfs_create_dir("config_fail_kernfs_fop_write_iter",
+ kernfs_debugfs_root);
+
+ debugfs_create_u32("sleep_after_wait_ms", 0600,
+ kernfs_debugfs_root,
+ &kernfs_config_fail.sleep_after_wait_ms);
+
+ debugfs_create_bool("wait_at_start", 0600,
+ config_fail_kernfs_fop_write_iter,
+ &kernfs_config_fail(at_start));
+ debugfs_create_bool("wait_before_mutex", 0600,
+ config_fail_kernfs_fop_write_iter,
+ &kernfs_config_fail(before_mutex));
+ debugfs_create_bool("wait_after_mutex", 0600,
+ config_fail_kernfs_fop_write_iter,
+ &kernfs_config_fail(after_mutex));
+ debugfs_create_bool("wait_after_active", 0600,
+ config_fail_kernfs_fop_write_iter,
+ &kernfs_config_fail(after_active));
+ return 0;
+}
+late_initcall(kernfs_init_failure_injection);
+
+int __kernfs_debug_should_wait_kernfs_fop_write_iter(bool evaluate)
+{
+ if (!evaluate)
+ return 0;
+
+ return should_fail(&fail_kernfs_fop_write_iter, 0);
+}
+
+DECLARE_COMPLETION(kernfs_debug_wait_completion);
+EXPORT_SYMBOL_NS_GPL(kernfs_debug_wait_completion, KERNFS_DEBUG_PRIVATE);
+
+void kernfs_debug_wait(void)
+{
+ unsigned long timeout;
+
+ timeout = wait_for_completion_timeout(&kernfs_debug_wait_completion,
+ msecs_to_jiffies(3000));
+ if (!timeout)
+ pr_info("%s waiting for kernfs_debug_wait_completion timed out\n",
+ __func__);
+ else
+ pr_info("%s received completion with time left on timeout %u ms\n",
+ __func__, jiffies_to_msecs(timeout));
+
+ /*
+ * The goal is to wait for an event, and *then* once we have
+ * reached it, the other side will try to do something which
+ * it thinks will break. So we must give it some time to do
+ * that. The amount of time is configurable.
+ */
+ msleep(kernfs_config_fail.sleep_after_wait_ms);
+ pr_info("%s ended\n", __func__);
+}
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 60e2a86c535e..4479c6580333 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -259,6 +259,9 @@ static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
const struct kernfs_ops *ops;
char *buf;
+ if (kernfs_debug_should_wait(kernfs_fop_write_iter, at_start))
+ kernfs_debug_wait();
+
if (of->atomic_write_len) {
if (len > of->atomic_write_len)
return -E2BIG;
@@ -280,17 +283,27 @@ static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
}
buf[len] = '\0'; /* guarantee string termination */
+ if (kernfs_debug_should_wait(kernfs_fop_write_iter, before_mutex))
+ kernfs_debug_wait();
+
/*
* @of->mutex nests outside active ref and is used both to ensure that
* the ops aren't called concurrently for the same open file.
*/
mutex_lock(&of->mutex);
+
+ if (kernfs_debug_should_wait(kernfs_fop_write_iter, after_mutex))
+ kernfs_debug_wait();
+
if (!kernfs_get_active(of->kn)) {
mutex_unlock(&of->mutex);
len = -ENODEV;
goto out_free;
}
+ if (kernfs_debug_should_wait(kernfs_fop_write_iter, after_active))
+ kernfs_debug_wait();
+
ops = kernfs_ops(of->kn);
if (ops->write)
len = ops->write(of, buf, len, iocb->ki_pos);
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index f9cc912c31e1..9e3abf597e2d 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -18,6 +18,7 @@
#include <linux/kernfs.h>
#include <linux/fs_context.h>
+#include <linux/stringify.h>
struct kernfs_iattrs {
kuid_t ia_uid;
@@ -147,4 +148,75 @@ void kernfs_drain_open_files(struct kernfs_node *kn);
*/
extern const struct inode_operations kernfs_symlink_iops;
+/*
+ * failure-injection.c
+ */
+#ifdef CONFIG_FAIL_KERNFS_KNOBS
+
+/**
+ * struct kernfs_fop_write_iter_fail - how kernfs_fop_write_iter_fail fails
+ *
+ * This lets you configure which part of kernfs_fop_write_iter() should behave
+ * in a specific way to allow userspace to capture possible failures in
+ * kernfs. The wait knobs let you construct race conditions which would
+ * otherwise be difficult to reproduce. A secondary driver signals kernfs's
+ * wait completion when it is done.
+ *
+ * The point of the wait completion failure injection tests is to confirm
+ * that the kernfs active refcount suffices to ensure objects in other
+ * layers are also guaranteed to exist, even though they are opaque to
+ * kernfs. This includes kobjects, devices, and other objects built on top
+ * of this, like the block layer when using sysfs block device attributes.
+ *
+ * @wait_at_start: waits for completion from a third party at the start of
+ * the routine.
+ * @wait_before_mutex: waits for completion from a third party before we
+ * are allowed to continue before the of->mutex is held.
+ * @wait_after_mutex: waits for completion from a third party after we
+ * have held the of->mutex.
+ * @wait_after_active: waits for completion from a third party after we
+ * have refcounted the struct kernfs_node.
+ */
+struct kernfs_fop_write_iter_fail {
+ bool wait_at_start;
+ bool wait_before_mutex;
+ bool wait_after_mutex;
+ bool wait_after_active;
+};
+
+/**
+ * struct kernfs_config_fail - kernfs configuration for failure injection
+ *
+ * You can enable kernfs failure injection on boot, and in particular we currently
+ * only support failures for kernfs_fop_write_iter(). However, we don't
+ * want to always enable errors on this call when failure injection is enabled
+ * as this routine is used by many parts of the kernel for proper functionality.
+ * The compromise we make is we let userspace start enabling which parts it
+ * wants to fail after boot, if and only if failure injection has been enabled.
+ *
+ * @kernfs_fop_write_iter_fail: configuration for how we want to allow
+ * for failure injection on kernfs_fop_write_iter()
+ * @sleep_after_wait_ms: how many ms to wait after completion is received.
+ */
+struct kernfs_config_fail {
+ struct kernfs_fop_write_iter_fail kernfs_fop_write_iter_fail;
+ u32 sleep_after_wait_ms;
+};
+
+extern struct kernfs_config_fail kernfs_config_fail;
+
+#define __kernfs_config_wait_var(func, when) \
+ (kernfs_config_fail. func ## _fail.wait_ ## when)
+#define __kernfs_debug_should_wait_func_name(func) __kernfs_debug_should_wait_## func
+
+#define kernfs_debug_should_wait(func, when) \
+ __kernfs_debug_should_wait_func_name(func)(__kernfs_config_wait_var(func, when))
+int __kernfs_debug_should_wait_kernfs_fop_write_iter(bool evaluate);
+void kernfs_debug_wait(void);
+#else
+static inline void kernfs_init_failure_injection(void) {}
+#define kernfs_debug_should_wait(func, when) (false)
+static inline void kernfs_debug_wait(void) {}
+#endif
+
#endif /* __KERNFS_INTERNAL_H */
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 3ccce6f24548..cd968ee2b503 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -411,6 +411,11 @@ void kernfs_init(void);
struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root,
u64 id);
+
+#ifdef CONFIG_FAIL_KERNFS_KNOBS
+extern struct completion kernfs_debug_wait_completion;
+#endif
+
#else /* CONFIG_KERNFS */
static inline enum kernfs_node_type kernfs_type(struct kernfs_node *kn)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ae19bf1a21b8..a29b7d398c4e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1902,6 +1902,16 @@ config FAULT_INJECTION_USERCOPY
Provides fault-injection capability to inject failures
in usercopy functions (copy_from_user(), get_user(), ...).
+config FAIL_KERNFS_KNOBS
+ bool "Fault-injection support in kernfs"
+ depends on FAULT_INJECTION
+ help
+ Provide fault-injection capability for kernfs. This only enables
+ the error injection functionality. To use it you must configure
+ which path you want to trigger an error on using debugfs under
+ /sys/kernel/debug/kernfs/config_fail_kernfs_fop_write_iter/. By
+ default all of these are disabled.
+
config FAIL_MAKE_REQUEST
bool "Fault-injection capability for disk IO"
depends on FAULT_INJECTION && BLOCK
--
2.30.2
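A usage sketch, not part of the patch: with CONFIG_FAIL_KERNFS_KNOBS=y the
knobs above would be driven from userspace roughly as follows. The
config_fail_* path matches the debugfs files created above; the fail_*
fault_attr directory (probability, times) is assumed to follow the standard
fault-injection layout, and the sysfs attribute written at the end is a
placeholder.

/* Hypothetical driver program for the knobs; error handling trimmed. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define FAIL_DIR "/sys/kernel/debug/kernfs/fail_kernfs_fop_write_iter/"
#define CONF_DIR "/sys/kernel/debug/kernfs/config_fail_kernfs_fop_write_iter/"

static void write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0 || write(fd, val, strlen(val)) < 0)
		perror(path);
	if (fd >= 0)
		close(fd);
}

int main(void)
{
	/* Standard fault_attr knobs: make should_fail() always trigger. */
	write_str(FAIL_DIR "probability", "100");
	write_str(FAIL_DIR "times", "-1");

	/* Stall kernfs_fop_write_iter() once of->mutex is held. */
	write_str(CONF_DIR "wait_after_mutex", "1");

	/*
	 * Placeholder attribute: this write now blocks in kernfs_debug_wait()
	 * until another task completes kernfs_debug_wait_completion, or the
	 * 3000ms timeout expires, opening the race window under test.
	 */
	write_str("/sys/devices/some/attribute", "1");
	return 0;
}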
Two selftest drivers exist under the copyleft-next license.
These drivers were added prior to SPDX practice taking full swing
in the kernel. Now that we have an SPDX tag for copyleft-next-0.3.1
documented, embrace it and remove the boilerplate.
Cc: Goldwyn Rodrigues <rgoldwyn(a)suse.com>
Cc: Kuno Woudt <kuno(a)frob.nl>
Cc: Richard Fontana <fontana(a)sharpeleven.org>
Cc: copyleft-next(a)lists.fedorahosted.org
Cc: Ciaran Farrell <Ciaran.Farrell(a)suse.com>
Cc: Christopher De Nicolo <Christopher.DeNicolo(a)suse.com>
Cc: Christoph Hellwig <hch(a)lst.de>
Cc: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Cc: Thomas Gleixner <tglx(a)linutronix.de>
Cc: Jonathan Corbet <corbet(a)lwn.net>
Cc: Thorsten Leemhuis <linux(a)leemhuis.info>
Cc: Andrew Morton <akpm(a)linux-foundation.org>
Signed-off-by: Luis Chamberlain <mcgrof(a)kernel.org>
---
lib/test_kmod.c | 12 +-----------
lib/test_sysctl.c | 12 +-----------
tools/testing/selftests/kmod/kmod.sh | 13 +------------
tools/testing/selftests/sysctl/sysctl.sh | 12 +-----------
4 files changed, 4 insertions(+), 45 deletions(-)
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index ce1589391413..d62afd89dc63 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR copyleft-next-0.3.1
/*
* kmod stress test driver
*
* Copyright (C) 2017 Luis R. Rodriguez <mcgrof(a)kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or at your option any
- * later version; or, when distributed separately from the Linux kernel or
- * when incorporated into other software packages, subject to the following
- * license:
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of copyleft-next (version 0.3.1 or later) as published
- * at http://copyleft-next.org/.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c
index 3750323973f4..9e5bd10a930a 100644
--- a/lib/test_sysctl.c
+++ b/lib/test_sysctl.c
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR copyleft-next-0.3.1
/*
* proc sysctl test driver
*
* Copyright (C) 2017 Luis R. Rodriguez <mcgrof(a)kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or at your option any
- * later version; or, when distributed separately from the Linux kernel or
- * when incorporated into other software packages, subject to the following
- * license:
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of copyleft-next (version 0.3.1 or later) as published
- * at http://copyleft-next.org/.
*/
/*
diff --git a/tools/testing/selftests/kmod/kmod.sh b/tools/testing/selftests/kmod/kmod.sh
index afd42387e8b2..7189715d7960 100755
--- a/tools/testing/selftests/kmod/kmod.sh
+++ b/tools/testing/selftests/kmod/kmod.sh
@@ -1,18 +1,7 @@
#!/bin/bash
-#
+# SPDX-License-Identifier: GPL-2.0-or-later OR copyleft-next-0.3.1
# Copyright (C) 2017 Luis R. Rodriguez <mcgrof(a)kernel.org>
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; either version 2 of the License, or at your option any
-# later version; or, when distributed separately from the Linux kernel or
-# when incorporated into other software packages, subject to the following
-# license:
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of copyleft-next (version 0.3.1 or later) as published
-# at http://copyleft-next.org/.
-
# This is a stress test script for kmod, the kernel module loader. It uses
# test_kmod which exposes a series of knobs for the API for us so we can
# tweak each test in userspace rather than in kernelspace.
diff --git a/tools/testing/selftests/sysctl/sysctl.sh b/tools/testing/selftests/sysctl/sysctl.sh
index 19515dcb7d04..2046c603a4d4 100755
--- a/tools/testing/selftests/sysctl/sysctl.sh
+++ b/tools/testing/selftests/sysctl/sysctl.sh
@@ -1,16 +1,6 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-or-later OR copyleft-next-0.3.1
# Copyright (C) 2017 Luis R. Rodriguez <mcgrof(a)kernel.org>
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; either version 2 of the License, or at your option any
-# later version; or, when distributed separately from the Linux kernel or
-# when incorporated into other software packages, subject to the following
-# license:
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of copyleft-next (version 0.3.1 or later) as published
-# at http://copyleft-next.org/.
# This performs a series tests against the proc sysctl interface.
--
2.30.2
Add the full text of the copyleft-next-0.3.1 license to the kernel
tree as well as the required tags for reference and tooling.
The license text was copied directly from the copyleft-next project's
git tree [0].
Discussion of using copyleft-next-0.3.1 on Linux started since June,
2016 [1]. In the end Linus' preference was to have drivers use
MODULE_LICENSE("GPL") to make it clear that the GPL applies when it
comes to Linux [2]. Additionally, even though copyleft-next-0.3.1 has
been found to be GPLv2 compatible by three attorneys at SUSE and
Red Hat [3], to err on the side of caution we simply recommend
always using the "OR" language for this license [4].
Even though it has been a goal of the project to be GPLv2 compatible,
to be certain, in 2016 I asked for a clarification about what makes
copyleft-next GPLv2 compatible and also asked for a summary of
benefits. This prompted some minor changes to make compatibility
even clearer, and as of copyleft-next 0.3.1 compatibility should
be crystal clear [5].
The summary of why copyleft-next 0.3.1 is compatible with GPLv2
is explained as follows:
Like GPLv2, copyleft-next requires distribution of derivative works
("Derived Works" in copyleft-next 0.3.x) to be under the same license.
Ordinarily this would make the two licenses incompatible. However,
copyleft-next 0.3.1 says: "If the Derived Work includes material
licensed under the GPL, You may instead license the Derived Work under
the GPL." "GPL" is defined to include GPLv2.
In practice this means copyleft-next code in Linux may be licensed
under the GPLv2; however, there are additional obvious gains when
bringing contributions from Linux outbound, where copyleft-next is
preferred. A summary of reasons why projects outside of Linux might
prefer to use copyleft-next >= 0.3.1 over GPLv2:
o It is much shorter and simpler
o It has an explicit patent license grant, unlike GPLv2
o Its notice preservation conditions are clearer
o More free software/open source licenses are compatible
with it (via section 4)
o The source code requirement triggered by binary distribution
is much simpler in a procedural sense
o Recipients potentially have a contract claim against distributors
who are noncompliant with the source code requirement
o There is a built-in inbound=outbound policy for upstream
contributions (cf. Apache License 2.0 section 5)
o There are disincentives to engage in the controversial practice
of copyleft/proprietary dual-licensing
o In 15 years copyleft expires, which can be advantageous
for legacy code
o There are explicit disincentives to bringing patent infringement
claims accusing the licensed work of infringement (see 10b)
o There is a cure period for licensees who are not compliant
with the license (there is no cure opportunity in GPLv2)
o copyleft-next has a 'built-in or-later' provision
The first driver submission to Linux under this dual strategy was
lib/test_sysctl.c through commit 9308f2f9e7f05 ("test_sysctl: add
dedicated proc sysctl test driver") merged in July 2017. Shortly after
that I also added test_kmod through commit d9c6a72d6fa29 ("kmod: add
test driver to stress test the module loader") in the same month. These
two drivers went in just a few months before the SPDX license practice
kicked in. In 2018 Kuno Woudt went through the process to get SPDX
identifiers for copyleft-next [6] [7]. Although there are SPDX tags
for copyleft-next-0.3.0, we only document use in Linux starting from
copyleft-next-0.3.1 which makes GPLv2 compatibility crystal clear.
This patch will let us update the two Linux selftest drivers in
subsequent patches with their respective SPDX license identifiers and
let us remove repetitive license boilerplate.
[0] https://github.com/copyleft-next/copyleft-next/blob/master/Releases/copylef…
[1] https://lore.kernel.org/lkml/1465929311-13509-1-git-send-email-mcgrof@kerne…
[2] https://lore.kernel.org/lkml/CA+55aFyhxcvD+q7tp+-yrSFDKfR0mOHgyEAe=f_94aKLs…
[3] https://lore.kernel.org/lkml/20170516232702.GL17314@wotan.suse.de/
[4] https://lkml.kernel.org/r/1495234558.7848.122.camel@linux.intel.com
[5] https://lists.fedorahosted.org/archives/list/copyleft-next@lists.fedorahost…
[6] https://spdx.org/licenses/copyleft-next-0.3.0.html
[7] https://spdx.org/licenses/copyleft-next-0.3.1.html
Cc: Goldwyn Rodrigues <rgoldwyn(a)suse.com>
Cc: Kuno Woudt <kuno(a)frob.nl>
Cc: Richard Fontana <fontana(a)sharpeleven.org>
Cc: copyleft-next(a)lists.fedorahosted.org
Cc: Ciaran Farrell <Ciaran.Farrell(a)suse.com>
Cc: Christopher De Nicolo <Christopher.DeNicolo(a)suse.com>
Cc: Christoph Hellwig <hch(a)lst.de>
Cc: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Cc: Thomas Gleixner <tglx(a)linutronix.de>
Cc: Jonathan Corbet <corbet(a)lwn.net>
Cc: Thorsten Leemhuis <linux(a)leemhuis.info>
Cc: Andrew Morton <akpm(a)linux-foundation.org>
Signed-off-by: Luis Chamberlain <mcgrof(a)kernel.org>
---
LICENSES/dual/copyleft-next-0.3.1 | 237 ++++++++++++++++++++++++++++++
1 file changed, 237 insertions(+)
create mode 100644 LICENSES/dual/copyleft-next-0.3.1
diff --git a/LICENSES/dual/copyleft-next-0.3.1 b/LICENSES/dual/copyleft-next-0.3.1
new file mode 100644
index 000000000000..086bcb74b478
--- /dev/null
+++ b/LICENSES/dual/copyleft-next-0.3.1
@@ -0,0 +1,237 @@
+Valid-License-Identifier: copyleft-next-0.3.1
+SPDX-URL: https://spdx.org/licenses/copyleft-next-0.3.1
+Usage-Guide:
+ This license can be used in code, it has been found to be GPLv2 compatible
+ by attorneys at Red Hat and SUSE, however to err on the side of caution,
+ it's best to only use it together with a GPL2 compatible license using "OR".
+ To use the copyleft-next-0.3.1 license put the following SPDX tag/value
+ pair into a comment according to the placement guidelines in the
+ licensing rules documentation:
+ SPDX-License-Identifier: GPL-2.0 OR copyleft-next-0.3.1
+ SPDX-License-Identifier: GPL-2.0-only OR copyleft-next-0.3.1
+ SPDX-License-Identifier: GPL-2.0+ OR copyleft-next-0.3.1
+ SPDX-License-Identifier: GPL-2.0-or-later OR copyleft-next-0.3.1
+License-Text:
+
+=======================================================================
+
+ copyleft-next 0.3.1 ("this License")
+ Release date: 2016-04-29
+
+1. License Grants; No Trademark License
+
+ Subject to the terms of this License, I grant You:
+
+ a) A non-exclusive, worldwide, perpetual, royalty-free, irrevocable
+ copyright license, to reproduce, Distribute, prepare derivative works
+ of, publicly perform and publicly display My Work.
+
+ b) A non-exclusive, worldwide, perpetual, royalty-free, irrevocable
+ patent license under Licensed Patents to make, have made, use, sell,
+ offer for sale, and import Covered Works.
+
+ This License does not grant any rights in My name, trademarks, service
+ marks, or logos.
+
+2. Distribution: General Conditions
+
+ You may Distribute Covered Works, provided that You (i) inform
+ recipients how they can obtain a copy of this License; (ii) satisfy the
+ applicable conditions of sections 3 through 6; and (iii) preserve all
+ Legal Notices contained in My Work (to the extent they remain
+ pertinent). "Legal Notices" means copyright notices, license notices,
+ license texts, and author attributions, but does not include logos,
+ other graphical images, trademarks or trademark legends.
+
+3. Conditions for Distributing Derived Works; Outbound GPL Compatibility
+
+ If You Distribute a Derived Work, You must license the entire Derived
+ Work as a whole under this License, with prominent notice of such
+ licensing. This condition may not be avoided through such means as
+ separate Distribution of portions of the Derived Work.
+
+ If the Derived Work includes material licensed under the GPL, You may
+ instead license the Derived Work under the GPL.
+
+4. Condition Against Further Restrictions; Inbound License Compatibility
+
+ When Distributing a Covered Work, You may not impose further
+ restrictions on the exercise of rights in the Covered Work granted under
+ this License. This condition is not excused merely because such
+ restrictions result from Your compliance with conditions or obligations
+ extrinsic to this License (such as a court order or an agreement with a
+ third party).
+
+ However, You may Distribute a Covered Work incorporating material
+ governed by a license that is both OSI-Approved and FSF-Free as of the
+ release date of this License, provided that compliance with such
+ other license would not conflict with any conditions stated in other
+ sections of this License.
+
+5. Conditions for Distributing Object Code
+
+ You may Distribute an Object Code form of a Covered Work, provided that
+ you accompany the Object Code with a URL through which the Corresponding
+ Source is made available, at no charge, by some standard or customary
+ means of providing network access to source code.
+
+ If you Distribute the Object Code in a physical product or tangible
+ storage medium ("Product"), the Corresponding Source must be available
+ through such URL for two years from the date of Your most recent
+ Distribution of the Object Code in the Product. However, if the Product
+ itself contains or is accompanied by the Corresponding Source (made
+ available in a customarily accessible manner), You need not also comply
+ with the first paragraph of this section.
+
+ Each direct and indirect recipient of the Covered Work from You is an
+ intended third-party beneficiary of this License solely as to this
+ section 5, with the right to enforce its terms.
+
+6. Symmetrical Licensing Condition for Upstream Contributions
+
+ If You Distribute a work to Me specifically for inclusion in or
+ modification of a Covered Work (a "Patch"), and no explicit licensing
+ terms apply to the Patch, You license the Patch under this License, to
+ the extent of Your copyright in the Patch. This condition does not
+ negate the other conditions of this License, if applicable to the Patch.
+
+7. Nullification of Copyleft/Proprietary Dual Licensing
+
+ If I offer to license, for a fee, a Covered Work under terms other than
+ a license that is OSI-Approved or FSF-Free as of the release date of this
+ License or a numbered version of copyleft-next released by the
+ Copyleft-Next Project, then the license I grant You under section 1 is no
+ longer subject to the conditions in sections 3 through 5.
+
+8. Copyleft Sunset
+
+ The conditions in sections 3 through 5 no longer apply once fifteen
+ years have elapsed from the date of My first Distribution of My Work
+ under this License.
+
+9. Pass-Through
+
+ When You Distribute a Covered Work, the recipient automatically receives
+ a license to My Work from Me, subject to the terms of this License.
+
+10. Termination
+
+ Your license grants under section 1 are automatically terminated if You
+
+ a) fail to comply with the conditions of this License, unless You cure
+ such noncompliance within thirty days after becoming aware of it, or
+
+ b) initiate a patent infringement litigation claim (excluding
+ declaratory judgment actions, counterclaims, and cross-claims)
+ alleging that any part of My Work directly or indirectly infringes
+ any patent.
+
+ Termination of Your license grants extends to all copies of Covered
+ Works You subsequently obtain. Termination does not terminate the
+ rights of those who have received copies or rights from You subject to
+ this License.
+
+ To the extent permission to make copies of a Covered Work is necessary
+ merely for running it, such permission is not terminable.
+
+11. Later License Versions
+
+ The Copyleft-Next Project may release new versions of copyleft-next,
+ designated by a distinguishing version number ("Later Versions").
+ Unless I explicitly remove the option of Distributing Covered Works
+ under Later Versions, You may Distribute Covered Works under any Later
+ Version.
+
+** 12. No Warranty **
+** **
+** My Work is provided "as-is", without warranty. You bear the risk **
+** of using it. To the extent permitted by applicable law, each **
+** Distributor of My Work excludes the implied warranties of title, **
+** merchantability, fitness for a particular purpose and **
+** non-infringement. **
+
+** 13. Limitation of Liability **
+** **
+** To the extent permitted by applicable law, in no event will any **
+** Distributor of My Work be liable to You for any damages **
+** whatsoever, whether direct, indirect, special, incidental, or **
+** consequential damages, whether arising under contract, tort **
+** (including negligence), or otherwise, even where the Distributor **
+** knew or should have known about the possibility of such damages. **
+
+14. Severability
+
+ The invalidity or unenforceability of any provision of this License
+ does not affect the validity or enforceability of the remainder of
+ this License. Such provision is to be reformed to the minimum extent
+ necessary to make it valid and enforceable.
+
+15. Definitions
+
+ "Copyleft-Next Project" means the project that maintains the source
+ code repository at <https://github.com/copyleft-next/copyleft-next.git/>
+ as of the release date of this License.
+
+ "Corresponding Source" of a Covered Work in Object Code form means (i)
+ the Source Code form of the Covered Work; (ii) all scripts,
+ instructions and similar information that are reasonably necessary for
+ a skilled developer to generate such Object Code from the Source Code
+ provided under (i); and (iii) a list clearly identifying all Separate
+ Works (other than those provided in compliance with (ii)) that were
+ specifically used in building and (if applicable) installing the
+ Covered Work (for example, a specified proprietary compiler including
+ its version number). Corresponding Source must be machine-readable.
+
+ "Covered Work" means My Work or a Derived Work.
+
+ "Derived Work" means a work of authorship that copies from, modifies,
+ adapts, is based on, is a derivative work of, transforms, translates or
+ contains all or part of My Work, such that copyright permission is
+ required. The following are not Derived Works: (i) Mere Aggregation;
+ (ii) a mere reproduction of My Work; and (iii) if My Work fails to
+ explicitly state an expectation otherwise, a work that merely makes
+ reference to My Work.
+
+ "Distribute" means to distribute, transfer or make a copy available to
+ someone else, such that copyright permission is required.
+
+ "Distributor" means Me and anyone else who Distributes a Covered Work.
+
+ "FSF-Free" means classified as 'free' by the Free Software Foundation.
+
+ "GPL" means a version of the GNU General Public License or the GNU
+ Affero General Public License.
+
+ "I"/"Me"/"My" refers to the individual or legal entity that places My
+ Work under this License. "You"/"Your" refers to the individual or legal
+ entity exercising rights in My Work under this License. A legal entity
+ includes each entity that controls, is controlled by, or is under
+ common control with such legal entity. "Control" means (a) the power to
+ direct the actions of such legal entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent of the
+ outstanding shares or beneficial ownership of such legal entity.
+
+ "Licensed Patents" means all patent claims licensable royalty-free by
+ Me, now or in the future, that are necessarily infringed by making,
+ using, or selling My Work, and excludes claims that would be infringed
+ only as a consequence of further modification of My Work.
+
+ "Mere Aggregation" means an aggregation of a Covered Work with a
+ Separate Work.
+
+ "My Work" means the particular work of authorship I license to You
+ under this License.
+
+ "Object Code" means any form of a work that is not Source Code.
+
+ "OSI-Approved" means approved as 'Open Source' by the Open Source
+ Initiative.
+
+ "Separate Work" means a work that is separate from and independent of a
+ particular Covered Work and is not by its nature an extension or
+ enhancement of the Covered Work, and/or a runtime library, standard
+ library or similar component that is used to generate an Object Code
+ form of a Covered Work.
+
+ "Source Code" means the preferred form of a work for making
+ modifications to it.
--
2.30.2
udmabuf has the following implicit declaration warnings:
udmabuf.c:30:10: warning: implicit declaration of function 'open';
udmabuf.c:42:8: warning: implicit declaration of function 'fcntl'
These are caused by not including fcntl.h and including just
linux/fcntl.h. Fix it to include fcntl.h, which will bring in
linux/fcntl.h. In addition, define __EXPORTED_HEADERS__ to bring in the
F_ADD_SEALS and F_SEAL_SHRINK defines and fix the following errors
that show up when just fcntl.h is included.
udmabuf.c:45:21: error: 'F_ADD_SEALS' undeclared
45 | ret = fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);
| ^~~~~~~~~~~
udmabuf.c:45:34: error: 'F_SEAL_SHRINK' undeclared
45 | ret = fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);
| ^~~~~~~~~~~~~
Signed-off-by: Shuah Khan <skhan(a)linuxfoundation.org>
---
tools/testing/selftests/drivers/dma-buf/udmabuf.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/tools/testing/selftests/drivers/dma-buf/udmabuf.c b/tools/testing/selftests/drivers/dma-buf/udmabuf.c
index 4de902ea14d8..de1c4e6de0b2 100644
--- a/tools/testing/selftests/drivers/dma-buf/udmabuf.c
+++ b/tools/testing/selftests/drivers/dma-buf/udmabuf.c
@@ -1,10 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#define __EXPORTED_HEADERS__
+
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
-#include <linux/fcntl.h>
+#include <fcntl.h>
#include <malloc.h>
#include <sys/ioctl.h>
--
2.30.2
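The fixed include pattern can be checked standalone; below is a minimal
sketch of the memfd sealing step the test performs (assuming glibc 2.27+
for memfd_create(); on newer glibc, _GNU_SOURCE alone already exposes
F_ADD_SEALS, and __EXPORTED_HEADERS__ covers toolchains where it does not):

#define _GNU_SOURCE
#define __EXPORTED_HEADERS__

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int memfd, ret;

	/* udmabuf needs a sealed memfd as its backing store. */
	memfd = memfd_create("udmabuf-test", MFD_ALLOW_SEALING);
	if (memfd < 0) {
		perror("memfd_create");
		return 1;
	}
	if (ftruncate(memfd, 4 * getpagesize()) < 0) {
		perror("ftruncate");
		return 1;
	}
	/* This is the call that needed F_ADD_SEALS/F_SEAL_SHRINK declared. */
	ret = fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);
	if (ret < 0) {
		perror("fcntl");
		return 1;
	}
	printf("memfd created and sealed\n");
	close(memfd);
	return 0;
}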
This series fixes up a few issues introduced into vec-syscfg during
refactoring in the review process, then adds a new test which ensures
that the behaviour when we attempt to set a vector length which is not
supported by the current system matches what is documented in the SVE
ABI documentation.
Mark Brown (4):
selftests: arm64: Fix printf() format mismatch in vec-syscfg
selftests: arm64: Remove bogus error check on writing to files
selftests: arm64: Fix and enable test for setting current VL in
vec-syscfg
selftests: arm64: Verify that all possible vector lengths are handled
tools/testing/selftests/arm64/fp/vec-syscfg.c | 94 ++++++++++++++++---
1 file changed, 81 insertions(+), 13 deletions(-)
base-commit: 6880fa6c56601bb8ed59df6c30fd390cc5f6dd8f
--
2.20.1
Hi Everybody,
This series consists of outstanding SGX selftest changes, rebased
and gathered into a single series that is more easily merged for testing
and development, plus a few more changes added to expand the existing tests.
The outstanding SGX selftest changes included in this series that have already
been submitted separately are:
* An almost two-year-old patch fixing a benign linker warning that is still
present today:
https://lore.kernel.org/linux-sgx/20191017030340.18301-2-sean.j.christopher…
The original patch is included intact; not all email addresses
within it are still valid.
* Latest (v4) of Jarkko Sakkinen's series to add an oversubscription test:
https://lore.kernel.org/linux-sgx/20210809093127.76264-1-jarkko@kernel.org/
* Latest (v2) of Jarkko Sakkinen's patch that provides per-op
parameter structs for the test enclave:
https://lore.kernel.org/linux-sgx/20210812224645.90280-1-jarkko@kernel.org/
The reason why most of these patches are outstanding is that they depend
on a kernel change that is still under discussion. Decision to wait in:
https://lore.kernel.org/linux-sgx/f8674dac5579a8a424de1565f7ffa2b5bf2f8e36.…
The original patch for this kernel dependency continues to be included in
this series as a placeholder until the ongoing discussions are concluded.
The new changes introduced in this series build on Jarkko's outstanding
SGX selftest changes and add new tests for page permissions, exception
handling, and thread entry.
Reinette
Jarkko Sakkinen (9):
x86/sgx: Add /sys/kernel/debug/x86/sgx_total_mem
selftests/sgx: Assign source for each segment
selftests/sgx: Make data measurement for an enclave segment optional
selftests/sgx: Create a heap for the test enclave
selftests/sgx: Dump segments and /proc/self/maps only on failure
selftests/sgx: Encapsulate the test enclave creation
selftests/sgx: Move setup_test_encl() to each TEST_F()
selftests/sgx: Add a new kselftest: unclobbered_vdso_oversubscribed
selftests/sgx: Provide per-op parameter structs for the test enclave
Reinette Chatre (4):
selftests/sgx: Rename test properties in preparation for more enclave
tests
selftests/sgx: Add page permission and exception test
selftests/sgx: Enable multiple thread support
selftests/sgx: Add test for multiple TCS entry
Sean Christopherson (1):
selftests/x86/sgx: Fix a benign linker warning
Documentation/x86/sgx.rst | 6 +
arch/x86/kernel/cpu/sgx/main.c | 10 +-
tools/testing/selftests/sgx/Makefile | 2 +-
tools/testing/selftests/sgx/defines.h | 33 +-
tools/testing/selftests/sgx/load.c | 40 +-
tools/testing/selftests/sgx/main.c | 341 +++++++++++++++---
tools/testing/selftests/sgx/main.h | 7 +-
tools/testing/selftests/sgx/sigstruct.c | 12 +-
tools/testing/selftests/sgx/test_encl.c | 60 ++-
.../selftests/sgx/test_encl_bootstrap.S | 21 +-
10 files changed, 445 insertions(+), 87 deletions(-)
--
2.25.1
From: Steven Rostedt (VMware) <rostedt(a)goodmis.org>
commit 210f9df02611cbe641ced3239122b270fd907d86 upstream.
The selftest for ftrace checks some features by checking if the README has
text that states the feature is supported by that kernel. Unfortunately,
this check gives false positives because it may not be checked if there are
spaces in the string to check. This is due to the compare between the
required variable with the ":README" string stripped, because neither has
quotes around them.
Link: https://lkml.kernel.org/r/20210820204742.087177341@goodmis.org
Cc: "Tzvetomir Stoyanov" <tz.stoyanov(a)gmail.com>
Cc: Tom Zanussi <zanussi(a)kernel.org>
Cc: Shuah Khan <shuah(a)kernel.org>
Cc: Shuah Khan <skhan(a)linuxfoundation.org>
Cc: linux-kselftest(a)vger.kernel.org
Cc: stable(a)vger.kernel.org
Fixes: 1b8eec510ba64 ("selftests/ftrace: Support ":README" suffix for requires")
Acked-by: Masami Hiramatsu <mhiramat(a)kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt(a)goodmis.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
tools/testing/selftests/ftrace/test.d/functions | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/tools/testing/selftests/ftrace/test.d/functions
+++ b/tools/testing/selftests/ftrace/test.d/functions
@@ -115,7 +115,7 @@ check_requires() { # Check required file
echo "Required tracer $t is not configured."
exit_unsupported
fi
- elif [ $r != $i ]; then
+ elif [ "$r" != "$i" ]; then
if ! grep -Fq "$r" README ; then
echo "Required feature pattern \"$r\" is not in README."
exit_unsupported
The XSAVE feature set supports the saving and restoring of state components,
which is used for process context switching. The state components include
x87 state for FPU execution environment, SSE state, AVX state and so on. In
order to ensure that XSAVE works correctly, add a basic XSAVE test for
XSAVE architecture functionality.
This patch set tests and verifies the basic functions of XSAVE/XRSTOR in
user space; during and after signal processing on the x86 platform, the
XSAVE contents of the process should not be changed.
This series introduces only the most basic XSAVE tests. In the
future, the intention is to continue expanding the scope of
these selftests to include more kernel XSAVE-related functionality
and XSAVE-managed features like AMX and shadow stacks.
========
- Change from v3 to v4:
- Improve the comment in patch 1.
- Change from v2 to v3:
- Improve the description of patch 2 git log.
- Change from v1 to v2:
- Improve the cover-letter. (Dave Hansen)
Pengfei Xu (2):
selftests/xsave: test basic XSAVE architecture functionality
selftests/xsave: add xsave test during and after signal handling
tools/testing/selftests/Makefile | 1 +
tools/testing/selftests/xsave/.gitignore | 3 +
tools/testing/selftests/xsave/Makefile | 6 +
tools/testing/selftests/xsave/xsave_common.h | 246 ++++++++++++++++++
.../selftests/xsave/xsave_instruction.c | 83 ++++++
.../selftests/xsave/xsave_signal_handle.c | 184 +++++++++++++
6 files changed, 523 insertions(+)
create mode 100644 tools/testing/selftests/xsave/.gitignore
create mode 100644 tools/testing/selftests/xsave/Makefile
create mode 100644 tools/testing/selftests/xsave/xsave_common.h
create mode 100644 tools/testing/selftests/xsave/xsave_instruction.c
create mode 100644 tools/testing/selftests/xsave/xsave_signal_handle.c
--
2.20.1
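To make the tested property concrete, here is a rough, x86-only userspace
sketch of the idea. The buffer size and mask are assumptions, and the real
tests in tools/testing/selftests/xsave/ populate the registers with known
values and do the dump/compare in assembly so the compiler cannot touch
vector state in between:

#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed upper bound for the XSAVE area; query CPUID leaf 0xD for real. */
#define XSAVE_SIZE 4096

static uint8_t before[XSAVE_SIZE] __attribute__((aligned(64)));
static uint8_t after[XSAVE_SIZE] __attribute__((aligned(64)));

/* Dump the components selected in EDX:EAX; buf must be 64-byte aligned. */
static inline void xsave(uint8_t *buf, uint32_t lo, uint32_t hi)
{
	asm volatile("xsave (%0)" : : "r"(buf), "a"(lo), "d"(hi) : "memory");
}

static void handler(int sig)
{
	/* The handler runs on its own FPU context; whatever it clobbers
	 * must not leak back into the interrupted context. */
	(void)sig;
}

int main(void)
{
	signal(SIGUSR1, handler);

	xsave(before, 0x7, 0);	/* 0x7 = x87 | SSE | AVX */
	raise(SIGUSR1);
	xsave(after, 0x7, 0);

	/* Caveat: compiler-generated code between the two dumps may touch
	 * SSE state legitimately; the real tests avoid this in assembly. */
	if (memcmp(before, after, XSAVE_SIZE))
		printf("XSAVE state changed across signal delivery\n");
	else
		printf("XSAVE state preserved\n");
	return 0;
}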
LKP/0Day reported some build errors about kvm, and the error messages
are not always the same:
- lib/x86_64/processor.c:1083:31: error: ‘KVM_CAP_NESTED_STATE’ undeclared
(first use in this function); did you mean ‘KVM_CAP_PIT_STATE2’?
- lib/test_util.c:189:30: error: ‘MAP_HUGE_16KB’ undeclared (first use
in this function); did you mean ‘MAP_HUGE_16GB’?
Although kvm relies on khdr, the two may still be built in parallel when -j
is specified, which causes compile errors.
Mark the khdr target as .NOTPARALLEL so that it is always built
first.
CC: Philip Li <philip.li(a)intel.com>
Reported-by: kernel test robot <lkp(a)intel.com>
Signed-off-by: Li Zhijian <lizhijian(a)cn.fujitsu.com>
---
tools/testing/selftests/lib.mk | 1 +
1 file changed, 1 insertion(+)
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 7ee911355328..5074b01f2a29 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -48,6 +48,7 @@ ARCH ?= $(SUBARCH)
# When local build is done, headers are installed in the default
# INSTALL_HDR_PATH usr/include.
.PHONY: khdr
+.NOTPARALLEL:
khdr:
ifndef KSFT_KHDR_INSTALL_DONE
ifeq (1,$(DEFAULT_INSTALL_HDR_PATH))
--
2.33.0
Introduction
============
This patch set depends on:
- support for the euid policy keyword for critical data
(https://lore.kernel.org/linux-integrity/20210705115650.3373599-1-roberto.sa…)
- basic DIGLIM
(https://lore.kernel.org/linux-integrity/20210914163401.864635-1-roberto.sas…)
Introduce the remaining features necessary to upload to the kernel
reference values from RPM headers or digest lists in other formats.
Loader: it automatically uploads digest lists from a directory
specified in the kernel configuration and executes a user space
uploader to upload digest lists in a format that is not recognized
by the kernel;
LSM: it identifies digest list parsers and monitors their activity for
integrity evaluation; it protects digest list parsers from other user
space processes considered untrusted;
Digest list generators: user space tools to generate digest lists from
files (in the compact format) or from the RPM DB;
Digest list uploader and parsers: user space tools responsible for
uploading to the kernel digest lists that are not in the compact
format (e.g. those derived from the RPM DB);
Administration guide: it describes the steps necessary to upload to the
kernel all the digests of an RPM-based Linux
distribution, using a custom kernel with the DIGLIM
patches applied.
With these changes, DIGLIM is ready to be used by IMA for measurement and
appraisal (this functionality will be added with a future patch set).
DIGLIM already supports appended signatures, but at the moment they cannot
be interpreted by IMA (unsupported ID PKEY_ID_PGP). Another patch set is
necessary to load the PGP keys from the Linux distribution to the system
keyring and to verify the PGP signatures of the RPM headers.
With the patch sets above and the execution policies for IMA proposed some
time ago, it will be possible to generate a measurement list with digest
lists and unknown files, and enable IMA appraisal in enforcing mode.
The kernel command line would be:
ima_template=ima-modsig ima_policy="exec_tcb|tmpfs|digest_lists|appraise_exec_tcb|appraise_tmpfs|appraise_digest_lists"
The effort required for Linux distribution vendors will be to generate and
sign the digest lists for the digest list uploader and the RPM parser. This
could be done for example in the kernel-tools package (or in a separate
package). Existing package signatures are sufficient for remaining files.
Issues/Questions
================
Lockdep (patch 2/9)
-------------------
I'm using iterate_dir() and file_open_root() to iterate and open files
in a directory. Unfortunately, I get the following warning:
============================================
WARNING: possible recursive locking detected
5.15.0-rc1-dont-use-00049-ga5a881519991 #134 Not tainted
--------------------------------------------
swapper/1 is trying to acquire lock:
0000000066812898 (&sb->s_type->i_mutex_key#7){++++}-{4:4}, at: path_openat+0x75d/0xd20
but task is already holding lock:
0000000066812898 (&sb->s_type->i_mutex_key#7){++++}-{4:4}, at: iterate_dir+0x65/0x250
other info that might help us debug this:
Possible unsafe locking scenario:
CPU0
----
lock(&sb->s_type->i_mutex_key#7);
lock(&sb->s_type->i_mutex_key#7);
*** DEADLOCK ***
due to the fact that path_openat() might be trying to lock the directory
already locked by iterate_dir(). What would be a good way to avoid it?
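For reference, a kernel-side sketch of the pattern behind the splat (the
diglim_* names are invented for illustration): opening an entry from inside
the iterate_dir() actor makes path_openat()'s slow-path lookup take the
directory i_rwsem that iterate_dir() already holds.

#include <linux/fs.h>

struct diglim_dir_ctx {
	struct dir_context ctx;
	struct path *dir_path;
};

static int diglim_upload_one(struct dir_context *__ctx, const char *name,
			     int namelen, loff_t offset, u64 ino,
			     unsigned int d_type)
{
	struct diglim_dir_ctx *c =
		container_of(__ctx, struct diglim_dir_ctx, ctx);
	struct file *file;

	/* Lookup may take the parent's i_rwsem again -> lockdep splat. */
	file = file_open_root(c->dir_path, name, O_RDONLY, 0);
	if (IS_ERR(file))
		return 0;
	/* ... parse/upload the digest list ... */
	fput(file);
	return 0;
}

static void diglim_upload_dir(struct path *dir_path, struct file *dir)
{
	struct diglim_dir_ctx c = {
		.ctx.actor = diglim_upload_one,
		.dir_path = dir_path,
	};

	iterate_dir(dir, &c.ctx); /* holds the dir's i_rwsem across the actor */
}

One common way around the recursion is to record the entry names during
iterate_dir() and open them only after it returns and the lock is dropped.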
Inode availability in security_file_free() (patch 3/9)
------------------------------------------------------
It seems that this hook is called when the last reference to a file is
released. After enabling debugging, sometimes the kernel reported that the
inode I was trying to access was already freed.
To avoid this situation, I'm grabbing an additional reference of the inode
in the security_file_open() hook, to ensure that the inode does not
disappear, and I'm releasing it in the security_file_free() hook. Is this
solution acceptable?
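A sketch of the described refcounting workaround, again with invented
names; igrab() and iput() are the real inode pin/unpin helpers:

#include <linux/fs.h>

/* security_file_open hook: pin the inode for files we track. */
static int diglim_file_open(struct file *file)
{
	if (diglim_tracked(file))	/* hypothetical predicate */
		igrab(file_inode(file));
	return 0;
}

/* security_file_free hook: the pinned inode is still valid here. */
static void diglim_file_free(struct file *file)
{
	if (diglim_tracked(file)) {	/* hypothetical predicate */
		/* ... integrity bookkeeping on file_inode(file) ... */
		iput(file_inode(file));
	}
}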
Roberto Sassu (9):
ima: Introduce new hook DIGEST_LIST_CHECK
diglim: Loader
diglim: LSM
diglim: Tests - LSM
diglim: Compact digest list generator
diglim: RPM digest list generator
diglim: Digest list uploader
diglim: RPM parser
diglim: Admin guide
Documentation/admin-guide/diglim.rst | 136 +++++
Documentation/admin-guide/index.rst | 1 +
.../security/diglim/implementation.rst | 16 +
Documentation/security/diglim/index.rst | 1 +
Documentation/security/diglim/lsm.rst | 65 +++
Documentation/security/diglim/tests.rst | 18 +-
MAINTAINERS | 10 +
security/integrity/diglim/Kconfig | 14 +
security/integrity/diglim/Makefile | 2 +-
security/integrity/diglim/diglim.h | 27 +
security/integrity/diglim/fs.c | 3 +
security/integrity/diglim/hooks.c | 436 ++++++++++++++++
security/integrity/diglim/loader.c | 92 ++++
security/integrity/iint.c | 1 +
security/integrity/ima/ima.h | 1 +
security/integrity/ima/ima_main.c | 3 +-
security/integrity/ima/ima_policy.c | 3 +
security/integrity/integrity.h | 8 +
tools/diglim/Makefile | 27 +
tools/diglim/common.c | 79 +++
tools/diglim/common.h | 59 +++
tools/diglim/compact_gen.c | 349 +++++++++++++
tools/diglim/rpm_gen.c | 334 ++++++++++++
tools/diglim/rpm_parser.c | 483 ++++++++++++++++++
tools/diglim/upload_digest_lists.c | 238 +++++++++
tools/testing/selftests/diglim/Makefile | 12 +-
tools/testing/selftests/diglim/common.h | 9 +
tools/testing/selftests/diglim/selftest.c | 357 ++++++++++++-
28 files changed, 2764 insertions(+), 20 deletions(-)
create mode 100644 Documentation/admin-guide/diglim.rst
create mode 100644 Documentation/security/diglim/lsm.rst
create mode 100644 security/integrity/diglim/hooks.c
create mode 100644 security/integrity/diglim/loader.c
create mode 100644 tools/diglim/Makefile
create mode 100644 tools/diglim/common.c
create mode 100644 tools/diglim/common.h
create mode 100644 tools/diglim/compact_gen.c
create mode 100644 tools/diglim/rpm_gen.c
create mode 100644 tools/diglim/rpm_parser.c
create mode 100644 tools/diglim/upload_digest_lists.c
--
2.25.1
Hi, I have been sharing an old VFAT-formatted hard disk from one PC to
another using Samba, and sometime after kernel 5.14.0 it stopped working
(apparently no longer being shared, as the mount.smbfs command
on the client failed with error -13, yet mount.smbfs still worked for
ext3 filesystems shared from the same machine which had the VFAT
filesystem).
The only error I saw on the machine with the VFAT-formatted hard disk
was that the output of the mount command had truncated the name of the
mount to only include the first 4 characters of the base name of the
mount point.
e.g. when the VFAT filesystem was mounted on /mnt/victoria, the output of
the mount command showed the filesystem mounted on /mnt/vict
The kernel build used was i386 with gcc 11.2.0-4 using
make -j2 menuconfig bindeb-pkg
.config available on request.
The git-bisect was:
victoria:/usr/src/linux# git bisect log
git bisect start '--' 'fs/fat'
# good: [7d2a07b769330c34b4deabeed939325c77a7ec2f] Linux 5.14
git bisect good 7d2a07b769330c34b4deabeed939325c77a7ec2f
# bad: [a3fa7a101dcff93791d1b1bdb3affcad1410c8c1] Merge branches 'akpm' and 'akpm-hotfixes' (patches from Andrew)
git bisect bad a3fa7a101dcff93791d1b1bdb3affcad1410c8c1
# good: [edb0872f44ec9976ea6d052cb4b93cd2d23ac2ba] block: move the bdi from the request_queue to the gendisk
git bisect good edb0872f44ec9976ea6d052cb4b93cd2d23ac2ba
# good: [b0d4adaf3b3c4402d9c3b6186e02aa1e4f7985cd] fat: Add KUnit tests for checksums and timestamps
git bisect good b0d4adaf3b3c4402d9c3b6186e02aa1e4f7985cd
# bad: [c815f04ba94940fbc303a6ea9669e7da87f8e77d] Merge tag 'linux-kselftest-kunit-5.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest
git bisect bad c815f04ba94940fbc303a6ea9669e7da87f8e77d
# first bad commit: [c815f04ba94940fbc303a6ea9669e7da87f8e77d] Merge tag 'linux-kselftest-kunit-5.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest
amarsh04@victoria:~$ mount | grep vict
/dev/sdb6 on /vict type vfat (rw,relatime,uid=65534,gid=65534,fmask=0000,dmask=0000,allow_utime=0022,codepage=437,iocharset=utf8,shortname=mixed,errors=remount-ro)
Happy to run any further tests but kernel builds are slow on this machine (Pentium D).
Arthur.
--
Sent from my Android device with K-9 Mail. Please excuse my brevity.
This test assumes that the declared kunit_suite object is the exact one
which is being executed, which KUnit will not guarantee [1].
Specifically, `suite->log` is not initialized until a suite object is
executed. So if KUnit makes a copy of the suite and runs that instead,
this test dereferences an invalid pointer and (hopefully) segfaults.
N.B. since we no longer assume this, we can no longer verify that
`suite->log` is *not* allocated during normal execution.
An alternative to this patch that would allow us to test that would
require exposing an API for the current test to get its current suite.
Exposing that for one internal kunit test seems like overkill, and
grants users more footguns (e.g. reusing a test case in multiple suites
and changing behavior based on the suite name, dynamically modifying the
setup/cleanup funcs, storing/reading stuff out of the suite->log, etc.).
[1] In a subsequent patch, KUnit will allow running subsets of test
cases within a suite by making a copy of the suite w/ the filtered test
list. But there are other reasons KUnit might execute a copy, e.g. if it
ever wants to support parallel execution of different suites, recovering
from errors and restarting suites
Signed-off-by: Daniel Latypov <dlatypov(a)google.com>
Reviewed-by: Brendan Higgins <brendanhiggins(a)google.com>
---
lib/kunit/kunit-test.c | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index d69efcbed624..555601d17f79 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -415,12 +415,15 @@ static struct kunit_suite kunit_log_test_suite = {
static void kunit_log_test(struct kunit *test)
{
- struct kunit_suite *suite = &kunit_log_test_suite;
+ struct kunit_suite suite;
+
+ suite.log = kunit_kzalloc(test, KUNIT_LOG_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, suite.log);
kunit_log(KERN_INFO, test, "put this in log.");
kunit_log(KERN_INFO, test, "this too.");
- kunit_log(KERN_INFO, suite, "add to suite log.");
- kunit_log(KERN_INFO, suite, "along with this.");
+ kunit_log(KERN_INFO, &suite, "add to suite log.");
+ kunit_log(KERN_INFO, &suite, "along with this.");
#ifdef CONFIG_KUNIT_DEBUGFS
KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
@@ -428,12 +431,11 @@ static void kunit_log_test(struct kunit *test)
KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
strstr(test->log, "this too."));
KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
- strstr(suite->log, "add to suite log."));
+ strstr(suite.log, "add to suite log."));
KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
- strstr(suite->log, "along with this."));
+ strstr(suite.log, "along with this."));
#else
KUNIT_EXPECT_PTR_EQ(test, test->log, (char *)NULL);
- KUNIT_EXPECT_PTR_EQ(test, suite->log, (char *)NULL);
#endif
}
base-commit: a3fa7a101dcff93791d1b1bdb3affcad1410c8c1
--
2.33.0.309.g3052b89438-goog
From: Baolin Wang <baolin.wang(a)linux.alibaba.com>
[ Upstream commit d538ddb97e066571e4fc58b832f40739621b42bb ]
The openat2 test suite fails on ARM64 because the definition of
O_LARGEFILE is different on ARM64. Fix the problem by defining
the correct O_LARGEFILE definition on ARM64.
"openat2 unexpectedly returned # 3['.../tools/testing/selftests/openat2']
with 208000 (!= 208000)
not ok 102 openat2 with incompatible flags (O_PATH | O_LARGEFILE) fails
with -22 (Invalid argument)"
Fixed change log to improve formatting and clarity:
Shuah Khan <skhan(a)linuxfoundation.org>
Signed-off-by: Baolin Wang <baolin.wang(a)linux.alibaba.com>
Reviewed-by: Aleksa Sarai <cyphar(a)cyphar.com>
Acked-by: Christian Brauner <christian.brauner(a)ubuntu.com>
Signed-off-by: Shuah Khan <skhan(a)linuxfoundation.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
tools/testing/selftests/openat2/openat2_test.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/tools/testing/selftests/openat2/openat2_test.c b/tools/testing/selftests/openat2/openat2_test.c
index b386367c606b..5354cef55c6c 100644
--- a/tools/testing/selftests/openat2/openat2_test.c
+++ b/tools/testing/selftests/openat2/openat2_test.c
@@ -22,7 +22,11 @@
* XXX: This is wrong on {mips, parisc, powerpc, sparc}.
*/
#undef O_LARGEFILE
+#ifdef __aarch64__
+#define O_LARGEFILE 0x20000
+#else
#define O_LARGEFILE 0x8000
+#endif
struct open_how_ext {
struct open_how inner;
--
2.30.2
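The assertion is easy to reproduce outside the harness; below is a short
sketch (assuming a v5.6+ kernel and libc headers that define SYS_openat2;
openat2() has no glibc wrapper, hence the raw syscall) of the check that
the corrected O_LARGEFILE value makes pass:

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/openat2.h>

/* Mirror the test's per-arch value; glibc defines O_LARGEFILE as 0 on
 * most 64-bit ABIs, which would hide the bug being tested. */
#undef O_LARGEFILE
#ifdef __aarch64__
#define O_LARGEFILE 0x20000
#else
#define O_LARGEFILE 0x8000
#endif

int main(void)
{
	struct open_how how = { .flags = O_PATH | O_LARGEFILE };
	long fd = syscall(SYS_openat2, AT_FDCWD, ".", &how, sizeof(how));

	if (fd < 0)
		printf("openat2: %s (EINVAL expected)\n", strerror(errno));
	else
		printf("unexpected success: %ld\n", fd);
	return 0;
}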
From: Baolin Wang <baolin.wang(a)linux.alibaba.com>
[ Upstream commit d538ddb97e066571e4fc58b832f40739621b42bb ]
The openat2 test suite fails on ARM64 because the definition of
O_LARGEFILE is different on ARM64. Fix the problem by defining
the correct O_LARGEFILE definition on ARM64.
"openat2 unexpectedly returned # 3['.../tools/testing/selftests/openat2']
with 208000 (!= 208000)
not ok 102 openat2 with incompatible flags (O_PATH | O_LARGEFILE) fails
with -22 (Invalid argument)"
Fixed change log to improve formatting and clarity:
Shuah Khan <skhan(a)linuxfoundation.org>
Signed-off-by: Baolin Wang <baolin.wang(a)linux.alibaba.com>
Reviewed-by: Aleksa Sarai <cyphar(a)cyphar.com>
Acked-by: Christian Brauner <christian.brauner(a)ubuntu.com>
Signed-off-by: Shuah Khan <skhan(a)linuxfoundation.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
tools/testing/selftests/openat2/openat2_test.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/tools/testing/selftests/openat2/openat2_test.c b/tools/testing/selftests/openat2/openat2_test.c
index 381d874cce99..300af824b07b 100644
--- a/tools/testing/selftests/openat2/openat2_test.c
+++ b/tools/testing/selftests/openat2/openat2_test.c
@@ -22,7 +22,11 @@
* XXX: This is wrong on {mips, parisc, powerpc, sparc}.
*/
#undef O_LARGEFILE
+#ifdef __aarch64__
+#define O_LARGEFILE 0x20000
+#else
#define O_LARGEFILE 0x8000
+#endif
struct open_how_ext {
struct open_how inner;
--
2.30.2
From: Baolin Wang <baolin.wang(a)linux.alibaba.com>
[ Upstream commit d538ddb97e066571e4fc58b832f40739621b42bb ]
The openat2 test suite fails on ARM64 because the definition of
O_LARGEFILE is different on ARM64. Fix the problem by defining
the correct O_LARGEFILE definition on ARM64.
"openat2 unexpectedly returned # 3['.../tools/testing/selftests/openat2']
with 208000 (!= 208000)
not ok 102 openat2 with incompatible flags (O_PATH | O_LARGEFILE) fails
with -22 (Invalid argument)"
Fixed change log to improve formatting and clarity:
Shuah Khan <skhan(a)linuxfoundation.org>
Signed-off-by: Baolin Wang <baolin.wang(a)linux.alibaba.com>
Reviewed-by: Aleksa Sarai <cyphar(a)cyphar.com>
Acked-by: Christian Brauner <christian.brauner(a)ubuntu.com>
Signed-off-by: Shuah Khan <skhan(a)linuxfoundation.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
tools/testing/selftests/openat2/openat2_test.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/tools/testing/selftests/openat2/openat2_test.c b/tools/testing/selftests/openat2/openat2_test.c
index d7ec1e7da0d0..1bddbe934204 100644
--- a/tools/testing/selftests/openat2/openat2_test.c
+++ b/tools/testing/selftests/openat2/openat2_test.c
@@ -22,7 +22,11 @@
* XXX: This is wrong on {mips, parisc, powerpc, sparc}.
*/
#undef O_LARGEFILE
+#ifdef __aarch64__
+#define O_LARGEFILE 0x20000
+#else
#define O_LARGEFILE 0x8000
+#endif
struct open_how_ext {
struct open_how inner;
--
2.30.2
From: Li Zhijian <lizhijian(a)cn.fujitsu.com>
[ Upstream commit 2d82d73da35b72b53fe0d96350a2b8d929d07e42 ]
0Day robot observed that it easily times out on a heavily loaded host.
-------------------
# selftests: bpf: test_maps
# Fork 1024 tasks to 'test_update_delete'
# Fork 1024 tasks to 'test_update_delete'
# Fork 100 tasks to 'test_hashmap'
# Fork 100 tasks to 'test_hashmap_percpu'
# Fork 100 tasks to 'test_hashmap_sizes'
# Fork 100 tasks to 'test_hashmap_walk'
# Fork 100 tasks to 'test_arraymap'
# Fork 100 tasks to 'test_arraymap_percpu'
# Failed sockmap unexpected timeout
not ok 3 selftests: bpf: test_maps # exit=1
# selftests: bpf: test_lru_map
# nr_cpus:8
-------------------
Since this test will be scheduled by 0Day to a random host that could have
only a few CPUs (2-8), enlarge the timeout to avoid a false NG report.
In practice, I tried to pin it to only one CPU with 'taskset 0x01 ./test_maps',
and learned 10s is likely enough, but I still prefer the larger value 30.
Reported-by: kernel test robot <lkp(a)intel.com>
Signed-off-by: Li Zhijian <lizhijian(a)cn.fujitsu.com>
Signed-off-by: Alexei Starovoitov <ast(a)kernel.org>
Acked-by: Song Liu <songliubraving(a)fb.com>
Link: https://lore.kernel.org/bpf/20210820015556.23276-2-lizhijian@cn.fujitsu.com
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
tools/testing/selftests/bpf/test_maps.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 96c6238a4a1f..3f503ad37a2b 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -730,7 +730,7 @@ static void test_sockmap(int tasks, void *data)
FD_ZERO(&w);
FD_SET(sfd[3], &w);
- to.tv_sec = 1;
+ to.tv_sec = 30;
to.tv_usec = 0;
s = select(sfd[3] + 1, &w, NULL, NULL, &to);
if (s == -1) {
--
2.30.2
From: Li Zhijian <lizhijian(a)cn.fujitsu.com>
[ Upstream commit 2d82d73da35b72b53fe0d96350a2b8d929d07e42 ]
0Day robot observed that it easily times out on a heavily loaded host.
-------------------
# selftests: bpf: test_maps
# Fork 1024 tasks to 'test_update_delete'
# Fork 1024 tasks to 'test_update_delete'
# Fork 100 tasks to 'test_hashmap'
# Fork 100 tasks to 'test_hashmap_percpu'
# Fork 100 tasks to 'test_hashmap_sizes'
# Fork 100 tasks to 'test_hashmap_walk'
# Fork 100 tasks to 'test_arraymap'
# Fork 100 tasks to 'test_arraymap_percpu'
# Failed sockmap unexpected timeout
not ok 3 selftests: bpf: test_maps # exit=1
# selftests: bpf: test_lru_map
# nr_cpus:8
-------------------
Since this test will be scheduled by 0Day to a random host that could have
only a few CPUs (2-8), enlarge the timeout to avoid a false NG report.
In practice, I tried to pin it to only one CPU with 'taskset 0x01 ./test_maps',
and learned 10s is likely enough, but I still prefer the larger value 30.
Reported-by: kernel test robot <lkp(a)intel.com>
Signed-off-by: Li Zhijian <lizhijian(a)cn.fujitsu.com>
Signed-off-by: Alexei Starovoitov <ast(a)kernel.org>
Acked-by: Song Liu <songliubraving(a)fb.com>
Link: https://lore.kernel.org/bpf/20210820015556.23276-2-lizhijian@cn.fujitsu.com
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
tools/testing/selftests/bpf/test_maps.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 4e202217fae1..87ba89df9802 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -796,7 +796,7 @@ static void test_sockmap(int tasks, void *data)
FD_ZERO(&w);
FD_SET(sfd[3], &w);
- to.tv_sec = 1;
+ to.tv_sec = 30;
to.tv_usec = 0;
s = select(sfd[3] + 1, &w, NULL, NULL, &to);
if (s == -1) {
--
2.30.2
From: Li Zhijian <lizhijian(a)cn.fujitsu.com>
[ Upstream commit 2d82d73da35b72b53fe0d96350a2b8d929d07e42 ]
0Day robot observed that it easily times out on a heavily loaded host.
-------------------
# selftests: bpf: test_maps
# Fork 1024 tasks to 'test_update_delete'
# Fork 1024 tasks to 'test_update_delete'
# Fork 100 tasks to 'test_hashmap'
# Fork 100 tasks to 'test_hashmap_percpu'
# Fork 100 tasks to 'test_hashmap_sizes'
# Fork 100 tasks to 'test_hashmap_walk'
# Fork 100 tasks to 'test_arraymap'
# Fork 100 tasks to 'test_arraymap_percpu'
# Failed sockmap unexpected timeout
not ok 3 selftests: bpf: test_maps # exit=1
# selftests: bpf: test_lru_map
# nr_cpus:8
-------------------
Since this test will be scheduled by 0Day to a random host that could have
only a few CPUs (2-8), enlarge the timeout to avoid a false NG report.
In practice, I tried pinning it to a single CPU with 'taskset 0x01 ./test_maps'
and found that 10s is likely enough, but I still prefer the larger value of 30.
Reported-by: kernel test robot <lkp(a)intel.com>
Signed-off-by: Li Zhijian <lizhijian(a)cn.fujitsu.com>
Signed-off-by: Alexei Starovoitov <ast(a)kernel.org>
Acked-by: Song Liu <songliubraving(a)fb.com>
Link: https://lore.kernel.org/bpf/20210820015556.23276-2-lizhijian@cn.fujitsu.com
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
tools/testing/selftests/bpf/test_maps.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 1c4219ceced2..45c7a55f0b8b 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -972,7 +972,7 @@ static void test_sockmap(unsigned int tasks, void *data)
FD_ZERO(&w);
FD_SET(sfd[3], &w);
- to.tv_sec = 1;
+ to.tv_sec = 30;
to.tv_usec = 0;
s = select(sfd[3] + 1, &w, NULL, NULL, &to);
if (s == -1) {
--
2.30.2
From: Jussi Maki <joamaki(a)gmail.com>
[ Upstream commit 95413846cca37f20000dd095cf6d91f8777129d7 ]
The program type cannot be deduced from the section name 'tx', which causes
an invalid argument error when trying to load xdp_tx.o using the skeleton.
Rename the section to "xdp" so that libbpf can deduce the type.
Signed-off-by: Jussi Maki <joamaki(a)gmail.com>
Signed-off-by: Daniel Borkmann <daniel(a)iogearbox.net>
Acked-by: Andrii Nakryiko <andrii(a)kernel.org>
Link: https://lore.kernel.org/bpf/20210731055738.16820-7-joamaki@gmail.com
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
tools/testing/selftests/bpf/progs/xdp_tx.c | 2 +-
tools/testing/selftests/bpf/test_xdp_veth.sh | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/tools/testing/selftests/bpf/progs/xdp_tx.c b/tools/testing/selftests/bpf/progs/xdp_tx.c
index 57912e7c94b0..9ed477776eca 100644
--- a/tools/testing/selftests/bpf/progs/xdp_tx.c
+++ b/tools/testing/selftests/bpf/progs/xdp_tx.c
@@ -3,7 +3,7 @@
#include <linux/bpf.h>
#include "bpf_helpers.h"
-SEC("tx")
+SEC("xdp")
int xdp_tx(struct xdp_md *xdp)
{
return XDP_TX;
diff --git a/tools/testing/selftests/bpf/test_xdp_veth.sh b/tools/testing/selftests/bpf/test_xdp_veth.sh
index ba8ffcdaac30..995278e684b6 100755
--- a/tools/testing/selftests/bpf/test_xdp_veth.sh
+++ b/tools/testing/selftests/bpf/test_xdp_veth.sh
@@ -108,7 +108,7 @@ ip link set dev veth2 xdp pinned $BPF_DIR/progs/redirect_map_1
ip link set dev veth3 xdp pinned $BPF_DIR/progs/redirect_map_2
ip -n ns1 link set dev veth11 xdp obj xdp_dummy.o sec xdp_dummy
-ip -n ns2 link set dev veth22 xdp obj xdp_tx.o sec tx
+ip -n ns2 link set dev veth22 xdp obj xdp_tx.o sec xdp
ip -n ns3 link set dev veth33 xdp obj xdp_dummy.o sec xdp_dummy
trap cleanup EXIT
--
2.30.2
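The underlying mechanism: libbpf maps an ELF section name to a BPF program
type, so SEC("xdp") lets it infer BPF_PROG_TYPE_XDP, while an arbitrary name
like "tx" cannot be mapped. A minimal self-contained XDP program following
the convention (a sketch, not part of this patch):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* "xdp" is a section name libbpf recognizes, so skeleton loading can
 * deduce BPF_PROG_TYPE_XDP without an explicit program-type hint. */
SEC("xdp")
int xdp_pass_sketch(struct xdp_md *ctx)
{
        return XDP_PASS;
}

char _license[] SEC("license") = "GPL";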
From: Li Zhijian <lizhijian(a)cn.fujitsu.com>
[ Upstream commit 2d82d73da35b72b53fe0d96350a2b8d929d07e42 ]
0Day robot observed that it easily times out on a heavily loaded host.
-------------------
# selftests: bpf: test_maps
# Fork 1024 tasks to 'test_update_delete'
# Fork 1024 tasks to 'test_update_delete'
# Fork 100 tasks to 'test_hashmap'
# Fork 100 tasks to 'test_hashmap_percpu'
# Fork 100 tasks to 'test_hashmap_sizes'
# Fork 100 tasks to 'test_hashmap_walk'
# Fork 100 tasks to 'test_arraymap'
# Fork 100 tasks to 'test_arraymap_percpu'
# Failed sockmap unexpected timeout
not ok 3 selftests: bpf: test_maps # exit=1
# selftests: bpf: test_lru_map
# nr_cpus:8
-------------------
Since this test will be scheduled by 0Day to a random host that could have
only a few CPUs (2-8), enlarge the timeout to avoid a false NG report.
In practice, I tried pinning it to a single CPU with 'taskset 0x01 ./test_maps'
and found that 10s is likely enough, but I still prefer the larger value of 30.
Reported-by: kernel test robot <lkp(a)intel.com>
Signed-off-by: Li Zhijian <lizhijian(a)cn.fujitsu.com>
Signed-off-by: Alexei Starovoitov <ast(a)kernel.org>
Acked-by: Song Liu <songliubraving(a)fb.com>
Link: https://lore.kernel.org/bpf/20210820015556.23276-2-lizhijian@cn.fujitsu.com
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
tools/testing/selftests/bpf/test_maps.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 0d92ebcb335d..179e680e8d13 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -968,7 +968,7 @@ static void test_sockmap(unsigned int tasks, void *data)
FD_ZERO(&w);
FD_SET(sfd[3], &w);
- to.tv_sec = 1;
+ to.tv_sec = 30;
to.tv_usec = 0;
s = select(sfd[3] + 1, &w, NULL, NULL, &to);
if (s == -1) {
--
2.30.2
From: Mark Brown <broonie(a)kernel.org>
[ Upstream commit 0c69bd2ca6ee20064dde7853cd749284e053a874 ]
The PAC tests check whether the system supports the relevant PAC features,
but instead of skipping the tests when they can't be executed, they fail
them, which makes things look like they're not working when they are.
Signed-off-by: Mark Brown <broonie(a)kernel.org>
Link: https://lore.kernel.org/r/20210819165723.43903-1-broonie@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas(a)arm.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
tools/testing/selftests/arm64/pauth/pac.c | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/tools/testing/selftests/arm64/pauth/pac.c b/tools/testing/selftests/arm64/pauth/pac.c
index 592fe538506e..b743daa772f5 100644
--- a/tools/testing/selftests/arm64/pauth/pac.c
+++ b/tools/testing/selftests/arm64/pauth/pac.c
@@ -25,13 +25,15 @@
do { \
unsigned long hwcaps = getauxval(AT_HWCAP); \
/* data key instructions are not in NOP space. This prevents a SIGILL */ \
- ASSERT_NE(0, hwcaps & HWCAP_PACA) TH_LOG("PAUTH not enabled"); \
+ if (!(hwcaps & HWCAP_PACA)) \
+ SKIP(return, "PAUTH not enabled"); \
} while (0)
#define ASSERT_GENERIC_PAUTH_ENABLED() \
do { \
unsigned long hwcaps = getauxval(AT_HWCAP); \
/* generic key instructions are not in NOP space. This prevents a SIGILL */ \
- ASSERT_NE(0, hwcaps & HWCAP_PACG) TH_LOG("Generic PAUTH not enabled"); \
+ if (!(hwcaps & HWCAP_PACG)) \
+ SKIP(return, "Generic PAUTH not enabled"); \
} while (0)
void sign_specific(struct signatures *sign, size_t val)
@@ -256,7 +258,7 @@ TEST(single_thread_different_keys)
unsigned long hwcaps = getauxval(AT_HWCAP);
/* generic and data key instructions are not in NOP space. This prevents a SIGILL */
- ASSERT_NE(0, hwcaps & HWCAP_PACA) TH_LOG("PAUTH not enabled");
+ ASSERT_PAUTH_ENABLED();
if (!(hwcaps & HWCAP_PACG)) {
TH_LOG("WARNING: Generic PAUTH not enabled. Skipping generic key checks");
nkeys = NKEYS - 1;
@@ -299,7 +301,7 @@ TEST(exec_changed_keys)
unsigned long hwcaps = getauxval(AT_HWCAP);
/* generic and data key instructions are not in NOP space. This prevents a SIGILL */
- ASSERT_NE(0, hwcaps & HWCAP_PACA) TH_LOG("PAUTH not enabled");
+ ASSERT_PAUTH_ENABLED();
if (!(hwcaps & HWCAP_PACG)) {
TH_LOG("WARNING: Generic PAUTH not enabled. Skipping generic key checks");
nkeys = NKEYS - 1;
--
2.30.2
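The fix uses the kselftest harness SKIP() mechanism, which reports the test
as skipped instead of failed. A condensed sketch of the pattern, assuming an
arm64 build where HWCAP_PACA comes from the uapi headers:

#include <sys/auxv.h>
#include <asm/hwcap.h>          /* arm64 uapi; defines HWCAP_PACA */
#include "kselftest_harness.h"

TEST(pac_supported)
{
        unsigned long hwcaps = getauxval(AT_HWCAP);

        /* skip, rather than fail, when the CPU lacks the feature */
        if (!(hwcaps & HWCAP_PACA))
                SKIP(return, "PAUTH not enabled");
        /* ... exercise the data-key instructions here ... */
}

TEST_HARNESS_MAIN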
From: Mark Brown <broonie(a)kernel.org>
[ Upstream commit 83e5dcbece4ea67ec3ad94b897e2844184802fd7 ]
When skipping the tests due to a lack of system support for MTE, we
currently print a message saying FAIL, which makes it look like the test
failed even though it actually reported KSFT_SKIP, creating some
confusion. Change the message to say SKIP instead so things are clearer.
Signed-off-by: Mark Brown <broonie(a)kernel.org>
Link: https://lore.kernel.org/r/20210819172902.56211-1-broonie@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas(a)arm.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
tools/testing/selftests/arm64/mte/mte_common_util.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/testing/selftests/arm64/mte/mte_common_util.c b/tools/testing/selftests/arm64/mte/mte_common_util.c
index 70665ba88cbb..2703bd628d06 100644
--- a/tools/testing/selftests/arm64/mte/mte_common_util.c
+++ b/tools/testing/selftests/arm64/mte/mte_common_util.c
@@ -285,7 +285,7 @@ int mte_default_setup(void)
int ret;
if (!(hwcaps2 & HWCAP2_MTE)) {
- ksft_print_msg("FAIL: MTE features unavailable\n");
+ ksft_print_msg("SKIP: MTE features unavailable\n");
return KSFT_SKIP;
}
/* Get current mte mode */
--
2.30.2
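Outside the harness macros, the plain kselftest convention is to print a
message and return KSFT_SKIP; the SKIP prefix just makes the log line match
the reported result. A minimal sketch, again assuming an arm64 build:

#include <sys/auxv.h>
#include <asm/hwcap.h>          /* arm64 uapi; defines HWCAP2_MTE */
#include "../kselftest.h"       /* ksft_print_msg(), KSFT_* codes */

static int mte_setup_sketch(void)
{
        unsigned long hwcaps2 = getauxval(AT_HWCAP2);

        if (!(hwcaps2 & HWCAP2_MTE)) {
                ksft_print_msg("SKIP: MTE features unavailable\n");
                return KSFT_SKIP;       /* a skip, not a failure */
        }
        return KSFT_PASS;
}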
From: Yonghong Song <yhs(a)fb.com>
[ Upstream commit b16ac5bf732a5e23d164cf908ec7742d6a6120d3 ]
libbpf CI has reported that the send_signal test is flaky, although
I am not able to reproduce it in my local environment.
But I am able to reproduce it with the on-demand libbpf CI ([1]).
Code analysis suggests the following possible reason:
the failing subtest runs a bpf program in softirq context, and
bpf_send_signal() only sends to a fork of the "test_progs"
process. If the underlying current task is
not "test_progs", bpf_send_signal() will not be triggered
and the subtest will fail.
To reduce the chance that the underlying process is not
the intended one, this patch boosts the scheduling priority to
-20 (the highest allowed by the setpriority() call). I did
10 runs of the on-demand libbpf CI with this patch and
did not observe any failures.
[1] https://github.com/libbpf/libbpf/actions/workflows/ondemand.yml
Signed-off-by: Yonghong Song <yhs(a)fb.com>
Signed-off-by: Andrii Nakryiko <andrii(a)kernel.org>
Link: https://lore.kernel.org/bpf/20210817190923.3186725-1-yhs@fb.com
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
.../selftests/bpf/prog_tests/send_signal.c | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c
index 7043e6ded0e6..75b72c751772 100644
--- a/tools/testing/selftests/bpf/prog_tests/send_signal.c
+++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
+#include <sys/time.h>
+#include <sys/resource.h>
#include "test_send_signal_kern.skel.h"
static volatile int sigusr1_received = 0;
@@ -41,12 +43,23 @@ static void test_send_signal_common(struct perf_event_attr *attr,
}
if (pid == 0) {
+ int old_prio;
+
/* install signal handler and notify parent */
signal(SIGUSR1, sigusr1_handler);
close(pipe_c2p[0]); /* close read */
close(pipe_p2c[1]); /* close write */
+ /* boost with a high priority so we got a higher chance
+ * that if an interrupt happens, the underlying task
+ * is this process.
+ */
+ errno = 0;
+ old_prio = getpriority(PRIO_PROCESS, 0);
+ ASSERT_OK(errno, "getpriority");
+ ASSERT_OK(setpriority(PRIO_PROCESS, 0, -20), "setpriority");
+
/* notify parent signal handler is installed */
CHECK(write(pipe_c2p[1], buf, 1) != 1, "pipe_write", "err %d\n", -errno);
@@ -62,6 +75,9 @@ static void test_send_signal_common(struct perf_event_attr *attr,
/* wait for parent notification and exit */
CHECK(read(pipe_p2c[0], buf, 1) != 1, "pipe_read", "err %d\n", -errno);
+ /* restore the old priority */
+ ASSERT_OK(setpriority(PRIO_PROCESS, 0, old_prio), "setpriority");
+
close(pipe_c2p[1]);
close(pipe_p2c[0]);
exit(0);
--
2.30.2
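The getpriority()/setpriority() sequence above is subtle because -1 is a
valid return value from getpriority(), so errno has to be cleared first. A
standalone sketch of the boost-and-restore pattern:

#include <errno.h>
#include <stdio.h>
#include <sys/resource.h>
#include <sys/time.h>

int main(void)
{
        int old_prio;

        errno = 0;
        old_prio = getpriority(PRIO_PROCESS, 0);        /* -1 can be valid */
        if (errno != 0) {
                perror("getpriority");
                return 1;
        }
        /* a nice value below 0 (here -20) requires CAP_SYS_NICE */
        if (setpriority(PRIO_PROCESS, 0, -20) != 0) {
                perror("setpriority");
                return 1;
        }

        /* ... timing-sensitive work runs here ... */

        setpriority(PRIO_PROCESS, 0, old_prio);         /* restore */
        return 0;
}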
From: Jussi Maki <joamaki(a)gmail.com>
[ Upstream commit 95413846cca37f20000dd095cf6d91f8777129d7 ]
The program type cannot be deduced from the section name 'tx', which causes
an invalid argument error when trying to load xdp_tx.o using the skeleton.
Rename the section to "xdp" so that libbpf can deduce the type.
Signed-off-by: Jussi Maki <joamaki(a)gmail.com>
Signed-off-by: Daniel Borkmann <daniel(a)iogearbox.net>
Acked-by: Andrii Nakryiko <andrii(a)kernel.org>
Link: https://lore.kernel.org/bpf/20210731055738.16820-7-joamaki@gmail.com
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
tools/testing/selftests/bpf/progs/xdp_tx.c | 2 +-
tools/testing/selftests/bpf/test_xdp_veth.sh | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/tools/testing/selftests/bpf/progs/xdp_tx.c b/tools/testing/selftests/bpf/progs/xdp_tx.c
index 94e6c2b281cb..5f725c720e00 100644
--- a/tools/testing/selftests/bpf/progs/xdp_tx.c
+++ b/tools/testing/selftests/bpf/progs/xdp_tx.c
@@ -3,7 +3,7 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-SEC("tx")
+SEC("xdp")
int xdp_tx(struct xdp_md *xdp)
{
return XDP_TX;
diff --git a/tools/testing/selftests/bpf/test_xdp_veth.sh b/tools/testing/selftests/bpf/test_xdp_veth.sh
index ba8ffcdaac30..995278e684b6 100755
--- a/tools/testing/selftests/bpf/test_xdp_veth.sh
+++ b/tools/testing/selftests/bpf/test_xdp_veth.sh
@@ -108,7 +108,7 @@ ip link set dev veth2 xdp pinned $BPF_DIR/progs/redirect_map_1
ip link set dev veth3 xdp pinned $BPF_DIR/progs/redirect_map_2
ip -n ns1 link set dev veth11 xdp obj xdp_dummy.o sec xdp_dummy
-ip -n ns2 link set dev veth22 xdp obj xdp_tx.o sec tx
+ip -n ns2 link set dev veth22 xdp obj xdp_tx.o sec xdp
ip -n ns3 link set dev veth33 xdp obj xdp_dummy.o sec xdp_dummy
trap cleanup EXIT
--
2.30.2
From: Li Zhijian <lizhijian(a)cn.fujitsu.com>
[ Upstream commit 2d82d73da35b72b53fe0d96350a2b8d929d07e42 ]
0Day robot observed that it easily times out on a heavily loaded host.
-------------------
# selftests: bpf: test_maps
# Fork 1024 tasks to 'test_update_delete'
# Fork 1024 tasks to 'test_update_delete'
# Fork 100 tasks to 'test_hashmap'
# Fork 100 tasks to 'test_hashmap_percpu'
# Fork 100 tasks to 'test_hashmap_sizes'
# Fork 100 tasks to 'test_hashmap_walk'
# Fork 100 tasks to 'test_arraymap'
# Fork 100 tasks to 'test_arraymap_percpu'
# Failed sockmap unexpected timeout
not ok 3 selftests: bpf: test_maps # exit=1
# selftests: bpf: test_lru_map
# nr_cpus:8
-------------------
Since this test will be scheduled by 0Day to a random host that could have
only a few CPUs (2-8), enlarge the timeout to avoid a false NG report.
In practice, I tried pinning it to a single CPU with 'taskset 0x01 ./test_maps'
and found that 10s is likely enough, but I still prefer the larger value of 30.
Reported-by: kernel test robot <lkp(a)intel.com>
Signed-off-by: Li Zhijian <lizhijian(a)cn.fujitsu.com>
Signed-off-by: Alexei Starovoitov <ast(a)kernel.org>
Acked-by: Song Liu <songliubraving(a)fb.com>
Link: https://lore.kernel.org/bpf/20210820015556.23276-2-lizhijian@cn.fujitsu.com
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
tools/testing/selftests/bpf/test_maps.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 51adc42b2b40..7fed68492a2e 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -968,7 +968,7 @@ static void test_sockmap(unsigned int tasks, void *data)
FD_ZERO(&w);
FD_SET(sfd[3], &w);
- to.tv_sec = 1;
+ to.tv_sec = 30;
to.tv_usec = 0;
s = select(sfd[3] + 1, &w, NULL, NULL, &to);
if (s == -1) {
--
2.30.2
From: Mark Brown <broonie(a)kernel.org>
[ Upstream commit 0c69bd2ca6ee20064dde7853cd749284e053a874 ]
The PAC tests check whether the system supports the relevant PAC features,
but instead of skipping the tests when they can't be executed, they fail
them, which makes things look like they're not working when they are.
Signed-off-by: Mark Brown <broonie(a)kernel.org>
Link: https://lore.kernel.org/r/20210819165723.43903-1-broonie@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas(a)arm.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
tools/testing/selftests/arm64/pauth/pac.c | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/tools/testing/selftests/arm64/pauth/pac.c b/tools/testing/selftests/arm64/pauth/pac.c
index 592fe538506e..b743daa772f5 100644
--- a/tools/testing/selftests/arm64/pauth/pac.c
+++ b/tools/testing/selftests/arm64/pauth/pac.c
@@ -25,13 +25,15 @@
do { \
unsigned long hwcaps = getauxval(AT_HWCAP); \
/* data key instructions are not in NOP space. This prevents a SIGILL */ \
- ASSERT_NE(0, hwcaps & HWCAP_PACA) TH_LOG("PAUTH not enabled"); \
+ if (!(hwcaps & HWCAP_PACA)) \
+ SKIP(return, "PAUTH not enabled"); \
} while (0)
#define ASSERT_GENERIC_PAUTH_ENABLED() \
do { \
unsigned long hwcaps = getauxval(AT_HWCAP); \
/* generic key instructions are not in NOP space. This prevents a SIGILL */ \
- ASSERT_NE(0, hwcaps & HWCAP_PACG) TH_LOG("Generic PAUTH not enabled"); \
+ if (!(hwcaps & HWCAP_PACG)) \
+ SKIP(return, "Generic PAUTH not enabled"); \
} while (0)
void sign_specific(struct signatures *sign, size_t val)
@@ -256,7 +258,7 @@ TEST(single_thread_different_keys)
unsigned long hwcaps = getauxval(AT_HWCAP);
/* generic and data key instructions are not in NOP space. This prevents a SIGILL */
- ASSERT_NE(0, hwcaps & HWCAP_PACA) TH_LOG("PAUTH not enabled");
+ ASSERT_PAUTH_ENABLED();
if (!(hwcaps & HWCAP_PACG)) {
TH_LOG("WARNING: Generic PAUTH not enabled. Skipping generic key checks");
nkeys = NKEYS - 1;
@@ -299,7 +301,7 @@ TEST(exec_changed_keys)
unsigned long hwcaps = getauxval(AT_HWCAP);
/* generic and data key instructions are not in NOP space. This prevents a SIGILL */
- ASSERT_NE(0, hwcaps & HWCAP_PACA) TH_LOG("PAUTH not enabled");
+ ASSERT_PAUTH_ENABLED();
if (!(hwcaps & HWCAP_PACG)) {
TH_LOG("WARNING: Generic PAUTH not enabled. Skipping generic key checks");
nkeys = NKEYS - 1;
--
2.30.2
From: Mark Brown <broonie(a)kernel.org>
[ Upstream commit 83e5dcbece4ea67ec3ad94b897e2844184802fd7 ]
When skipping the tests due to a lack of system support for MTE, we
currently print a message saying FAIL, which makes it look like the test
failed even though it actually reported KSFT_SKIP, creating some
confusion. Change the message to say SKIP instead so things are clearer.
Signed-off-by: Mark Brown <broonie(a)kernel.org>
Link: https://lore.kernel.org/r/20210819172902.56211-1-broonie@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas(a)arm.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
tools/testing/selftests/arm64/mte/mte_common_util.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/testing/selftests/arm64/mte/mte_common_util.c b/tools/testing/selftests/arm64/mte/mte_common_util.c
index f50ac31920d1..0328a1e08f65 100644
--- a/tools/testing/selftests/arm64/mte/mte_common_util.c
+++ b/tools/testing/selftests/arm64/mte/mte_common_util.c
@@ -298,7 +298,7 @@ int mte_default_setup(void)
int ret;
if (!(hwcaps2 & HWCAP2_MTE)) {
- ksft_print_msg("FAIL: MTE features unavailable\n");
+ ksft_print_msg("SKIP: MTE features unavailable\n");
return KSFT_SKIP;
}
/* Get current mte mode */
--
2.30.2
From: Yonghong Song <yhs(a)fb.com>
[ Upstream commit b16ac5bf732a5e23d164cf908ec7742d6a6120d3 ]
libbpf CI has reported that the send_signal test is flaky, although
I am not able to reproduce it in my local environment.
But I am able to reproduce it with the on-demand libbpf CI ([1]).
Code analysis suggests the following possible reason:
the failing subtest runs a bpf program in softirq context, and
bpf_send_signal() only sends to a fork of the "test_progs"
process. If the underlying current task is
not "test_progs", bpf_send_signal() will not be triggered
and the subtest will fail.
To reduce the chance that the underlying process is not
the intended one, this patch boosts the scheduling priority to
-20 (the highest allowed by the setpriority() call). I did
10 runs of the on-demand libbpf CI with this patch and
did not observe any failures.
[1] https://github.com/libbpf/libbpf/actions/workflows/ondemand.yml
Signed-off-by: Yonghong Song <yhs(a)fb.com>
Signed-off-by: Andrii Nakryiko <andrii(a)kernel.org>
Link: https://lore.kernel.org/bpf/20210817190923.3186725-1-yhs@fb.com
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
.../selftests/bpf/prog_tests/send_signal.c | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c
index 7043e6ded0e6..75b72c751772 100644
--- a/tools/testing/selftests/bpf/prog_tests/send_signal.c
+++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
+#include <sys/time.h>
+#include <sys/resource.h>
#include "test_send_signal_kern.skel.h"
static volatile int sigusr1_received = 0;
@@ -41,12 +43,23 @@ static void test_send_signal_common(struct perf_event_attr *attr,
}
if (pid == 0) {
+ int old_prio;
+
/* install signal handler and notify parent */
signal(SIGUSR1, sigusr1_handler);
close(pipe_c2p[0]); /* close read */
close(pipe_p2c[1]); /* close write */
+ /* boost with a high priority so we got a higher chance
+ * that if an interrupt happens, the underlying task
+ * is this process.
+ */
+ errno = 0;
+ old_prio = getpriority(PRIO_PROCESS, 0);
+ ASSERT_OK(errno, "getpriority");
+ ASSERT_OK(setpriority(PRIO_PROCESS, 0, -20), "setpriority");
+
/* notify parent signal handler is installed */
CHECK(write(pipe_c2p[1], buf, 1) != 1, "pipe_write", "err %d\n", -errno);
@@ -62,6 +75,9 @@ static void test_send_signal_common(struct perf_event_attr *attr,
/* wait for parent notification and exit */
CHECK(read(pipe_p2c[0], buf, 1) != 1, "pipe_read", "err %d\n", -errno);
+ /* restore the old priority */
+ ASSERT_OK(setpriority(PRIO_PROCESS, 0, old_prio), "setpriority");
+
close(pipe_c2p[1]);
close(pipe_p2c[0]);
exit(0);
--
2.30.2
From: Jussi Maki <joamaki(a)gmail.com>
[ Upstream commit 95413846cca37f20000dd095cf6d91f8777129d7 ]
The program type cannot be deduced from the section name 'tx', which causes
an invalid argument error when trying to load xdp_tx.o using the skeleton.
Rename the section to "xdp" so that libbpf can deduce the type.
Signed-off-by: Jussi Maki <joamaki(a)gmail.com>
Signed-off-by: Daniel Borkmann <daniel(a)iogearbox.net>
Acked-by: Andrii Nakryiko <andrii(a)kernel.org>
Link: https://lore.kernel.org/bpf/20210731055738.16820-7-joamaki@gmail.com
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
tools/testing/selftests/bpf/progs/xdp_tx.c | 2 +-
tools/testing/selftests/bpf/test_xdp_veth.sh | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/tools/testing/selftests/bpf/progs/xdp_tx.c b/tools/testing/selftests/bpf/progs/xdp_tx.c
index 94e6c2b281cb..5f725c720e00 100644
--- a/tools/testing/selftests/bpf/progs/xdp_tx.c
+++ b/tools/testing/selftests/bpf/progs/xdp_tx.c
@@ -3,7 +3,7 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-SEC("tx")
+SEC("xdp")
int xdp_tx(struct xdp_md *xdp)
{
return XDP_TX;
diff --git a/tools/testing/selftests/bpf/test_xdp_veth.sh b/tools/testing/selftests/bpf/test_xdp_veth.sh
index ba8ffcdaac30..995278e684b6 100755
--- a/tools/testing/selftests/bpf/test_xdp_veth.sh
+++ b/tools/testing/selftests/bpf/test_xdp_veth.sh
@@ -108,7 +108,7 @@ ip link set dev veth2 xdp pinned $BPF_DIR/progs/redirect_map_1
ip link set dev veth3 xdp pinned $BPF_DIR/progs/redirect_map_2
ip -n ns1 link set dev veth11 xdp obj xdp_dummy.o sec xdp_dummy
-ip -n ns2 link set dev veth22 xdp obj xdp_tx.o sec tx
+ip -n ns2 link set dev veth22 xdp obj xdp_tx.o sec xdp
ip -n ns3 link set dev veth33 xdp obj xdp_dummy.o sec xdp_dummy
trap cleanup EXIT
--
2.30.2
From: Li Zhijian <lizhijian(a)cn.fujitsu.com>
[ Upstream commit 2d82d73da35b72b53fe0d96350a2b8d929d07e42 ]
0Day robot observed that it easily times out on a heavily loaded host.
-------------------
# selftests: bpf: test_maps
# Fork 1024 tasks to 'test_update_delete'
# Fork 1024 tasks to 'test_update_delete'
# Fork 100 tasks to 'test_hashmap'
# Fork 100 tasks to 'test_hashmap_percpu'
# Fork 100 tasks to 'test_hashmap_sizes'
# Fork 100 tasks to 'test_hashmap_walk'
# Fork 100 tasks to 'test_arraymap'
# Fork 100 tasks to 'test_arraymap_percpu'
# Failed sockmap unexpected timeout
not ok 3 selftests: bpf: test_maps # exit=1
# selftests: bpf: test_lru_map
# nr_cpus:8
-------------------
Since this test will be scheduled by 0Day to a random host that could have
only a few CPUs (2-8), enlarge the timeout to avoid a false NG report.
In practice, I tried pinning it to a single CPU with 'taskset 0x01 ./test_maps'
and found that 10s is likely enough, but I still prefer the larger value of 30.
Reported-by: kernel test robot <lkp(a)intel.com>
Signed-off-by: Li Zhijian <lizhijian(a)cn.fujitsu.com>
Signed-off-by: Alexei Starovoitov <ast(a)kernel.org>
Acked-by: Song Liu <songliubraving(a)fb.com>
Link: https://lore.kernel.org/bpf/20210820015556.23276-2-lizhijian@cn.fujitsu.com
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
tools/testing/selftests/bpf/test_maps.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 30cbf5d98f7d..de58a3070eea 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -985,7 +985,7 @@ static void test_sockmap(unsigned int tasks, void *data)
FD_ZERO(&w);
FD_SET(sfd[3], &w);
- to.tv_sec = 1;
+ to.tv_sec = 30;
to.tv_usec = 0;
s = select(sfd[3] + 1, &w, NULL, NULL, &to);
if (s == -1) {
--
2.30.2
From: Mark Brown <broonie(a)kernel.org>
[ Upstream commit 0c69bd2ca6ee20064dde7853cd749284e053a874 ]
The PAC tests check whether the system supports the relevant PAC features,
but instead of skipping the tests when they can't be executed, they fail
them, which makes things look like they're not working when they are.
Signed-off-by: Mark Brown <broonie(a)kernel.org>
Link: https://lore.kernel.org/r/20210819165723.43903-1-broonie@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas(a)arm.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
tools/testing/selftests/arm64/pauth/pac.c | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/tools/testing/selftests/arm64/pauth/pac.c b/tools/testing/selftests/arm64/pauth/pac.c
index 592fe538506e..b743daa772f5 100644
--- a/tools/testing/selftests/arm64/pauth/pac.c
+++ b/tools/testing/selftests/arm64/pauth/pac.c
@@ -25,13 +25,15 @@
do { \
unsigned long hwcaps = getauxval(AT_HWCAP); \
/* data key instructions are not in NOP space. This prevents a SIGILL */ \
- ASSERT_NE(0, hwcaps & HWCAP_PACA) TH_LOG("PAUTH not enabled"); \
+ if (!(hwcaps & HWCAP_PACA)) \
+ SKIP(return, "PAUTH not enabled"); \
} while (0)
#define ASSERT_GENERIC_PAUTH_ENABLED() \
do { \
unsigned long hwcaps = getauxval(AT_HWCAP); \
/* generic key instructions are not in NOP space. This prevents a SIGILL */ \
- ASSERT_NE(0, hwcaps & HWCAP_PACG) TH_LOG("Generic PAUTH not enabled"); \
+ if (!(hwcaps & HWCAP_PACG)) \
+ SKIP(return, "Generic PAUTH not enabled"); \
} while (0)
void sign_specific(struct signatures *sign, size_t val)
@@ -256,7 +258,7 @@ TEST(single_thread_different_keys)
unsigned long hwcaps = getauxval(AT_HWCAP);
/* generic and data key instructions are not in NOP space. This prevents a SIGILL */
- ASSERT_NE(0, hwcaps & HWCAP_PACA) TH_LOG("PAUTH not enabled");
+ ASSERT_PAUTH_ENABLED();
if (!(hwcaps & HWCAP_PACG)) {
TH_LOG("WARNING: Generic PAUTH not enabled. Skipping generic key checks");
nkeys = NKEYS - 1;
@@ -299,7 +301,7 @@ TEST(exec_changed_keys)
unsigned long hwcaps = getauxval(AT_HWCAP);
/* generic and data key instructions are not in NOP space. This prevents a SIGILL */
- ASSERT_NE(0, hwcaps & HWCAP_PACA) TH_LOG("PAUTH not enabled");
+ ASSERT_PAUTH_ENABLED();
if (!(hwcaps & HWCAP_PACG)) {
TH_LOG("WARNING: Generic PAUTH not enabled. Skipping generic key checks");
nkeys = NKEYS - 1;
--
2.30.2
From: Mark Brown <broonie(a)kernel.org>
[ Upstream commit 83e5dcbece4ea67ec3ad94b897e2844184802fd7 ]
When skipping the tests due to a lack of system support for MTE, we
currently print a message saying FAIL, which makes it look like the test
failed even though it actually reported KSFT_SKIP, creating some
confusion. Change the message to say SKIP instead so things are clearer.
Signed-off-by: Mark Brown <broonie(a)kernel.org>
Link: https://lore.kernel.org/r/20210819172902.56211-1-broonie@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas(a)arm.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
tools/testing/selftests/arm64/mte/mte_common_util.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/testing/selftests/arm64/mte/mte_common_util.c b/tools/testing/selftests/arm64/mte/mte_common_util.c
index f50ac31920d1..0328a1e08f65 100644
--- a/tools/testing/selftests/arm64/mte/mte_common_util.c
+++ b/tools/testing/selftests/arm64/mte/mte_common_util.c
@@ -298,7 +298,7 @@ int mte_default_setup(void)
int ret;
if (!(hwcaps2 & HWCAP2_MTE)) {
- ksft_print_msg("FAIL: MTE features unavailable\n");
+ ksft_print_msg("SKIP: MTE features unavailable\n");
return KSFT_SKIP;
}
/* Get current mte mode */
--
2.30.2
From: Yonghong Song <yhs(a)fb.com>
[ Upstream commit b16ac5bf732a5e23d164cf908ec7742d6a6120d3 ]
libbpf CI has reported that the send_signal test is flaky, although
I am not able to reproduce it in my local environment.
But I am able to reproduce it with the on-demand libbpf CI ([1]).
Code analysis suggests the following possible reason:
the failing subtest runs a bpf program in softirq context, and
bpf_send_signal() only sends to a fork of the "test_progs"
process. If the underlying current task is
not "test_progs", bpf_send_signal() will not be triggered
and the subtest will fail.
To reduce the chance that the underlying process is not
the intended one, this patch boosts the scheduling priority to
-20 (the highest allowed by the setpriority() call). I did
10 runs of the on-demand libbpf CI with this patch and
did not observe any failures.
[1] https://github.com/libbpf/libbpf/actions/workflows/ondemand.yml
Signed-off-by: Yonghong Song <yhs(a)fb.com>
Signed-off-by: Andrii Nakryiko <andrii(a)kernel.org>
Link: https://lore.kernel.org/bpf/20210817190923.3186725-1-yhs@fb.com
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
.../selftests/bpf/prog_tests/send_signal.c | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c
index 023cc532992d..839f7ddaec16 100644
--- a/tools/testing/selftests/bpf/prog_tests/send_signal.c
+++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
+#include <sys/time.h>
+#include <sys/resource.h>
#include "test_send_signal_kern.skel.h"
int sigusr1_received = 0;
@@ -41,12 +43,23 @@ static void test_send_signal_common(struct perf_event_attr *attr,
}
if (pid == 0) {
+ int old_prio;
+
/* install signal handler and notify parent */
signal(SIGUSR1, sigusr1_handler);
close(pipe_c2p[0]); /* close read */
close(pipe_p2c[1]); /* close write */
+ /* boost with a high priority so we got a higher chance
+ * that if an interrupt happens, the underlying task
+ * is this process.
+ */
+ errno = 0;
+ old_prio = getpriority(PRIO_PROCESS, 0);
+ ASSERT_OK(errno, "getpriority");
+ ASSERT_OK(setpriority(PRIO_PROCESS, 0, -20), "setpriority");
+
/* notify parent signal handler is installed */
CHECK(write(pipe_c2p[1], buf, 1) != 1, "pipe_write", "err %d\n", -errno);
@@ -62,6 +75,9 @@ static void test_send_signal_common(struct perf_event_attr *attr,
/* wait for parent notification and exit */
CHECK(read(pipe_p2c[0], buf, 1) != 1, "pipe_read", "err %d\n", -errno);
+ /* restore the old priority */
+ ASSERT_OK(setpriority(PRIO_PROCESS, 0, old_prio), "setpriority");
+
close(pipe_c2p[1]);
close(pipe_p2c[0]);
exit(0);
--
2.30.2
From: Jussi Maki <joamaki(a)gmail.com>
[ Upstream commit 95413846cca37f20000dd095cf6d91f8777129d7 ]
The program type cannot be deduced from the section name 'tx', which causes
an invalid argument error when trying to load xdp_tx.o using the skeleton.
Rename the section to "xdp" so that libbpf can deduce the type.
Signed-off-by: Jussi Maki <joamaki(a)gmail.com>
Signed-off-by: Daniel Borkmann <daniel(a)iogearbox.net>
Acked-by: Andrii Nakryiko <andrii(a)kernel.org>
Link: https://lore.kernel.org/bpf/20210731055738.16820-7-joamaki@gmail.com
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
tools/testing/selftests/bpf/progs/xdp_tx.c | 2 +-
tools/testing/selftests/bpf/test_xdp_veth.sh | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/tools/testing/selftests/bpf/progs/xdp_tx.c b/tools/testing/selftests/bpf/progs/xdp_tx.c
index 94e6c2b281cb..5f725c720e00 100644
--- a/tools/testing/selftests/bpf/progs/xdp_tx.c
+++ b/tools/testing/selftests/bpf/progs/xdp_tx.c
@@ -3,7 +3,7 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-SEC("tx")
+SEC("xdp")
int xdp_tx(struct xdp_md *xdp)
{
return XDP_TX;
diff --git a/tools/testing/selftests/bpf/test_xdp_veth.sh b/tools/testing/selftests/bpf/test_xdp_veth.sh
index ba8ffcdaac30..995278e684b6 100755
--- a/tools/testing/selftests/bpf/test_xdp_veth.sh
+++ b/tools/testing/selftests/bpf/test_xdp_veth.sh
@@ -108,7 +108,7 @@ ip link set dev veth2 xdp pinned $BPF_DIR/progs/redirect_map_1
ip link set dev veth3 xdp pinned $BPF_DIR/progs/redirect_map_2
ip -n ns1 link set dev veth11 xdp obj xdp_dummy.o sec xdp_dummy
-ip -n ns2 link set dev veth22 xdp obj xdp_tx.o sec tx
+ip -n ns2 link set dev veth22 xdp obj xdp_tx.o sec xdp
ip -n ns3 link set dev veth33 xdp obj xdp_dummy.o sec xdp_dummy
trap cleanup EXIT
--
2.30.2
Allow running each suite or each test case alone per kernel boot.
The motivation for this is to debug "test hermeticity" issues.
This new --run_isolated flag would be a good first step to try and
narrow down root causes.
Context: sometimes tests pass/fail depending on what ran before them.
Memory corruption errors in particular might only cause noticeable
issues later on. But you can also have the opposite, where "fixing" one
test causes another to start failing.
Usage:
$ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit --run_isolated=suite
$ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit --run_isolated=test
$ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit --run_isolated=test example
The last one would provide output like
======== [PASSED] example ========
[PASSED] example_simple_test
============================================================
Testing complete. 1 tests run. 0 failed. 0 crashed. 0 skipped.
Starting KUnit Kernel (2/3)...
============================================================
======== [SKIPPED] example ========
[SKIPPED] example_skip_test # SKIP this test should be skipped
============================================================
Testing complete. 1 tests run. 0 failed. 0 crashed. 1 skipped.
Starting KUnit Kernel (3/3)...
============================================================
======== [SKIPPED] example ========
[SKIPPED] example_mark_skipped_test # SKIP this test should be skipped
============================================================
Testing complete. 1 tests run. 0 failed. 0 crashed. 1 skipped.
See the last patch's description for a bit more detail.
Meta:
The first patch is from another series with just a reworded commit
message, https://lore.kernel.org/linux-kselftest/20210805235145.2528054-2-dlatypov@g…
This patch series is based on the 2 patches in
https://lore.kernel.org/linux-kselftest/20210831171926.3832806-2-dlatypov@g….
(That's what adds support for us to run a single test case by itself).
Daniel Latypov (3):
kunit: add 'kunit.action' param to allow listing out tests
kunit: tool: factor exec + parse steps into a function
kunit: tool: support running each suite/test separately
lib/kunit/executor.c | 38 +++++++-
tools/testing/kunit/kunit.py | 127 +++++++++++++++++--------
tools/testing/kunit/kunit_tool_test.py | 40 ++++++++
3 files changed, 160 insertions(+), 45 deletions(-)
base-commit: 23fdafa5ae209688d5d5253786bab666bdb07b69
--
2.33.0.309.g3052b89438-goog
So I just committed these three fixes:
4b93c544e90e ("thunderbolt: test: split up test cases in
tb_test_credit_alloc_all")
ba7b1f861086 ("lib/test_scanf: split up number parsing test routines")
1476ff21abb4 ("iwl: fix debug printf format strings")
for the fallout from -Werror that I could easily check (mainly i386
'allyesconfig' - a situation I don't normally test).
The printk format string one was trivial and I hopefully didn't screw
anything up, but I'd ask people to look at and verify the two other
ones. I tried to be very careful, and organizing the code movement in
such a way that 'git diff' shows that it's doing the same thing before
and after, but hey, mistakes happen.
I found those two test-based ones somewhat annoying, because they both
showed how little the test infrastructure tries to follow kernel
rules. I bet those warnings have been showing up for a long long time,
and people went "that's not a relevant configuration" or had some
other reason to ignore them.
No, the test cases may not be relevant in most situations, but it's
not a good thing when something that is supposed to verify kernel
behavior then violates some very fundamental and core kernel rules.
And maybe it was simply missed. The one thing that was clear when I
did that thunderbolt thing in particular is how easy it is to create
variations of those 'struct some-assertion-struct' things on stack as
part of the KUNIT infrastructure. That's unfortunate. It is possible
that the solution to the kernel stack usage might have been to make
those structures static instead, but I didn't check whether the
description structs really can be.
It would be even nicer if they were 'static const'. Being fully
initialized instead of generating not only code that uses up stack,
but also the code to dynamically initialize them on the stack is all
kinds of nasty. I took one look at the generated code, and ran away
screaming.
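To illustrate the difference with a made-up struct (not the actual KUnit
assert types): an initialized on-stack struct costs both stack space and
per-call initialization code, while a static const instance lives in .rodata
and costs neither:

struct assert_desc {
        const char *file;
        int line;
        const char *msg;
};

void consume(const struct assert_desc *d);

void on_stack(void)
{
        /* a stack slot plus runtime stores to fill it, on every call */
        struct assert_desc d = { __FILE__, __LINE__, "oops" };
        consume(&d);
}

void in_rodata(void)
{
        /* one read-only instance: no stack use, no init code */
        static const struct assert_desc d = { __FILE__, __LINE__, "oops" };
        consume(&d);
}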
Anyway, I'm adding the Kunit maintainer and lists here too, just to
see if maybe it could be possible to make those 'struct kunit_assert'
things and friends be static and const, but at least for the cases
that caused problems for i386, those three commits should make the
build pass.
The test_scanf case didn't actually use the Kunit infrastructure, the
stack use explosion is because gcc doesn't seem to combine stack
allocations in many situations. I know gcc *sometimes* does that stack
allocation combining, but not here. I suspect it might be related to
type aliasing, and only merging stack slots when they have the same
types, and thus triggered by the different result buffer sizes. Maybe.
Linus
We refactored the lib/test_hash.c file into KUnit as part of the student
group LKCAMP [1] introductory hackathon for kernel development.
Daniel Latypov [2] pointed this test out to our group, so fully converting
it into a pure KUnit test was our goal in this patch series. We ran into
several problems because it was not split into unit tests, which
complicated matters a bit, as the reasoning behind the original tests is
quite cryptic for those unfamiliar with hash implementations.
Some interesting developments we'd like to highlight are:
- In patch 1/6 we noticed that there was an unused define directive that
could be removed.
- In patch 5/6 we noticed how stringhash and hash tests are all under
the lib/test_hash.c file, which might cause some confusion, and we
also broke those kernel config entries up.
Overall KUnit developments have been made in the other patches in this
series:
In patches 2/6 through 4/6 and 6/6 we refactored the lib/test_hash.c
file to make it more compatible with the KUnit style, while preserving
the original idea of the maintainer who designed it (i.e. George
Spelvin). That structure might be undesirable for unit tests, but we
assume it is good enough for a first patch.
This is our first patch series so we hope our contributions are
interesting and also hope to get some useful criticism from the
community :)
[1] - https://lkcamp.dev/
[2] - https://lore.kernel.org/linux-kselftest/CAGS_qxojszgM19u=3HLwFgKX5bm5Khywvs…
Isabella Basso (6):
hash.h: remove unused define directive
test_hash.c: move common definitions to top of file
test_hash.c: split test_int_hash into arch-specific functions
test_hash.c: split test_hash_init
lib/Kconfig.debug: properly split hash test kernel entries
test_hash.c: refactor into kunit
include/linux/hash.h | 5 +-
lib/Kconfig.debug | 28 ++++-
lib/Makefile | 3 +-
lib/test_hash.c | 249 ++++++++++++++++---------------------
tools/include/linux/hash.h | 5 +-
5 files changed, 136 insertions(+), 154 deletions(-)
--
2.33.0
Hi Linus,
Please pull the following Kselftest update for Linux 5.15-rc1.
This Kselftest update for Linux 5.15-rc1 consists of fixes to build
and test failures.
-- openat2 test failure for O_LARGEFILE flag on ARM64
-- x86 test build failures related to glibc 2.34 adding
support for variable sized MINSIGSTKSZ and SIGSTKSZ
-- removing obsolete configs in sync and cpufreq config files
-- minor spelling and duplicate header include cleanups
diff is attached.
thanks,
-- Shuah
----------------------------------------------------------------
The following changes since commit 2734d6c1b1a089fb593ef6a23d4b70903526fe0c:
Linux 5.14-rc2 (2021-07-18 14:13:49 -0700)
are available in the Git repository at:
git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest tags/linux-kselftest-next-5.15-rc1
for you to fetch changes up to 67d6d80d90fb27b3cc7659f464fa3b87fd67bc14:
selftests/cpufreq: Rename DEBUG_PI_LIST to DEBUG_PLIST (2021-08-31 11:00:02 -0600)
----------------------------------------------------------------
linux-kselftest-next-5.15-rc1
This Kselftest update for Linux 5.15-rc1 consists of fixes to build
and test failures.
-- openat2 test failure for O_LARGEFILE flag on ARM64
-- x86 test build failures related to glibc 2.34 adding
support for variable sized MINSIGSTKSZ and SIGSTKSZ
-- removing obsolete configs in sync and cpufreq config files
-- minor spelling and duplicate header include cleanups
----------------------------------------------------------------
Baolin Wang (1):
selftests: openat2: Fix testing failure for O_LARGEFILE flag
Changcheng Deng (1):
kselftest:sched: remove duplicate include in cs_prctl_test.c
Colin Ian King (1):
selftests: safesetid: Fix spelling mistake "cant" -> "can't"
Jun Miao (1):
selftests/x86: Fix error: variably modified 'altstack_data' at file scope
Li Zhijian (2):
selftests/sync: Remove the deprecated config SYNC
selftests/cpufreq: Rename DEBUG_PI_LIST to DEBUG_PLIST
tools/testing/selftests/cpufreq/config | 2 +-
tools/testing/selftests/openat2/openat2_test.c | 4 ++++
tools/testing/selftests/safesetid/safesetid-test.c | 2 +-
tools/testing/selftests/sched/cs_prctl_test.c | 2 --
tools/testing/selftests/sync/config | 1 -
tools/testing/selftests/x86/mov_ss_trap.c | 4 ++--
tools/testing/selftests/x86/sigreturn.c | 7 +++----
tools/testing/selftests/x86/single_step_syscall.c | 4 ++--
tools/testing/selftests/x86/syscall_arg_fault.c | 7 +++----
9 files changed, 16 insertions(+), 17 deletions(-)
----------------------------------------------------------------
This is similar to TCP MD5 in functionality but it's sufficiently
different that wire formats are incompatible. Compared to TCP-MD5
more algorithms are supported and multiple keys can be used on the
same connection but there is still no negotiation mechanism.
Expected use-case is protecting long-duration BGP/LDP connections
between routers using pre-shared keys.
This version is mostly functional; it incorporates ABI feedback from
previous versions and adds tests to kselftests. More discussion and
testing are required, and obvious optimizations were skipped in favor of
adding functionality. Here are several known flaws:
* RST and TIMEWAIT are mostly unhandled
* Locking is lockdep-clean but needs to be revised
* Sequence Number Extension not implemented
* User is responsible for ensuring keys do not overlap
* Traffic key is not cached (reducing performance)
Not all ABI suggestions were incorporated, they can be discussed further.
However I very much want to avoid supporting algorithms beyond RFC5926.
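For a sense of the intended usage, here is a purely hypothetical sketch; the
struct layout below is an assumption pieced together from this cover letter
(keys match on send_id/recv_id and an optional address), and the real struct
and option names come from the patched <linux/tcp.h>:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

/* hypothetical mirror of the proposed key struct, not the actual uapi */
struct tcp_authopt_key_sketch {
        unsigned int    flags;
        unsigned char   send_id;        /* KeyID placed in sent segments */
        unsigned char   recv_id;        /* KeyID expected from the peer */
        unsigned char   alg;            /* e.g. HMAC-SHA-1-96 (RFC 5926) */
        unsigned char   keylen;
        unsigned char   key[80];
        struct sockaddr_storage addr;   /* optional peer-address binding */
};

/* optname is passed in because the real TCP_AUTHOPT_KEY value is
 * defined by the patched uapi headers, not assumed here */
static int set_psk_sketch(int fd, const struct tcp_authopt_key_sketch *k,
                          int optname)
{
        return setsockopt(fd, IPPROTO_TCP, optname, k, sizeof(*k));
}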
A test suite was added to tools/selftests/tcp_authopt. The tests are
written in Python using pytest and scapy; they check the API in some
detail and validate packet captures. Python code already exists in the
kernel tree and in kselftests, but virtualenvs are not common there.
This test suite uses `tox` to create a private virtualenv and hide the
dependencies. Let me know if this is OK or how it can be improved.
Limited testing support is also included in nettest and fcnal-test.sh;
those tests are slow and cover much less.
Changes for frr: https://github.com/FRRouting/frr/pull/9442
That PR was made early for ABI feedback; it has many issues.
Changes for yabgp: https://github.com/cdleonard/yabgp/commits/tcp_authopt
The patched version of yabgp can establish a BGP session protected by
TCP Authentication Option with a Cisco IOS-XR router. It is old now.
Changes since RFCv2:
* Removed local_id from ABI and match on send_id/recv_id/addr
* Add all relevant out-of-tree tests to tools/testing/selftests
* Return an error instead of ignoring unknown flags, hopefully this makes
it easier to extend.
* Check sk_family before __tcp_authopt_info_get_or_create in tcp_set_authopt_key
* Use sock_owned_by_me instead of WARN_ON(!lockdep_sock_is_held(sk))
* Fix some intermediate build failures reported by kbuild robot
* Improve documentation
Link: https://lore.kernel.org/netdev/cover.1628544649.git.cdleonard@gmail.com/
Changes since RFC:
* Split into per-topic commits for ease of review. The intermediate
commits compile with a few "unused function" warnings and don't do
anything useful by themselves.
* Add ABI documention including kernel-doc on uapi
* Fix lockdep warnings from crypto by creating pools with one shash for
each cpu
* Accept short options to setsockopt by padding with zeros; this
approach allows increasing the size of the structs in the future.
* Support for aes-128-cmac-96
* Support for binding addresses to keys in a way similar to old tcp_md5
* Add support for retrieving received keyid/rnextkeyid and controlling
the keyid/rnextkeyid being sent.
Link: https://lore.kernel.org/netdev/01383a8751e97ef826ef2adf93bfde3a08195a43.162…
Leonard Crestez (15):
tcp: authopt: Initial support and key management
docs: Add user documentation for tcp_authopt
selftests: Initial tcp_authopt test module
selftests: tcp_authopt: Initial sockopt manipulation
tcp: authopt: Add crypto initialization
tcp: authopt: Compute packet signatures
tcp: authopt: Hook into tcp core
tcp: authopt: Add snmp counters
selftests: tcp_authopt: Test key address binding
selftests: tcp_authopt: Capture and verify packets
selftests: Initial tcp_authopt support for nettest
selftests: Initial tcp_authopt support for fcnal-test
selftests: Add -t tcp_authopt option for fcnal-test.sh
tcp: authopt: Add key selection controls
selftests: tcp_authopt: Add tests for rollover
Documentation/networking/index.rst | 1 +
Documentation/networking/tcp_authopt.rst | 69 +
include/linux/tcp.h | 6 +
include/net/tcp.h | 1 +
include/net/tcp_authopt.h | 134 ++
include/uapi/linux/snmp.h | 1 +
include/uapi/linux/tcp.h | 110 ++
net/ipv4/Kconfig | 14 +
net/ipv4/Makefile | 1 +
net/ipv4/proc.c | 1 +
net/ipv4/tcp.c | 27 +
net/ipv4/tcp_authopt.c | 1168 +++++++++++++++++
net/ipv4/tcp_input.c | 17 +
net/ipv4/tcp_ipv4.c | 5 +
net/ipv4/tcp_minisocks.c | 2 +
net/ipv4/tcp_output.c | 74 +-
net/ipv6/tcp_ipv6.c | 4 +
tools/testing/selftests/net/fcnal-test.sh | 34 +
tools/testing/selftests/net/nettest.c | 34 +-
tools/testing/selftests/tcp_authopt/Makefile | 5 +
.../testing/selftests/tcp_authopt/README.rst | 15 +
tools/testing/selftests/tcp_authopt/config | 6 +
tools/testing/selftests/tcp_authopt/run.sh | 11 +
tools/testing/selftests/tcp_authopt/setup.cfg | 17 +
tools/testing/selftests/tcp_authopt/setup.py | 5 +
.../tcp_authopt/tcp_authopt_test/__init__.py | 0
.../tcp_authopt/tcp_authopt_test/conftest.py | 21 +
.../full_tcp_sniff_session.py | 53 +
.../tcp_authopt_test/linux_tcp_authopt.py | 198 +++
.../tcp_authopt_test/netns_fixture.py | 63 +
.../tcp_authopt/tcp_authopt_test/server.py | 82 ++
.../tcp_authopt/tcp_authopt_test/sockaddr.py | 101 ++
.../tcp_authopt_test/tcp_authopt_alg.py | 276 ++++
.../tcp_authopt/tcp_authopt_test/test_bind.py | 143 ++
.../tcp_authopt_test/test_rollover.py | 181 +++
.../tcp_authopt_test/test_sockopt.py | 74 ++
.../tcp_authopt_test/test_vectors.py | 359 +++++
.../tcp_authopt_test/test_verify_capture.py | 123 ++
.../tcp_authopt/tcp_authopt_test/utils.py | 154 +++
.../tcp_authopt/tcp_authopt_test/validator.py | 158 +++
40 files changed, 3746 insertions(+), 2 deletions(-)
create mode 100644 Documentation/networking/tcp_authopt.rst
create mode 100644 include/net/tcp_authopt.h
create mode 100644 net/ipv4/tcp_authopt.c
create mode 100644 tools/testing/selftests/tcp_authopt/Makefile
create mode 100644 tools/testing/selftests/tcp_authopt/README.rst
create mode 100644 tools/testing/selftests/tcp_authopt/config
create mode 100755 tools/testing/selftests/tcp_authopt/run.sh
create mode 100644 tools/testing/selftests/tcp_authopt/setup.cfg
create mode 100644 tools/testing/selftests/tcp_authopt/setup.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/__init__.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/conftest.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/full_tcp_sniff_session.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/linux_tcp_authopt.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/netns_fixture.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/server.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/sockaddr.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/tcp_authopt_alg.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/test_bind.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/test_rollover.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/test_sockopt.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/test_vectors.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/test_verify_capture.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/utils.py
create mode 100644 tools/testing/selftests/tcp_authopt/tcp_authopt_test/validator.py
base-commit: 3a62c333497b164868fdcd241842a1dd4e331825
--
2.25.1
[root@iaas-rpma gpio]# make
gcc gpio-mockup-cdev.c -o /home/lizhijian/linux/tools/testing/selftests/gpio/gpio-mockup-cdev
gpio-mockup-cdev.c: In function ‘request_line_v2’:
gpio-mockup-cdev.c:24:30: error: storage size of ‘req’ isn’t known
24 | struct gpio_v2_line_request req;
| ^~~
gpio-mockup-cdev.c:32:14: error: ‘GPIO_V2_LINE_FLAG_OUTPUT’ undeclared (first use in this function); did you mean ‘GPIOLINE_FLAG_IS_OUT’?
32 | if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
| ^~~~~~~~~~~~~~~~~~~~~~~~
Search for headers in the linux tree, as other selftests such as sched do.
CC: Philip Li <philip.li(a)intel.com>
Reported-by: kernel test robot <lkp(a)intel.com>
Signed-off-by: Li Zhijian <lizhijian(a)cn.fujitsu.com>
---
tools/testing/selftests/gpio/Makefile | 1 +
1 file changed, 1 insertion(+)
diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile
index 39f2bbe8dd3d..42ea7d2aa844 100644
--- a/tools/testing/selftests/gpio/Makefile
+++ b/tools/testing/selftests/gpio/Makefile
@@ -3,5 +3,6 @@
TEST_PROGS := gpio-mockup.sh
TEST_FILES := gpio-mockup-sysfs.sh
TEST_GEN_PROGS_EXTENDED := gpio-mockup-cdev
+CFLAGS += -I../../../../usr/include
include ../lib.mk
--
2.31.1
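The identifiers in the error output (struct gpio_v2_line_request,
GPIO_V2_LINE_FLAG_OUTPUT) come from the v2 GPIO uapi in <linux/gpio.h>,
which is why pointing CFLAGS at the in-tree usr/include (populated by
'make headers_install') fixes the build. A quick compile check, as a sketch:

/* builds only when the uapi headers are on the include path, e.g.
 * gcc -I../../../../usr/include check.c */
#include <linux/gpio.h>
#include <stdio.h>

int main(void)
{
        struct gpio_v2_line_request req = { 0 };

        printf("request size: %zu, OUTPUT flag: %llu\n", sizeof(req),
               (unsigned long long)GPIO_V2_LINE_FLAG_OUTPUT);
        return 0;
}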
The first KernelCI hackfest[1] early June was successful in getting
a number of kernel developers to work alongside the core KernelCI
team. Test coverage was increased in particular with kselftest,
LTP, KUnit and a new test suite for libcamera. We're now improving
documentation and tooling to make it easier for anyone to get
started. Find out more about KernelCI on https://kernelci.org.
The second hackfest is scheduled for the 6th-10th September. It
should be a good opportunity to start discussing and working on
upstream kernel testing topics ahead of the Linux Plumbers
Conference[2].
Here's the project board where anyone can already add some ideas:
https://github.com/orgs/kernelci/projects/5
There is no registration system, but please reply to this email or
send a message on IRC (#kernelci libera.chat) or kernelci.slack.com
if you would like to take part so you'll get email updates and
invitations to the meetings and open hours sessions online. You
may just drop in and out at any point during the hackfest as you
see fit.
The hackfest features:
* Daily open hours online using Big Blue Button to discuss things
and get support from the KernelCI team
* KernelCI team members available across most time zones to provide
quick feedback
* A curated list of topics and a project board to help set
objectives and coordinate efforts between all contributors
As always, KernelCI is at the service of the kernel community so
please share any feedback you may have to help shape this upcoming
hackfest in the best possible way.
Thanks,
Guillaume
[1] https://foundation.kernelci.org/blog/2021/06/24/the-first-ever-kernelci-hac…
[2] https://www.linuxplumbersconf.org/event/11/page/104-accepted-microconferenc…
On 02/08/2021 10:00, Guillaume Tucker wrote:
> The first KernelCI hackfest[1] early June was successful in getting
> a number of kernel developers to work alongside the core KernelCI
> team. Test coverage was increased in particular with kselftest,
> LTP, KUnit and a new test suite for libcamera. We're now improving
> documentation and tooling to make it easier for anyone to get
> started. Find out more about KernelCI on https://kernelci.org.
>
> The second hackfest is scheduled for the 6th-10th September. It
> should be a good opportunity to start discussing and working on
> upstream kernel testing topics ahead of the Linux Plumbers
> Conference[2].
Please find below some extra information for the KernelCI
Hackfest which is taking place next week. We're expecting at
least some contributors from the Civil Infrastructure Platform
project, the Google Chrome OS kernel team, Collabora kernel
developers and a few more from the wider Linux kernel community.
If you need any direct support, please reply to this email or ask
on kernelci.slack.com or IRC #kernelci (libera.chat).
> Here's the project board where anyone can already add some ideas:
>
> https://github.com/orgs/kernelci/projects/5
In order to add an issue to the workboard, please first create
one in a KernelCI GitHub repository such as kernelci-core:
https://github.com/kernelci/kernelci-core/issues
Each contributor to the hackfest should be added to the
KernelCI "hackers" team, which has permission to edit the
workboard. If you aren't part of this team yet, please ask and
you'll be invited.
Note: Having a GitHub account is not mandatory for taking part in
the hackfest. It's mainly there to facilitate coordination, even
though it is required in order to contribute to the KernelCI
GitHub repositories. Contributions as part of the hackfest may
also be in the kernel tree such as improvements to kselftest,
KUnit or bug fixes, or other test suites such as LTP etc.
> The hackfest features:
>
> * Daily open hours online using Big Blue Button to discuss things
> and get support from the KernelCI team
>
> * KernelCI team members available across most time zones to provide
> quick feedback
>
> * A curated list of topics and a project board to help set
> objectives and coordinate efforts between all contributors
Please see the table below with the proposed daily open hours to
accommodate most time zones:
  Region        Zone    Time 1       Time 2
  East Asia     GMT+10  17:00-19:00  03:00-05:00
  Europe        GMT+2   09:00-11:00  19:00-21:00
  UTC           GMT     07:00-09:00  17:00-19:00
  West America  GMT-7   00:00-02:00  10:00-12:00
They will be held as a Big Blue Button virtual conference with
the same URL as the last hackfest. It's not being shared
publicly to avoid any potential abuse, so please ask if you don't
have it already.
On Monday, the focus should be put on getting started and
reviewing the backlog on the hackfest workboard to distribute
things among people or help new contributors find topics suitable
for them. Open hours are otherwise opportunities to get more
direct support from the KernelCI team or discuss any topic.
See you there!
Best wishes,
Guillaume
> [1] https://foundation.kernelci.org/blog/2021/06/24/the-first-ever-kernelci-hac…
> [2] https://www.linuxplumbersconf.org/event/11/page/104-accepted-microconferenc…
Hi Linus,
Please pull the following KUnit update for Linux 5.15-rc1.
This KUnit update for Linux 5.15-rc1 adds new features and tests:
tool:
-- support for --kernel_args to allow setting module params
-- support for --raw_output option to show just the kunit output during
make
tests:
-- KUnit tests for checksums and timestamps
-- Print test statistics on failure
-- Integrates UBSAN into the KUnit testing framework.
It fails KUnit tests whenever it reports undefined behavior.
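For example, here are illustrative invocations of the two tool
features (the module parameter name below is made up):

    # Show only the KUnit/KTAP output instead of the full kernel log:
    ./tools/testing/kunit/kunit.py run --raw_output=kunit

    # Append kernel command-line arguments, e.g. to set module params:
    ./tools/testing/kunit/kunit.py run --kernel_args=example_mod.param=42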
diff is attached.
thanks,
-- Shuah
----------------------------------------------------------------
The following changes since commit 2734d6c1b1a089fb593ef6a23d4b70903526fe0c:
Linux 5.14-rc2 (2021-07-18 14:13:49 -0700)
are available in the Git repository at:
git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest tags/linux-kselftest-kunit-5.15-rc1
for you to fetch changes up to acd8e8407b8fcc3229d6d8558cac338bea801aed:
kunit: Print test statistics on failure (2021-08-13 13:38:31 -0600)
----------------------------------------------------------------
linux-kselftest-kunit-5.15-rc1
This KUnit update for Linux 5.15-rc1 adds new features and tests:
tool:
-- support for --kernel_args to allow setting module params
-- support for --raw_output option to show just the kunit output during
make
tests:
-- KUnit tests for checksums and timestamps
-- Print test statistics on failure
-- Integrates UBSAN into the KUnit testing framework.
It fails KUnit tests whenever it reports undefined behavior.
----------------------------------------------------------------
Daniel Latypov (2):
kunit: tool: add --kernel_args to allow setting module params
kunit: tool: make --raw_output support only showing kunit output
David Gow (2):
fat: Add KUnit tests for checksums and timestamps
kunit: Print test statistics on failure
Uriel Guajardo (1):
kunit: ubsan integration
Documentation/dev-tools/kunit/kunit-tool.rst | 9 +-
Documentation/dev-tools/kunit/running_tips.rst | 10 ++
fs/fat/.kunitconfig | 5 +
fs/fat/Kconfig | 14 +-
fs/fat/Makefile | 2 +
fs/fat/fat_test.c | 196 +++++++++++++++++++++++++
fs/fat/misc.c | 3 +
lib/kunit/test.c | 109 ++++++++++++++
lib/ubsan.c | 3 +
tools/testing/kunit/kunit.py | 36 +++--
tools/testing/kunit/kunit_parser.py | 6 +-
tools/testing/kunit/kunit_tool_test.py | 29 +++-
12 files changed, 398 insertions(+), 24 deletions(-)
create mode 100644 fs/fat/.kunitconfig
create mode 100644 fs/fat/fat_test.c
----------------------------------------------------------------
Patch 1 fixes a KVM+rseq bug where KVM's handling of TIF_NOTIFY_RESUME,
e.g. for task migration, clears the flag without informing rseq and leads
to stale data in userspace's rseq struct.
Patch 2 is a cleanup to try and make future bugs less likely. It's also
a baby step towards moving and renaming tracehook_notify_resume() since
it has nothing to do with tracing.
Patch 3 is a fix/cleanup to stop overriding x86's unistd_{32,64}.h when
the include path (intentionally) omits tools' uapi headers. KVM's
selftests do exactly that so that they can pick up the uapi headers from
the installed kernel headers, and still use various tools/ headers that
mirror kernel code, e.g. linux/types.h. This allows the new test in
patch 4 to reference __NR_rseq without having to manually define it.
Patch 4 is a regression test for the KVM+rseq bug.
Patch 5 is a cleanup made possible by patch 3.
Based on commit 835d31d319d9 ("Merge tag 'media/v5.15-1' of ...").
v3:
- Collect Ack/Review. [Mathieu, Ben]
- Add explicit smp_wmb() instead of relying on atomic_inc() to do a full
barrier. [Mathieu]
- Add lots and lots of comments in the selftest, especially around why
the migration thread needs a udelay(). [Mathieu]
- Delay between 1us and 10us to reduce the odds of having a hard
dependency on arch/kernel behavior. [Mathieu]
- Dropped an s390 change in patch 2 after a rebase to upstream master.
v2:
- https://lkml.kernel.org/r/20210820225002.310652-1-seanjc@google.com
- Don't touch rseq_cs when handling KVM case so that rseq_syscall() will
still detect a naughty userspace. [Mathieu]
- Use a sequence counter + retry in the test to ensure the process isn't
  migrated between sched_getcpu() and reading rseq.cpu_id, i.e. to
  avoid a flaky test (see the sketch after this changelog). [Mathieu]
- Add Mathieu's ack for patch 2.
- Add more comments in the test.
v1: https://lkml.kernel.org/r/20210818001210.4073390-1-seanjc@google.com
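For readers unfamiliar with the pattern referenced in the v2 notes,
here is a hedged sketch of the sequence counter + retry idea (written
in Python for brevity; the actual selftest is in C, and every name
below is invented for illustration):

    # Hypothetical sketch of the seqcount + retry pattern: snapshot a
    # counter that the migration thread bumps, sample both CPU ids,
    # and accept the pair only if no migration happened in between.
    def snapshot_cpu_ids(read_seqcount, sched_getcpu, read_rseq_cpu_id):
        while True:
            snap = read_seqcount()
            cpu = sched_getcpu()            # CPU per the scheduler
            rseq_cpu = read_rseq_cpu_id()   # CPU recorded in rseq area
            if read_seqcount() == snap:
                # No migration raced with the two reads; the pair is
                # consistent and safe to compare.
                return cpu, rseq_cpu
            # A migration occurred in between; retry.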
Sean Christopherson (5):
KVM: rseq: Update rseq when processing NOTIFY_RESUME on xfer to KVM
guest
entry: rseq: Call rseq_handle_notify_resume() in
tracehook_notify_resume()
tools: Move x86 syscall number fallbacks to .../uapi/
KVM: selftests: Add a test for KVM_RUN+rseq to detect task migration
bugs
KVM: selftests: Remove __NR_userfaultfd syscall fallback
arch/arm/kernel/signal.c | 1 -
arch/arm64/kernel/signal.c | 1 -
arch/csky/kernel/signal.c | 4 +-
arch/mips/kernel/signal.c | 4 +-
arch/powerpc/kernel/signal.c | 4 +-
include/linux/tracehook.h | 2 +
kernel/entry/common.c | 4 +-
kernel/rseq.c | 14 +-
.../x86/include/{ => uapi}/asm/unistd_32.h | 0
.../x86/include/{ => uapi}/asm/unistd_64.h | 3 -
tools/testing/selftests/kvm/.gitignore | 1 +
tools/testing/selftests/kvm/Makefile | 3 +
tools/testing/selftests/kvm/rseq_test.c | 236 ++++++++++++++++++
13 files changed, 257 insertions(+), 20 deletions(-)
rename tools/arch/x86/include/{ => uapi}/asm/unistd_32.h (100%)
rename tools/arch/x86/include/{ => uapi}/asm/unistd_64.h (83%)
create mode 100644 tools/testing/selftests/kvm/rseq_test.c
--
2.33.0.153.gba50c8fa24-goog
Update kunit_parser to improve compatibility with the KTAP
specification, including arbitrarily nested tests. This patch
accomplishes three major changes:
- Use a general Test object to represent all tests rather than TestCase
and TestSuite objects. This allows for easier implementation of arbitrary
levels of nested tests and promotes the idea that both test suites and test
cases are tests.
- Print errors incrementally rather than all at once after the
parsing finishes, to maximize the information given to the user when
the parser is fed invalid input and to increase the helpfulness of
the timestamps given during printing. Note that kunit.py parse does
not print incrementally yet; however, this fix brings us closer to
that feature.
- Increase compatibility with different input formats. Arbitrary levels
of nested tests are now supported. Also, test cases and test suites may
now appear at the same level of testing.
This patch now implements the KTAP specification as described here:
https://lore.kernel.org/linux-kselftest/CA+GJov6tdjvY9x12JsJT14qn6c7NViJxqa….
This patch adjusts the kunit_tool_test.py file to check for
the correct outputs from the new parser and adds a new test to check
the parsing of a correctly formatted KTAP result log with multiple
nested subtests (test_is_test_passed-all_passed_nested.log).
This patch also alters the kunit_json.py file to allow for arbitrarily
nested tests.
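For illustration, here is a minimal usage sketch of the reworked
parser, assuming the patched kunit_parser.py below is importable. The
KTAP input is invented, but the attribute names (test, subtests, name,
status) match the new parser:

    # Hedged sketch: feed invented, nested KTAP output to the parser.
    import kunit_parser

    ktap = [
        'TAP version 14\n',
        '1..1\n',
        '  # Subtest: suite\n',
        '  1..2\n',
        '  ok 1 - case_a\n',
        '    # Subtest: nested\n',
        '    1..1\n',
        '    ok 1 - inner_case\n',
        '  ok 2 - nested\n',
        'ok 1 - suite\n',
    ]

    result = kunit_parser.parse_run_tests(ktap)
    # result.test is the top-level Test; suites and cases are simply
    # nested Test objects under result.test.subtests.
    for suite in result.test.subtests:
        print(suite.name, suite.status.name,
              [t.name for t in suite.subtests])
    # -> suite SUCCESS ['case_a', 'nested']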
Signed-off-by: Rae Moar <rmoar(a)google.com>
Reviewed-by: Brendan Higgins <brendanhiggins(a)google.com>
---
Change log from v2:
https://lore.kernel.org/linux-kselftest/20210826195505.3066755-1-rmoar@goog…
- Fixes bug of type disagreement in kunit_json.py for build_dir
- Removes raw_output()
- Changes docstrings in kunit_parser.py (class docstring, LineStream
docstrings, add_error(), total(), get_status(), all parsing methods)
- Fixes bug where the diagnostic log was not printed when the end of the lines was reached
- Sets default status of all tests to TEST_CRASHED
- Adds and prints empty tests with crashed status in case of missing
tests
- Prints 'subtest' when there is 1 subtest instead of 'subtests'
- Includes checking for 'BUG:' message in search of crash messages in
log (note that parse_crash_in_log method could be removed but would
require deleting tests in kunit_tool_test.py that include the crash
message that is no longer used. If removed, parser would still print
log in cases of test crashed or failure, which would now include
missing subtests)
- Fixes bug where directives (other than SKIP) were included in the
test name when matching the name in the result line for subtests
---
Change log from v1:
https://lore.kernel.org/linux-kselftest/20210820200032.2178134-1-rmoar@goog…
- Rebase onto kselftest/kunit branch
- Add tests to kunit_tool_test.py to check the parser is correctly
stripping hyphens, producing correct json objects with nested tests,
correctly passing kselftest TAP output, and correctly dealing with a
missing test plan.
- Fix bug to correctly match the test name when the test plan is missing.
- Fix bug in kunit_tool_test.py pointed out by Daniel where it was not
correctly checking for a proper match to the '0 tests run!' error
message. Reverts changes back to original.
- A few minor changes to commit message using Daniel's comments.
- Change docstrings using Daniel's comments to reduce them:
- Shorten some docstrings to one line, or to just a description, if
they are self-explanatory.
- Remove explicit respecification of types of parameters and returns
because this is already specified in the function annotations. However,
some descriptions of the parameters and returns remain and some contain
the type for context. Additionally, the types of public attributes of
classes remain.
- Remove any documentation of 'Return: None'
- Remove docstrings of helper methods within other methods
---
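The changelog above mentions get_status() and the default
TEST_CRASHED status; to make the aggregation order concrete, here is
a standalone sketch (a re-implementation for illustration, not the
patch's code) of the priority TestCounts.get_status() applies:
crashed, then failed, then passed, then skipped.

    # Illustrative re-implementation of the status priority used by
    # TestCounts.get_status() in the new parser; not the patch's code.
    from enum import Enum, auto

    class TestStatus(Enum):
        SUCCESS = auto()
        FAILURE = auto()
        SKIPPED = auto()
        TEST_CRASHED = auto()

    def aggregate(statuses):
        if TestStatus.TEST_CRASHED in statuses:
            return TestStatus.TEST_CRASHED
        if TestStatus.FAILURE in statuses:
            return TestStatus.FAILURE
        if TestStatus.SUCCESS in statuses:
            return TestStatus.SUCCESS
        return TestStatus.SKIPPED

    print(aggregate([TestStatus.SUCCESS, TestStatus.FAILURE]).name)
    # -> FAILURE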
tools/testing/kunit/kunit_json.py | 56 +-
tools/testing/kunit/kunit_parser.py | 1022 ++++++++++++-----
tools/testing/kunit/kunit_tool_test.py | 132 ++-
.../test_is_test_passed-all_passed_nested.log | 34 +
.../test_is_test_passed-kselftest.log | 14 +
.../test_is_test_passed-missing_plan.log | 31 +
.../kunit/test_data/test_strip_hyphen.log | 16 +
7 files changed, 925 insertions(+), 380 deletions(-)
create mode 100644 tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log
create mode 100644 tools/testing/kunit/test_data/test_is_test_passed-kselftest.log
create mode 100644 tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log
create mode 100644 tools/testing/kunit/test_data/test_strip_hyphen.log
diff --git a/tools/testing/kunit/kunit_json.py b/tools/testing/kunit/kunit_json.py
index f5cca5c38cac..746bec72b9ac 100644
--- a/tools/testing/kunit/kunit_json.py
+++ b/tools/testing/kunit/kunit_json.py
@@ -11,47 +11,47 @@ import os
import kunit_parser
-from kunit_parser import TestStatus
-
-def get_json_result(test_result, def_config, build_dir, json_path) -> str:
- sub_groups = []
-
- # Each test suite is mapped to a KernelCI sub_group
- for test_suite in test_result.suites:
- sub_group = {
- "name": test_suite.name,
- "arch": "UM",
- "defconfig": def_config,
- "build_environment": build_dir,
- "test_cases": [],
- "lab_name": None,
- "kernel": None,
- "job": None,
- "git_branch": "kselftest",
- }
- test_cases = []
- # TODO: Add attachments attribute in test_case with detailed
- # failure message, see https://api.kernelci.org/schema-test-case.html#get
- for case in test_suite.cases:
- test_case = {"name": case.name, "status": "FAIL"}
- if case.status == TestStatus.SUCCESS:
+from kunit_parser import Test, TestResult, TestStatus
+from typing import Any, Dict, Optional
+
+JsonObj = Dict[str, Any]
+
+def _get_group_json(test: Test, def_config: str,
+ build_dir: Optional[str]) -> JsonObj:
+ sub_groups = [] # List[JsonObj]
+ test_cases = [] # List[JsonObj]
+
+ for subtest in test.subtests:
+ if len(subtest.subtests):
+ sub_group = _get_group_json(subtest, def_config,
+ build_dir)
+ sub_groups.append(sub_group)
+ else:
+ test_case = {"name": subtest.name, "status": "FAIL"}
+ if subtest.status == TestStatus.SUCCESS:
test_case["status"] = "PASS"
- elif case.status == TestStatus.TEST_CRASHED:
+ elif subtest.status == TestStatus.TEST_CRASHED:
test_case["status"] = "ERROR"
test_cases.append(test_case)
- sub_group["test_cases"] = test_cases
- sub_groups.append(sub_group)
+
test_group = {
- "name": "KUnit Test Group",
+ "name": test.name,
"arch": "UM",
"defconfig": def_config,
"build_environment": build_dir,
"sub_groups": sub_groups,
+ "test_cases": test_cases,
"lab_name": None,
"kernel": None,
"job": None,
"git_branch": "kselftest",
}
+ return test_group
+
+def get_json_result(test_result: TestResult, def_config: str,
+ build_dir: Optional[str], json_path: str) -> str:
+ test_group = _get_group_json(test_result.test, def_config, build_dir)
+ test_group["name"] = "KUnit Test Group"
json_obj = json.dumps(test_group, indent=4)
if json_path != 'stdout':
with open(json_path, 'w') as result_path:
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index 6310a641b151..f1b28def3e78 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -1,11 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
#
-# Parses test results from a kernel dmesg log.
+# Parses KTAP test results from a kernel dmesg log and incrementally prints
+# results in a reader-friendly format. Stores and returns test results in a
+# Test object.
#
# Copyright (C) 2019, Google LLC.
# Author: Felix Guo <felixguoxiuping(a)gmail.com>
# Author: Brendan Higgins <brendanhiggins(a)google.com>
+# Author: Rae Moar <rmoar(a)google.com>
+from __future__ import annotations
import re
from collections import namedtuple
@@ -14,33 +18,52 @@ from enum import Enum, auto
from functools import reduce
from typing import Iterable, Iterator, List, Optional, Tuple
-TestResult = namedtuple('TestResult', ['status','suites','log'])
-
-class TestSuite(object):
+TestResult = namedtuple('TestResult', ['status','test','log'])
+
+class Test(object):
+ """
+ A class to represent a test parsed from KTAP results. All KTAP
+ results within a test log are stored in a main Test object as
+ subtests.
+
+ Attributes:
+ status : TestStatus - status of the test
+ name : str - name of the test
+ expected_count : int - expected number of subtests (0 if single
+ test case and None if unknown expected number of subtests)
+ subtests : List[Test] - list of subtests
+ log : List[str] - log of KTAP lines that correspond to the test
+ counts : TestCounts - counts of the test statuses and errors of
+ subtests or of the test itself if the test is a single
+ test case.
+ """
def __init__(self) -> None:
- self.status = TestStatus.SUCCESS
- self.name = ''
- self.cases = [] # type: List[TestCase]
-
- def __str__(self) -> str:
- return 'TestSuite(' + str(self.status) + ',' + self.name + ',' + str(self.cases) + ')'
-
- def __repr__(self) -> str:
- return str(self)
-
-class TestCase(object):
- def __init__(self) -> None:
- self.status = TestStatus.SUCCESS
+ """Creates Test object with default attributes."""
+ self.status = TestStatus.TEST_CRASHED
self.name = ''
+ self.expected_count = 0 # type: Optional[int]
+ self.subtests = [] # type: List[Test]
self.log = [] # type: List[str]
+ self.counts = TestCounts()
def __str__(self) -> str:
- return 'TestCase(' + str(self.status) + ',' + self.name + ',' + str(self.log) + ')'
+ """Returns string representation of a Test class object."""
+ return ('Test(' + str(self.status) + ', ' + self.name +
+ ', ' + str(self.expected_count) + ', ' +
+ str(self.subtests) + ', ' + str(self.log) + ', ' +
+ str(self.counts) + ')')
def __repr__(self) -> str:
+ """Returns string representation of a Test class object."""
return str(self)
+ def add_error(self, error_message: str) -> None:
+ """Records an error that occurred while parsing this test."""
+ self.counts.errors += 1
+ print_error('Test ' + self.name + ': ' + error_message)
+
class TestStatus(Enum):
+ """An enumeration class to represent the status of a test."""
SUCCESS = auto()
FAILURE = auto()
SKIPPED = auto()
@@ -48,381 +71,754 @@ class TestStatus(Enum):
NO_TESTS = auto()
FAILURE_TO_PARSE_TESTS = auto()
+class TestCounts:
+ """
+ Tracks the counts of statuses of all test cases and any errors within
+ a Test.
+
+ Attributes:
+ passed : int - the number of tests that have passed
+ failed : int - the number of tests that have failed
+ crashed : int - the number of tests that have crashed
+ skipped : int - the number of tests that have skipped
+ errors : int - the number of errors in the test and subtests
+ """
+ def __init__(self):
+ """Creates TestCounts object with counts of all test
+ statuses and test errors set to 0.
+ """
+ self.passed = 0
+ self.failed = 0
+ self.crashed = 0
+ self.skipped = 0
+ self.errors = 0
+
+ def __str__(self) -> str:
+ """Returns the string representation of a TestCounts object.
+ """
+ return ('Passed: ' + str(self.passed) +
+ ', Failed: ' + str(self.failed) +
+ ', Crashed: ' + str(self.crashed) +
+ ', Skipped: ' + str(self.skipped) +
+ ', Errors: ' + str(self.errors))
+
+ def total(self) -> int:
+ """Returns the total number of test cases within a test
+ object, where a test case is a test with no subtests.
+ """
+ return (self.passed + self.failed + self.crashed +
+ self.skipped)
+
+ def add_subtest_counts(self, counts: TestCounts) -> None:
+ """
+ Adds the counts of another TestCounts object to the current
+ TestCounts object. Used to add the counts of a subtest to the
+ parent test.
+
+ Parameters:
+		counts - another TestCounts object whose counts
+		will be added to this TestCounts object
+ """
+ self.passed += counts.passed
+ self.failed += counts.failed
+ self.crashed += counts.crashed
+ self.skipped += counts.skipped
+ self.errors += counts.errors
+
+ def get_status(self) -> TestStatus:
+ """Returns the aggregated status of a Test using test
+ counts.
+ """
+ if self.crashed:
+			# If one of the subtests crashes, the expected status
+			# of the Test is crashed.
+			return TestStatus.TEST_CRASHED
+		elif self.failed:
+			# Otherwise if one of the subtests fails, the
+			# expected status of the Test is failed.
+			return TestStatus.FAILURE
+		elif self.passed:
+			# Otherwise if one of the subtests passes, the
+ # expected status of the Test is passed.
+ return TestStatus.SUCCESS
+ else:
+ # Finally, if none of the subtests have failed,
+ # crashed, or passed, the expected status of the
+ # Test is skipped.
+ return TestStatus.SKIPPED
+
+ def add_status(self, status: TestStatus) -> None:
+ """
+ Increments count of inputted status.
+
+ Parameters:
+ status - status to be added to the TestCounts object
+ """
+ if status == TestStatus.SUCCESS or \
+ status == TestStatus.NO_TESTS:
+ # if status is NO_TESTS the most appropriate
+ # attribute to increment is passed because
+ # the test did not fail, crash or get skipped.
+ self.passed += 1
+ elif status == TestStatus.FAILURE:
+ self.failed += 1
+ elif status == TestStatus.SKIPPED:
+ self.skipped += 1
+ else:
+ self.crashed += 1
+
class LineStream:
- """Provides a peek()/pop() interface over an iterator of (line#, text)."""
+ """
+ A class to represent the lines of kernel output.
+ Provides a peek()/pop() interface over an iterator of
+ (line#, text).
+ """
_lines: Iterator[Tuple[int, str]]
_next: Tuple[int, str]
_done: bool
def __init__(self, lines: Iterator[Tuple[int, str]]):
+ """Creates a new LineStream that wraps the given iterator."""
self._lines = lines
self._done = False
self._next = (0, '')
self._get_next()
def _get_next(self) -> None:
+		"""Advances the LineStream to the next line or sets the _done
+ attribute if the LineStream has reached the end of the lines.
+ """
try:
self._next = next(self._lines)
except StopIteration:
self._done = True
def peek(self) -> str:
+ """Returns the current line, without advancing the LineStream.
+ """
return self._next[1]
def pop(self) -> str:
+ """Returns the current line and advances the LineStream to
+ the next line.
+ """
n = self._next
self._get_next()
return n[1]
def __bool__(self) -> bool:
+ """Returns True if stream has more lines."""
return not self._done
# Only used by kunit_tool_test.py.
def __iter__(self) -> Iterator[str]:
+		"""Consumes and yields all remaining lines stored in the
+		LineStream object as an Iterator.
+ """
while bool(self):
yield self.pop()
def line_number(self) -> int:
+ """Returns the line number of the current line."""
return self._next[0]
-kunit_start_re = re.compile(r'TAP version [0-9]+$')
-kunit_end_re = re.compile('(List of all partitions:|'
- 'Kernel panic - not syncing: VFS:|reboot: System halted)')
+# Parsing helper methods:
+
+KTAP_START = re.compile(r'KTAP version ([0-9]+)$')
+TAP_START = re.compile(r'TAP version ([0-9]+)$')
+KTAP_END = re.compile('(List of all partitions:|'
+ 'Kernel panic - not syncing: VFS:|reboot: System halted)')
def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream:
- def isolate_kunit_output(kernel_output: Iterable[str]) -> Iterator[Tuple[int, str]]:
+	"""Extracts KTAP lines from the inputted kernel output and returns
+	them in a LineStream object."""
+ def isolate_ktap_output(kernel_output: Iterable[str]) \
+ -> Iterator[Tuple[int, str]]:
line_num = 0
started = False
for line in kernel_output:
line_num += 1
- line = line.rstrip() # line always has a trailing \n
- if kunit_start_re.search(line):
+ line = line.rstrip() # remove trailing \n
+ if not started and KTAP_START.search(line):
+ # start extracting KTAP lines and set prefix
+ # to number of characters before version line
+ prefix_len = len(
+ line.split('KTAP version')[0])
+ started = True
+ yield line_num, line[prefix_len:]
+ elif not started and TAP_START.search(line):
+ # start extracting KTAP lines and set prefix
+ # to number of characters before version line
prefix_len = len(line.split('TAP version')[0])
started = True
yield line_num, line[prefix_len:]
- elif kunit_end_re.search(line):
+ elif started and KTAP_END.search(line):
+ # stop extracting KTAP lines
break
elif started:
- yield line_num, line[prefix_len:]
- return LineStream(lines=isolate_kunit_output(kernel_output))
-
-DIVIDER = '=' * 60
-
-RESET = '\033[0;0m'
-
-def red(text) -> str:
- return '\033[1;31m' + text + RESET
-
-def yellow(text) -> str:
- return '\033[1;33m' + text + RESET
-
-def green(text) -> str:
- return '\033[1;32m' + text + RESET
-
-def print_with_timestamp(message) -> None:
- print('[%s] %s' % (datetime.now().strftime('%H:%M:%S'), message))
+			# remove prefix and any indentation and yield
+			# the line with its line number
+ line = line[prefix_len:].lstrip()
+ yield line_num, line
+ return LineStream(lines=isolate_ktap_output(kernel_output))
+
+KTAP_VERSIONS = [1]
+TAP_VERSIONS = [13, 14]
+
+def check_version(version_num: int, accepted_versions: List[int],
+ version_type: str, test: Test) -> None:
+ """
+ Adds error to test object if version number is too high or too
+ low.
+
+ Parameters:
+ version_num - The inputted version number from the parsed KTAP or TAP
+ header line
+	accepted_versions - List of accepted KTAP or TAP versions
+ version_type - 'KTAP' or 'TAP' depending on the type of
+ version line.
+ test - Test object for current test being parsed
+ """
+ if version_num < min(accepted_versions):
+ test.add_error(version_type +
+ ' version lower than expected!')
+ elif version_num > max(accepted_versions):
+ test.add_error(
+ version_type + ' version higher than expected!')
+
+def parse_ktap_header(lines: LineStream, test: Test) -> bool:
+ """
+ Parses KTAP/TAP header line and checks version number.
+	Returns False if it fails to parse the KTAP/TAP header line.
+
+ Accepted formats:
+ - 'KTAP version [version number]'
+ - 'TAP version [version number]'
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+ test - Test object for current test being parsed
+
+ Return:
+ True if successfully parsed KTAP/TAP header line
+ """
+ ktap_match = KTAP_START.match(lines.peek())
+ tap_match = TAP_START.match(lines.peek())
+ if ktap_match:
+ version_num = int(ktap_match.group(1))
+ check_version(version_num, KTAP_VERSIONS, 'KTAP', test)
+ elif tap_match:
+ version_num = int(tap_match.group(1))
+ check_version(version_num, TAP_VERSIONS, 'TAP', test)
+ else:
+ return False
+ test.log.append(lines.pop())
+ return True
-def format_suite_divider(message) -> str:
- return '======== ' + message + ' ========'
+TEST_HEADER = re.compile(r'^# Subtest: (.*)$')
-def print_suite_divider(message) -> None:
- print_with_timestamp(DIVIDER)
- print_with_timestamp(format_suite_divider(message))
+def parse_test_header(lines: LineStream, test: Test) -> bool:
+ """
+ Parses test header and stores test name in test object.
+	Returns False if it fails to parse the test header line.
-def print_log(log) -> None:
- for m in log:
- print_with_timestamp(m)
+ Accepted format:
+ - '# Subtest: [test name]'
-TAP_ENTRIES = re.compile(r'^(TAP|[\s]*ok|[\s]*not ok|[\s]*[0-9]+\.\.[0-9]+|[\s]*# (Subtest:|.*: kunit test case crashed!)).*$')
+ Parameters:
+ lines - LineStream of ktap output to parse
+ test - Test object for current test being parsed
-def consume_non_diagnostic(lines: LineStream) -> None:
- while lines and not TAP_ENTRIES.match(lines.peek()):
- lines.pop()
-
-def save_non_diagnostic(lines: LineStream, test_case: TestCase) -> None:
- while lines and not TAP_ENTRIES.match(lines.peek()):
- test_case.log.append(lines.peek())
- lines.pop()
+ Return:
+ True if successfully parsed test header line
+ """
+ match = TEST_HEADER.match(lines.peek())
+ if not match:
+ return False
+ test.log.append(lines.pop())
+ test.name = match.group(1)
+ return True
-OkNotOkResult = namedtuple('OkNotOkResult', ['is_ok','description', 'text'])
+TEST_PLAN = re.compile(r'1\.\.([0-9]+)')
-OK_NOT_OK_SKIP = re.compile(r'^[\s]*(ok|not ok) [0-9]+ - (.*) # SKIP(.*)$')
+def parse_test_plan(lines: LineStream, test: Test) -> bool:
+ """
+ Parses test plan line and stores the expected number of subtests in
+ test object. Reports an error if expected count is 0.
+	Returns False and reports a missing test plan error if it fails to
+	parse the test plan.
-OK_NOT_OK_SUBTEST = re.compile(r'^[\s]+(ok|not ok) [0-9]+ - (.*)$')
+ Accepted format:
+ - '1..[number of subtests]'
-OK_NOT_OK_MODULE = re.compile(r'^(ok|not ok) ([0-9]+) - (.*)$')
+ Parameters:
+ lines - LineStream of ktap output to parse
+ test - Test object for current test being parsed
-def parse_ok_not_ok_test_case(lines: LineStream, test_case: TestCase) -> bool:
- save_non_diagnostic(lines, test_case)
- if not lines:
- test_case.status = TestStatus.TEST_CRASHED
- return True
- line = lines.peek()
- match = OK_NOT_OK_SUBTEST.match(line)
- while not match and lines:
- line = lines.pop()
- match = OK_NOT_OK_SUBTEST.match(line)
- if match:
- test_case.log.append(lines.pop())
- test_case.name = match.group(2)
- skip_match = OK_NOT_OK_SKIP.match(line)
- if skip_match:
- test_case.status = TestStatus.SKIPPED
- return True
- if test_case.status == TestStatus.TEST_CRASHED:
- return True
- if match.group(1) == 'ok':
- test_case.status = TestStatus.SUCCESS
- else:
- test_case.status = TestStatus.FAILURE
- return True
- else:
+ Return:
+ True if successfully parsed test plan line
+ """
+ match = TEST_PLAN.match(lines.peek())
+ if not match:
+ test.expected_count = None
+ test.add_error('missing plan line!')
return False
-
-SUBTEST_DIAGNOSTIC = re.compile(r'^[\s]+# (.*)$')
-DIAGNOSTIC_CRASH_MESSAGE = re.compile(r'^[\s]+# .*?: kunit test case crashed!$')
-
-def parse_diagnostic(lines: LineStream, test_case: TestCase) -> bool:
- save_non_diagnostic(lines, test_case)
- if not lines:
+ test.log.append(lines.pop())
+ expected_count = int(match.group(1))
+ test.expected_count = expected_count
+ if expected_count == 0:
+ test.status = TestStatus.NO_TESTS
+ test.add_error('0 tests run!')
+ return True
+
+TEST_RESULT = re.compile(r'^(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$')
+
+TEST_RESULT_SKIP = re.compile(r'^(ok|not ok) ([0-9]+) (- )?(.*) # SKIP(.*)$')
+
+def peek_test_name_match(lines: LineStream, test: Test) -> bool:
+ """
+ Matches current line with the format of a test result line and checks
+ if the name matches the name of the current test.
+	Returns False if it fails to match the format or name.
+
+ Accepted format:
+ - '[ok|not ok] [test number] [-] [test name] [optional skip
+ directive]'
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+ test - Test object for current test being parsed
+
+ Return:
+		True if the line matches a test result line and its name
+		matches the expected test name
+ """
+ line = lines.peek()
+ match = TEST_RESULT.match(line)
+ if not match:
return False
+ name = match.group(4)
+ return (name == test.name)
+
+def parse_test_result(lines: LineStream, test: Test,
+ expected_num: int) -> bool:
+ """
+ Parses test result line and stores the status and name in the test
+ object. Reports an error if the test number does not match expected
+ test number.
+	Returns False if it fails to parse the test result line.
+
+	Note that the SKIP directive is the only directive that causes a
+ change in status.
+
+ Accepted format:
+ - '[ok|not ok] [test number] [-] [test name] [optional skip
+ directive]'
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+ test - Test object for current test being parsed
+ expected_num - expected test number for current test
+
+ Return:
+ True if successfully parsed a test result line.
+ """
line = lines.peek()
- match = SUBTEST_DIAGNOSTIC.match(line)
- if match:
- test_case.log.append(lines.pop())
- crash_match = DIAGNOSTIC_CRASH_MESSAGE.match(line)
- if crash_match:
- test_case.status = TestStatus.TEST_CRASHED
- return True
- else:
+ match = TEST_RESULT.match(line)
+ skip_match = TEST_RESULT_SKIP.match(line)
+
+ # Check if line matches test result line format
+ if not match:
return False
+ test.log.append(lines.pop())
-def parse_test_case(lines: LineStream) -> Optional[TestCase]:
- test_case = TestCase()
- save_non_diagnostic(lines, test_case)
- while parse_diagnostic(lines, test_case):
- pass
- if parse_ok_not_ok_test_case(lines, test_case):
- return test_case
+ # Set name of test object
+ if skip_match:
+ test.name = skip_match.group(4)
else:
- return None
-
-SUBTEST_HEADER = re.compile(r'^[\s]+# Subtest: (.*)$')
-
-def parse_subtest_header(lines: LineStream) -> Optional[str]:
- consume_non_diagnostic(lines)
- if not lines:
- return None
- match = SUBTEST_HEADER.match(lines.peek())
- if match:
- lines.pop()
- return match.group(1)
+ test.name = match.group(4)
+
+ # Check test num
+ num = int(match.group(2))
+ if num != expected_num:
+ test.add_error('Expected test number ' +
+ str(expected_num) + ' but found ' + str(num))
+
+ # Set status of test object
+ status = match.group(1)
+ if skip_match:
+ test.status = TestStatus.SKIPPED
+ elif status == 'ok':
+ test.status = TestStatus.SUCCESS
else:
- return None
+ test.status = TestStatus.FAILURE
+ return True
+
+def parse_diagnostic(lines: LineStream) -> List[str]:
+ """
+	Parses lines that do not match the format of a test result line or
+	test header line and returns them in a list.
+
+ Line formats that are not parsed:
+ - '# Subtest: [test name]'
+ - '[ok|not ok] [test number] [-] [test name] [optional skip
+ directive]'
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+
+ Return:
+ Log of diagnostic lines
+ """
+ log = [] # type: List[str]
+ while lines and not TEST_RESULT.match(lines.peek()) and not \
+ TEST_HEADER.match(lines.peek()):
+ log.append(lines.pop())
+ return log
+
+DIAGNOSTIC_CRASH_MESSAGE = re.compile(
+ r'^(BUG:|# .*?: kunit test case crashed!$)')
+
+def parse_crash_in_log(test: Test) -> bool:
+ """
+	Iterates through the lines of the log to search for a crash message.
+	If a crash message is found, sets the test status to crashed and
+	returns True. Otherwise returns False.
+
+ Parameters:
+ test - Test object for current test being parsed
+
+ Return:
+ True if crash message found in log
+ """
+ for line in test.log:
+ if DIAGNOSTIC_CRASH_MESSAGE.match(line):
+ test.status = TestStatus.TEST_CRASHED
+ return True
+ return False
-SUBTEST_PLAN = re.compile(r'[\s]+[0-9]+\.\.([0-9]+)')
-def parse_subtest_plan(lines: LineStream) -> Optional[int]:
- consume_non_diagnostic(lines)
- match = SUBTEST_PLAN.match(lines.peek())
- if match:
- lines.pop()
- return int(match.group(1))
- else:
- return None
-
-def max_status(left: TestStatus, right: TestStatus) -> TestStatus:
- if left == right:
- return left
- elif left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED:
- return TestStatus.TEST_CRASHED
- elif left == TestStatus.FAILURE or right == TestStatus.FAILURE:
- return TestStatus.FAILURE
- elif left == TestStatus.SKIPPED:
- return right
- else:
- return left
+# Printing helper methods:
-def parse_ok_not_ok_test_suite(lines: LineStream,
- test_suite: TestSuite,
- expected_suite_index: int) -> bool:
- consume_non_diagnostic(lines)
- if not lines:
- test_suite.status = TestStatus.TEST_CRASHED
- return False
- line = lines.peek()
- match = OK_NOT_OK_MODULE.match(line)
- if match:
- lines.pop()
- if match.group(1) == 'ok':
- test_suite.status = TestStatus.SUCCESS
- else:
- test_suite.status = TestStatus.FAILURE
- skip_match = OK_NOT_OK_SKIP.match(line)
- if skip_match:
- test_suite.status = TestStatus.SKIPPED
- suite_index = int(match.group(2))
- if suite_index != expected_suite_index:
- print_with_timestamp(
- red('[ERROR] ') + 'expected_suite_index ' +
- str(expected_suite_index) + ', but got ' +
- str(suite_index))
- return True
- else:
- return False
+DIVIDER = '=' * 60
-def bubble_up_errors(status_list: Iterable[TestStatus]) -> TestStatus:
- return reduce(max_status, status_list, TestStatus.SKIPPED)
+RESET = '\033[0;0m'
-def bubble_up_test_case_errors(test_suite: TestSuite) -> TestStatus:
- max_test_case_status = bubble_up_errors(x.status for x in test_suite.cases)
- return max_status(max_test_case_status, test_suite.status)
+def red(text: str) -> str:
+ """Returns inputted string with red color code."""
+ return '\033[1;31m' + text + RESET
-def parse_test_suite(lines: LineStream, expected_suite_index: int) -> Optional[TestSuite]:
- if not lines:
- return None
- consume_non_diagnostic(lines)
- test_suite = TestSuite()
- test_suite.status = TestStatus.SUCCESS
- name = parse_subtest_header(lines)
- if not name:
- return None
- test_suite.name = name
- expected_test_case_num = parse_subtest_plan(lines)
- if expected_test_case_num is None:
- return None
- while expected_test_case_num > 0:
- test_case = parse_test_case(lines)
- if not test_case:
- break
- test_suite.cases.append(test_case)
- expected_test_case_num -= 1
- if parse_ok_not_ok_test_suite(lines, test_suite, expected_suite_index):
- test_suite.status = bubble_up_test_case_errors(test_suite)
- return test_suite
- elif not lines:
- print_with_timestamp(red('[ERROR] ') + 'ran out of lines before end token')
- return test_suite
- else:
- print(f'failed to parse end of suite "{name}", at line {lines.line_number()}: {lines.peek()}')
- return None
+def yellow(text: str) -> str:
+ """Returns inputted string with yellow color code."""
+ return '\033[1;33m' + text + RESET
-TAP_HEADER = re.compile(r'^TAP version 14$')
+def green(text: str) -> str:
+ """Returns inputted string with green color code."""
+ return '\033[1;32m' + text + RESET
-def parse_tap_header(lines: LineStream) -> bool:
- consume_non_diagnostic(lines)
- if TAP_HEADER.match(lines.peek()):
- lines.pop()
- return True
- else:
- return False
+ANSI_LEN = len(red(''))
-TEST_PLAN = re.compile(r'[0-9]+\.\.([0-9]+)')
+def print_with_timestamp(message: str) -> None:
+ """Prints message with timestamp at beginning."""
+ print('[%s] %s' % (datetime.now().strftime('%H:%M:%S'), message))
-def parse_test_plan(lines: LineStream) -> Optional[int]:
- consume_non_diagnostic(lines)
- match = TEST_PLAN.match(lines.peek())
- if match:
- lines.pop()
- return int(match.group(1))
- else:
- return None
-
-def bubble_up_suite_errors(test_suites: Iterable[TestSuite]) -> TestStatus:
- return bubble_up_errors(x.status for x in test_suites)
-
-def parse_test_result(lines: LineStream) -> TestResult:
- consume_non_diagnostic(lines)
- if not lines or not parse_tap_header(lines):
- return TestResult(TestStatus.FAILURE_TO_PARSE_TESTS, [], lines)
- expected_test_suite_num = parse_test_plan(lines)
- if expected_test_suite_num == 0:
- return TestResult(TestStatus.NO_TESTS, [], lines)
- elif expected_test_suite_num is None:
- return TestResult(TestStatus.FAILURE_TO_PARSE_TESTS, [], lines)
- test_suites = []
- for i in range(1, expected_test_suite_num + 1):
- test_suite = parse_test_suite(lines, i)
- if test_suite:
- test_suites.append(test_suite)
+def format_test_divider(message: str, len_message: int) -> str:
+ """
+ Returns string with message centered in fixed width divider.
+
+ Example:
+ '===================== message example ====================='
+
+ Parameters:
+ message - message to be centered in divider line
+ len_message - length of the message to be printed such that
+ any characters of the color codes are not counted
+
+ Return:
+ String containing message centered in fixed width divider
+ """
+	default_count = 3 # default number of '=' characters on each side
+	len_1 = default_count
+	len_2 = default_count
+	difference = len(DIVIDER) - len_message - 2 # 2 spaces added
+	if difference > 0:
+		# calculate number of '=' characters for each side of the divider
+ len_1 = int(difference / 2)
+ len_2 = difference - len_1
+ return ('=' * len_1) + ' ' + message + ' ' + ('=' * len_2)
+
+def print_test_header(test: Test) -> None:
+ """
+ Prints test header with test name and optionally the expected number
+ of subtests.
+
+ Example:
+ '=================== example (2 subtests) ==================='
+
+ Parameters:
+ test - Test object representing current test being printed
+ """
+ message = test.name
+ if test.expected_count:
+ if test.expected_count == 1:
+ message += (' (' + str(test.expected_count) +
+ ' subtest)')
else:
- print_with_timestamp(
- red('[ERROR] ') + ' expected ' +
- str(expected_test_suite_num) +
- ' test suites, but got ' + str(i - 2))
- break
- test_suite = parse_test_suite(lines, -1)
- if test_suite:
- print_with_timestamp(red('[ERROR] ') +
- 'got unexpected test suite: ' + test_suite.name)
- if test_suites:
- return TestResult(bubble_up_suite_errors(test_suites), test_suites, lines)
- else:
- return TestResult(TestStatus.NO_TESTS, [], lines)
+ message += (' (' + str(test.expected_count) +
+ ' subtests)')
+ print_with_timestamp(format_test_divider(message, len(message)))
-class TestCounts:
- passed: int
- failed: int
- crashed: int
- skipped: int
+def print_log(log: Iterable[str]) -> None:
+ """
+ Prints all strings in saved log for test in yellow.
- def __init__(self):
- self.passed = 0
- self.failed = 0
- self.crashed = 0
- self.skipped = 0
-
- def total(self) -> int:
- return self.passed + self.failed + self.crashed + self.skipped
-
-def print_and_count_results(test_result: TestResult) -> TestCounts:
- counts = TestCounts()
- for test_suite in test_result.suites:
- if test_suite.status == TestStatus.SUCCESS:
- print_suite_divider(green('[PASSED] ') + test_suite.name)
- elif test_suite.status == TestStatus.SKIPPED:
- print_suite_divider(yellow('[SKIPPED] ') + test_suite.name)
- elif test_suite.status == TestStatus.TEST_CRASHED:
- print_suite_divider(red('[CRASHED] ' + test_suite.name))
- else:
- print_suite_divider(red('[FAILED] ') + test_suite.name)
- for test_case in test_suite.cases:
- if test_case.status == TestStatus.SUCCESS:
- counts.passed += 1
- print_with_timestamp(green('[PASSED] ') + test_case.name)
- elif test_case.status == TestStatus.SKIPPED:
- counts.skipped += 1
- print_with_timestamp(yellow('[SKIPPED] ') + test_case.name)
- elif test_case.status == TestStatus.TEST_CRASHED:
- counts.crashed += 1
- print_with_timestamp(red('[CRASHED] ' + test_case.name))
- print_log(map(yellow, test_case.log))
- print_with_timestamp('')
+ Parameters:
+ log - Iterable object with all strings saved in log for test
+ """
+ for m in log:
+ print_with_timestamp(yellow(m))
+
+def format_test_result(test: Test) -> str:
+ """
+	Returns a string containing the formatted test result, with colored
+	status and test name.
+
+ Example:
+ '[PASSED] example'
+
+ Parameters:
+ test - Test object representing current test being printed
+
+ Return:
+ String containing formatted test result
+ """
+ if test.status == TestStatus.SUCCESS:
+ return (green('[PASSED] ') + test.name)
+ elif test.status == TestStatus.SKIPPED:
+ return (yellow('[SKIPPED] ') + test.name)
+ elif test.status == TestStatus.TEST_CRASHED:
+ print_log(test.log)
+ return (red('[CRASHED] ') + test.name)
+ else:
+ print_log(test.log)
+ return (red('[FAILED] ') + test.name)
+
+def print_test_result(test: Test) -> None:
+ """
+ Prints result line with status of test.
+
+ Example:
+ '[PASSED] example'
+
+ Parameters:
+ test - Test object representing current test being printed
+ """
+ print_with_timestamp(format_test_result(test))
+
+def print_test_footer(test: Test) -> None:
+ """
+ Prints test footer with status of test.
+
+ Example:
+ '===================== [PASSED] example ====================='
+
+ Parameters:
+ test - Test object representing current test being printed
+ """
+ message = format_test_result(test)
+ print_with_timestamp(format_test_divider(message,
+ len(message) - ANSI_LEN))
+
+def print_summary_line(test: Test) -> None:
+ """
+	Prints the summary line of a test object. The color of the line
+	depends on the status of the test: green if the test passes, yellow
+	if it is skipped, and red if it fails or crashes. The summary line
+	contains counts of the statuses of the test's subtests, or of the
+	test itself if it has no subtests.
+
+ Example:
+ "Testing complete. Passed: 2, Failed: 0, Crashed: 0, Skipped: 0,
+ Errors: 0"
+
+ test - Test object representing current test being printed
+ """
+ if test.status == TestStatus.SUCCESS or \
+ test.status == TestStatus.NO_TESTS:
+ color = green
+ elif test.status == TestStatus.SKIPPED:
+ color = yellow
+ else:
+ color = red
+ counts = test.counts
+ print_with_timestamp(color('Testing complete. ' + str(counts)))
+
+def print_error(error_message: str) -> None:
+ """
+ Prints error message with error format.
+
+ Example:
+ "[ERROR] Test example: missing test plan!"
+
+ Parameters:
+ error_message - message describing error
+ """
+ print_with_timestamp(red('[ERROR] ') + error_message)
+
+# Other methods:
+
+def bubble_up_test_results(test: Test) -> None:
+ """
+	If the test has subtests, adds the counts of the subtests to the
+	test's counts and, if any of the subtests crashed, sets the test
+	status to crashed. Otherwise, if the test has no subtests, adds the
+	status of the test to the test's counts.
+
+ Parameters:
+ test - Test object for current test being parsed
+ """
+ parse_crash_in_log(test)
+ subtests = test.subtests
+ counts = test.counts
+ status = test.status
+ for t in subtests:
+ counts.add_subtest_counts(t.counts)
+ if counts.total() == 0:
+ counts.add_status(status)
+ elif test.counts.get_status() == TestStatus.TEST_CRASHED:
+ test.status = TestStatus.TEST_CRASHED
+
+def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
+ """
+ Finds next test to parse in LineStream, creates new Test object,
+ parses any subtests of the test, populates Test object with all
+ information (status, name) about the test and the Test objects for
+ any subtests, and then returns the Test object. The method accepts
+ three formats of tests:
+
+ Accepted test formats:
+
+ - Main KTAP/TAP header
+
+ Example:
+
+ KTAP version 1
+ 1..4
+ [subtests]
+
+ - Subtest header line
+
+ Example:
+
+ # Subtest: name
+ 1..3
+ [subtests]
+ ok 1 name
+
+ - Test result line
+
+ Example:
+
+ ok 1 - test
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+ expected_num - expected test number for test to be parsed
+ log - list of strings containing any preceding diagnostic lines
+ corresponding to the current test
+
+ Return:
+ Test object populated with characteristics and any subtests
+ """
+ test = Test()
+ test.log.extend(log)
+ parent_test = False
+ main = parse_ktap_header(lines, test)
+ if main:
+ # If KTAP/TAP header is found, attempt to parse
+ # test plan
+ test.name = "main"
+ parse_test_plan(lines, test)
+ else:
+		# If KTAP/TAP header is not found, the test must be a
+		# subtest header or a test result line, so attempt to
+		# parse the subtest header
+ parent_test = parse_test_header(lines, test)
+ if parent_test:
+ # If subtest header is found, attempt to parse
+ # test plan and print header
+ parse_test_plan(lines, test)
+ print_test_header(test)
+ expected_count = test.expected_count
+ subtests = []
+ test_num = 1
+ while expected_count is None or test_num <= expected_count:
+ # Loop to parse any subtests.
+		# Break after parsing the expected number of tests or,
+		# if the expected number of tests is unknown, break when a
+		# test result line with a name matching the subtest header
+		# is found or there are no more lines in the stream.
+ sub_log = parse_diagnostic(lines)
+ sub_test = Test()
+ if not lines or (peek_test_name_match(lines, test) and
+ not main):
+ if expected_count and test_num <= expected_count:
+ # If parser reaches end of test before
+ # parsing expected number of subtests, print
+ # crashed subtest and record error
+ test.add_error('missing expected subtest!')
+ sub_test.log.extend(sub_log)
+ test.counts.add_status(
+ TestStatus.TEST_CRASHED)
+ print_test_result(sub_test)
else:
- counts.failed += 1
- print_with_timestamp(red('[FAILED] ') + test_case.name)
- print_log(map(yellow, test_case.log))
- print_with_timestamp('')
- return counts
+ test.log.extend(sub_log)
+ break
+ else:
+ sub_test = parse_test(lines, test_num, sub_log)
+ subtests.append(sub_test)
+ test_num += 1
+ test.subtests = subtests
+ if not main:
+ # If not main test, look for test result line
+ test.log.extend(parse_diagnostic(lines))
+ if (parent_test and peek_test_name_match(lines, test)) or \
+ not parent_test:
+ parse_test_result(lines, test, expected_num)
+ else:
+ test.add_error('missing subtest result line!')
+ # Add statuses to TestCounts attribute in Test object
+ bubble_up_test_results(test)
+ if parent_test:
+ # If test has subtests and is not the main test object, print
+ # footer.
+ print_test_footer(test)
+ elif not main:
+ print_test_result(test)
+ return test
def parse_run_tests(kernel_output: Iterable[str]) -> TestResult:
- counts = TestCounts()
+ """
+	Using kernel output, extracts KTAP lines, parses the lines for test
+	results, and prints condensed test results and a summary line.
+
+ Parameters:
+		kernel_output - Iterable object containing lines of kernel output
+
+ Return:
+		TestResult - Tuple containing the status of the main test object,
+			the main test object with all subtests, and a log of all KTAP lines.
+ """
+ print_with_timestamp(DIVIDER)
lines = extract_tap_lines(kernel_output)
- test_result = parse_test_result(lines)
- if test_result.status == TestStatus.NO_TESTS:
- print(red('[ERROR] ') + yellow('no tests run!'))
- elif test_result.status == TestStatus.FAILURE_TO_PARSE_TESTS:
- print(red('[ERROR] ') + yellow('could not parse test results!'))
+ test = Test()
+ if not lines:
+ test.add_error('invalid KTAP input!')
+ test.status = TestStatus.FAILURE_TO_PARSE_TESTS
else:
- counts = print_and_count_results(test_result)
+ test = parse_test(lines, 0, [])
+ if test.status != TestStatus.NO_TESTS:
+ test.status = test.counts.get_status()
print_with_timestamp(DIVIDER)
- if test_result.status == TestStatus.SUCCESS:
- fmt = green
- elif test_result.status == TestStatus.SKIPPED:
- fmt = yellow
- else:
- fmt =red
- print_with_timestamp(
- fmt('Testing complete. %d tests run. %d failed. %d crashed. %d skipped.' %
- (counts.total(), counts.failed, counts.crashed, counts.skipped)))
- return test_result
+ print_summary_line(test)
+ return TestResult(test.status, test, lines)
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index 619c4554cbff..2a8b0b5f4269 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -106,10 +106,10 @@ class KUnitParserTest(unittest.TestCase):
with open(log_path) as file:
result = kunit_parser.extract_tap_lines(file.readlines())
self.assertContains('TAP version 14', result)
- self.assertContains(' # Subtest: example', result)
- self.assertContains(' 1..2', result)
- self.assertContains(' ok 1 - example_simple_test', result)
- self.assertContains(' ok 2 - example_mock_test', result)
+ self.assertContains('# Subtest: example', result)
+ self.assertContains('1..2', result)
+ self.assertContains('ok 1 - example_simple_test', result)
+ self.assertContains('ok 2 - example_mock_test', result)
self.assertContains('ok 1 - example', result)
def test_output_with_prefix_isolated_correctly(self):
@@ -117,28 +117,28 @@ class KUnitParserTest(unittest.TestCase):
with open(log_path) as file:
result = kunit_parser.extract_tap_lines(file.readlines())
self.assertContains('TAP version 14', result)
- self.assertContains(' # Subtest: kunit-resource-test', result)
- self.assertContains(' 1..5', result)
- self.assertContains(' ok 1 - kunit_resource_test_init_resources', result)
- self.assertContains(' ok 2 - kunit_resource_test_alloc_resource', result)
- self.assertContains(' ok 3 - kunit_resource_test_destroy_resource', result)
- self.assertContains(' foo bar #', result)
- self.assertContains(' ok 4 - kunit_resource_test_cleanup_resources', result)
- self.assertContains(' ok 5 - kunit_resource_test_proper_free_ordering', result)
+ self.assertContains('# Subtest: kunit-resource-test', result)
+ self.assertContains('1..5', result)
+ self.assertContains('ok 1 - kunit_resource_test_init_resources', result)
+ self.assertContains('ok 2 - kunit_resource_test_alloc_resource', result)
+ self.assertContains('ok 3 - kunit_resource_test_destroy_resource', result)
+ self.assertContains('foo bar #', result)
+ self.assertContains('ok 4 - kunit_resource_test_cleanup_resources', result)
+ self.assertContains('ok 5 - kunit_resource_test_proper_free_ordering', result)
self.assertContains('ok 1 - kunit-resource-test', result)
- self.assertContains(' foo bar # non-kunit output', result)
- self.assertContains(' # Subtest: kunit-try-catch-test', result)
- self.assertContains(' 1..2', result)
- self.assertContains(' ok 1 - kunit_test_try_catch_successful_try_no_catch',
+ self.assertContains('foo bar # non-kunit output', result)
+ self.assertContains('# Subtest: kunit-try-catch-test', result)
+ self.assertContains('1..2', result)
+ self.assertContains('ok 1 - kunit_test_try_catch_successful_try_no_catch',
result)
- self.assertContains(' ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch',
+ self.assertContains('ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch',
result)
self.assertContains('ok 2 - kunit-try-catch-test', result)
- self.assertContains(' # Subtest: string-stream-test', result)
- self.assertContains(' 1..3', result)
- self.assertContains(' ok 1 - string_stream_test_empty_on_creation', result)
- self.assertContains(' ok 2 - string_stream_test_not_empty_after_add', result)
- self.assertContains(' ok 3 - string_stream_test_get_string', result)
+ self.assertContains('# Subtest: string-stream-test', result)
+ self.assertContains('1..3', result)
+ self.assertContains('ok 1 - string_stream_test_empty_on_creation', result)
+ self.assertContains('ok 2 - string_stream_test_not_empty_after_add', result)
+ self.assertContains('ok 3 - string_stream_test_get_string', result)
self.assertContains('ok 3 - string-stream-test', result)
def test_parse_successful_test_log(self):
@@ -148,6 +148,13 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
+ def test_parse_successful_nested_tests_log(self):
+ all_passed_log = test_data_path('test_is_test_passed-all_passed_nested.log')
+ with open(all_passed_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines())
+ self.assertEqual(
+ kunit_parser.TestStatus.SUCCESS,
+ result.status)
def test_parse_failed_test_log(self):
failed_log = test_data_path('test_is_test_passed-failure.log')
@@ -162,17 +169,31 @@ class KUnitParserTest(unittest.TestCase):
with open(empty_log) as file:
result = kunit_parser.parse_run_tests(
kunit_parser.extract_tap_lines(file.readlines()))
- self.assertEqual(0, len(result.suites))
+ self.assertEqual(0, len(result.test.subtests))
self.assertEqual(
kunit_parser.TestStatus.FAILURE_TO_PARSE_TESTS,
result.status)
+ def test_missing_test_plan(self):
+ missing_plan_log = test_data_path('test_is_test_passed-'
+ 'missing_plan.log')
+ with open(missing_plan_log) as file:
+ result = kunit_parser.parse_run_tests(
+ kunit_parser.extract_tap_lines(
+ file.readlines()))
+ self.assertEqual(2, result.test.counts.errors)
+ self.assertEqual(
+ kunit_parser.TestStatus.SUCCESS,
+ result.status)
+
def test_no_tests(self):
- empty_log = test_data_path('test_is_test_passed-no_tests_run_with_header.log')
- with open(empty_log) as file:
+ header_log = test_data_path('test_is_test_passed-'
+ 'no_tests_run_with_header.log')
+ with open(header_log) as file:
result = kunit_parser.parse_run_tests(
- kunit_parser.extract_tap_lines(file.readlines()))
- self.assertEqual(0, len(result.suites))
+ kunit_parser.extract_tap_lines(
+ file.readlines()))
+ self.assertEqual(0, len(result.test.subtests))
self.assertEqual(
kunit_parser.TestStatus.NO_TESTS,
result.status)
@@ -182,15 +203,17 @@ class KUnitParserTest(unittest.TestCase):
print_mock = mock.patch('builtins.print').start()
with open(crash_log) as file:
result = kunit_parser.parse_run_tests(
- kunit_parser.extract_tap_lines(file.readlines()))
- print_mock.assert_any_call(StrContains('could not parse test results!'))
+ kunit_parser.extract_tap_lines(
+ file.readlines()))
+ print_mock.assert_any_call(StrContains('invalid KTAP input!'))
print_mock.stop()
file.close()
def test_crashed_test(self):
crashed_log = test_data_path('test_is_test_passed-crash.log')
with open(crashed_log) as file:
- result = kunit_parser.parse_run_tests(file.readlines())
+ result = kunit_parser.parse_run_tests(
+ file.readlines())
self.assertEqual(
kunit_parser.TestStatus.TEST_CRASHED,
result.status)
@@ -216,6 +239,23 @@ class KUnitParserTest(unittest.TestCase):
result.status)
file.close()
+ def test_ignores_hyphen(self):
+ hyphen_log = test_data_path('test_strip_hyphen.log')
+ file = open(hyphen_log)
+ result = kunit_parser.parse_run_tests(file.readlines())
+
+ # A skipped test does not fail the whole suite.
+ self.assertEqual(
+ kunit_parser.TestStatus.SUCCESS,
+ result.status)
+ self.assertEqual(
+ "sysctl_test",
+ result.test.subtests[0].name)
+ self.assertEqual(
+ "example",
+ result.test.subtests[1].name)
+ file.close()
+
def test_ignores_prefix_printk_time(self):
prefix_log = test_data_path('test_config_printk_time.log')
@@ -224,7 +264,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
def test_ignores_multiple_prefixes(self):
prefix_log = test_data_path('test_multiple_prefixes.log')
@@ -233,7 +273,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
def test_prefix_mixed_kernel_output(self):
mixed_prefix_log = test_data_path('test_interrupted_tap_output.log')
@@ -242,7 +282,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
def test_prefix_poundsign(self):
pound_log = test_data_path('test_pound_sign.log')
@@ -251,7 +291,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
def test_kernel_panic_end(self):
panic_log = test_data_path('test_kernel_panic_interrupt.log')
@@ -260,7 +300,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.TEST_CRASHED,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
def test_pound_no_prefix(self):
pound_log = test_data_path('test_pound_no_prefix.log')
@@ -269,7 +309,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
class LinuxSourceTreeTest(unittest.TestCase):
@@ -291,6 +331,14 @@ class LinuxSourceTreeTest(unittest.TestCase):
pass
tree = kunit_kernel.LinuxSourceTree('', kunitconfig_path=dir)
+ def test_kselftest_nested(self):
+ kselftest_log = test_data_path('test_is_test_passed-kselftest.log')
+ with open(kselftest_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines())
+ self.assertEqual(
+ kunit_parser.TestStatus.SUCCESS,
+ result.status)
+
# TODO: add more test cases.
@@ -322,6 +370,12 @@ class KUnitJsonTest(unittest.TestCase):
result = self._json_for('test_is_test_passed-no_tests_run_with_header.log')
self.assertEqual(0, len(result['sub_groups']))
+ def test_nested_json(self):
+ result = self._json_for('test_is_test_passed-all_passed_nested.log')
+ self.assertEqual(
+ {'name': 'example_simple_test', 'status': 'PASS'},
+ result["sub_groups"][0]["sub_groups"][0]["test_cases"][0])
+
class StrContains(str):
def __eq__(self, other):
return self in other
@@ -380,7 +434,7 @@ class KUnitMainTest(unittest.TestCase):
self.assertEqual(e.exception.code, 1)
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
- self.print_mock.assert_any_call(StrContains(' 0 tests run'))
+ self.print_mock.assert_any_call(StrContains('invalid KTAP input!'))
def test_exec_raw_output(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
@@ -388,7 +442,7 @@ class KUnitMainTest(unittest.TestCase):
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
for call in self.print_mock.call_args_list:
self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
- self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))
+ self.assertNotEqual(call, mock.call(StrContains(' 0 tests run!')))
def test_run_raw_output(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
@@ -397,7 +451,7 @@ class KUnitMainTest(unittest.TestCase):
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
for call in self.print_mock.call_args_list:
self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
- self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))
+ self.assertNotEqual(call, mock.call(StrContains(' 0 tests run!')))
def test_run_raw_output_kunit(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log b/tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log
new file mode 100644
index 000000000000..9d5b04fe43a6
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log
@@ -0,0 +1,34 @@
+TAP version 14
+1..2
+ # Subtest: sysctl_test
+ 1..4
+ # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed
+ ok 1 - sysctl_test_dointvec_null_tbl_data
+ # Subtest: example
+ 1..2
+ init_suite
+ # example_simple_test: initializing
+ # example_simple_test: example_simple_test passed
+ ok 1 - example_simple_test
+ # example_mock_test: initializing
+ # example_mock_test: example_mock_test passed
+ ok 2 - example_mock_test
+ kunit example: all tests passed
+ ok 2 - example
+ # sysctl_test_dointvec_table_len_is_zero: sysctl_test_dointvec_table_len_is_zero passed
+ ok 3 - sysctl_test_dointvec_table_len_is_zero
+ # sysctl_test_dointvec_table_read_but_position_set: sysctl_test_dointvec_table_read_but_position_set passed
+ ok 4 - sysctl_test_dointvec_table_read_but_position_set
+kunit sysctl_test: all tests passed
+ok 1 - sysctl_test
+ # Subtest: example
+ 1..2
+init_suite
+ # example_simple_test: initializing
+ # example_simple_test: example_simple_test passed
+ ok 1 - example_simple_test
+ # example_mock_test: initializing
+ # example_mock_test: example_mock_test passed
+ ok 2 - example_mock_test
+kunit example: all tests passed
+ok 2 - example
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-kselftest.log b/tools/testing/kunit/test_data/test_is_test_passed-kselftest.log
new file mode 100644
index 000000000000..65d3f27feaf2
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-kselftest.log
@@ -0,0 +1,14 @@
+TAP version 13
+1..2
+# selftests: membarrier: membarrier_test_single_thread
+# TAP version 13
+# 1..2
+# ok 1 sys_membarrier available
+# ok 2 sys membarrier invalid command test: command = -1, flags = 0, errno = 22. Failed as expected
+ok 1 selftests: membarrier: membarrier_test_single_thread
+# selftests: membarrier: membarrier_test_multi_thread
+# TAP version 13
+# 1..2
+# ok 1 sys_membarrier available
+# ok 2 sys membarrier invalid command test: command = -1, flags = 0, errno = 22. Failed as expected
+ok 2 selftests: membarrier: membarrier_test_multi_thread
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log b/tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log
new file mode 100644
index 000000000000..5cd17b7f818a
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log
@@ -0,0 +1,31 @@
+KTAP version 1
+ # Subtest: sysctl_test
+ # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed
+ ok 1 - sysctl_test_dointvec_null_tbl_data
+ # sysctl_test_dointvec_table_maxlen_unset: sysctl_test_dointvec_table_maxlen_unset passed
+ ok 2 - sysctl_test_dointvec_table_maxlen_unset
+ # sysctl_test_dointvec_table_len_is_zero: sysctl_test_dointvec_table_len_is_zero passed
+ ok 3 - sysctl_test_dointvec_table_len_is_zero
+ # sysctl_test_dointvec_table_read_but_position_set: sysctl_test_dointvec_table_read_but_position_set passed
+ ok 4 - sysctl_test_dointvec_table_read_but_position_set
+ # sysctl_test_dointvec_happy_single_positive: sysctl_test_dointvec_happy_single_positive passed
+ ok 5 - sysctl_test_dointvec_happy_single_positive
+ # sysctl_test_dointvec_happy_single_negative: sysctl_test_dointvec_happy_single_negative passed
+ ok 6 - sysctl_test_dointvec_happy_single_negative
+ # sysctl_test_dointvec_single_less_int_min: sysctl_test_dointvec_single_less_int_min passed
+ ok 7 - sysctl_test_dointvec_single_less_int_min
+ # sysctl_test_dointvec_single_greater_int_max: sysctl_test_dointvec_single_greater_int_max passed
+ ok 8 - sysctl_test_dointvec_single_greater_int_max
+kunit sysctl_test: all tests passed
+ok 1 - sysctl_test
+ # Subtest: example
+ 1..2
+init_suite
+ # example_simple_test: initializing
+ # example_simple_test: example_simple_test passed
+ ok 1 - example_simple_test
+ # example_mock_test: initializing
+ # example_mock_test: example_mock_test passed
+ ok 2 - example_mock_test
+kunit example: all tests passed
+ok 2 - example
diff --git a/tools/testing/kunit/test_data/test_strip_hyphen.log b/tools/testing/kunit/test_data/test_strip_hyphen.log
new file mode 100644
index 000000000000..92ac7c24b374
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_strip_hyphen.log
@@ -0,0 +1,16 @@
+KTAP version 1
+1..2
+ # Subtest: sysctl_test
+ 1..1
+ # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed
+ ok 1 - sysctl_test_dointvec_null_tbl_data
+kunit sysctl_test: all tests passed
+ok 1 - sysctl_test
+ # Subtest: example
+ 1..1
+init_suite
+ # example_simple_test: initializing
+ # example_simple_test: example_simple_test passed
+ ok 1 example_simple_test
+kunit example: all tests passed
+ok 2 example
--
2.33.0.259.gc128427fd7-goog
Synchronous Ethernet networks use a physical layer clock to syntonize
the frequency across different network elements.
Multiple reference clock sources can be used: clocks recovered from
PHY ports on the RX side, or external sources like 1PPS GPS, etc.
This patch series introduces a basic interface for reading the DPLL
state on a SyncE capable device. This state gives us information
about the source of the syntonization signal and whether the DPLL
circuit is tuned to the incoming signal.
Next steps:
- add an interface to enable recovered clocks and get information
about them
v2:
- removed whitespace changes
- fix issues reported by test robot
Maciej Machnikowski (2):
rtnetlink: Add new RTM_GETSYNCESTATE message to get SyncE status
ice: add support for reading SyncE DPLL state
drivers/net/ethernet/intel/ice/ice.h | 5 ++
.../net/ethernet/intel/ice/ice_adminq_cmd.h | 34 ++++++++
drivers/net/ethernet/intel/ice/ice_common.c | 62 +++++++++++++++
drivers/net/ethernet/intel/ice/ice_common.h | 4 +
drivers/net/ethernet/intel/ice/ice_devids.h | 3 +
drivers/net/ethernet/intel/ice/ice_main.c | 55 +++++++++++++
drivers/net/ethernet/intel/ice/ice_ptp.c | 35 +++++++++
drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 44 +++++++++++
drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 22 ++++++
include/linux/netdevice.h | 6 ++
include/uapi/linux/if_link.h | 43 +++++++++++
include/uapi/linux/rtnetlink.h | 11 ++-
net/core/rtnetlink.c | 77 +++++++++++++++++++
security/selinux/nlmsgtab.c | 3 +-
14 files changed, 399 insertions(+), 5 deletions(-)
--
2.26.3
This test assumes that the declared kunit_suite object is the exact one
which is being executed, which KUnit will not guarantee [1].
Specifically, `suite->log` is not initialized until a suite object is
executed. So if KUnit makes a copy of the suite and runs that instead,
this test dereferences an invalid pointer and (hopefully) segfaults.
N.B. since we no longer assume this, we can no longer verify that
`suite->log` is *not* allocated during normal execution.
An alternative to this patch that would allow us to test that would be
to expose an API for the current test to get its current suite.
Exposing that for one internal kunit test seems like overkill, and
grants users more footguns (e.g. reusing a test case in multiple suites
and changing behavior based on the suite name, dynamically modifying the
setup/cleanup funcs, storing/reading stuff out of the suite->log, etc.).
[1] In a subsequent patch, KUnit will allow running subsets of test
cases within a suite by making a copy of the suite w/ the filtered test
list. But there are other reasons KUnit might execute a copy, e.g. if it
ever wants to support parallel execution of different suites, recovering
from errors and restarting suites.
Signed-off-by: Daniel Latypov <dlatypov(a)google.com>
---
lib/kunit/kunit-test.c | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index d69efcbed624..555601d17f79 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -415,12 +415,15 @@ static struct kunit_suite kunit_log_test_suite = {
static void kunit_log_test(struct kunit *test)
{
- struct kunit_suite *suite = &kunit_log_test_suite;
+ struct kunit_suite suite;
+
+ suite.log = kunit_kzalloc(test, KUNIT_LOG_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, suite.log);
kunit_log(KERN_INFO, test, "put this in log.");
kunit_log(KERN_INFO, test, "this too.");
- kunit_log(KERN_INFO, suite, "add to suite log.");
- kunit_log(KERN_INFO, suite, "along with this.");
+ kunit_log(KERN_INFO, &suite, "add to suite log.");
+ kunit_log(KERN_INFO, &suite, "along with this.");
#ifdef CONFIG_KUNIT_DEBUGFS
KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
@@ -428,12 +431,11 @@ static void kunit_log_test(struct kunit *test)
KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
strstr(test->log, "this too."));
KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
- strstr(suite->log, "add to suite log."));
+ strstr(suite.log, "add to suite log."));
KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
- strstr(suite->log, "along with this."));
+ strstr(suite.log, "along with this."));
#else
KUNIT_EXPECT_PTR_EQ(test, test->log, (char *)NULL);
- KUNIT_EXPECT_PTR_EQ(test, suite->log, (char *)NULL);
#endif
}
base-commit: 9c849ce86e0fa93a218614eac562ace44053d7ce
--
2.33.0.259.gc128427fd7-goog
0Day checks whether all configs listed under selftests can be
enabled properly.
For the missing configs, it will report something like:
LKP WARN miss config CONFIG_SYNC= of sync/config
CC: "Rafael J. Wysocki" <rjw(a)rjwysocki.net>
CC: Viresh Kumar <viresh.kumar(a)linaro.org>
CC: linux-pm(a)vger.kernel.org
Reported-by: kernel test robot <lkp(a)intel.com>
Li Zhijian (2):
selftests/sync: Remove the deprecated config SYNC
selftests/cpufreq: Rename DEBUG_PI_LIST to DEBUG_PLIST
tools/testing/selftests/cpufreq/config | 2 +-
tools/testing/selftests/sync/config | 1 -
2 files changed, 1 insertion(+), 2 deletions(-)
--
2.31.1
Synchronous Ethernet networks use a physical layer clock to syntonize
the frequency across different network elements.
A basic SyncE node, as defined in ITU-T G.8264, consists of an Ethernet
Equipment Clock (EEC) and has the ability to recover synchronization
from the synchronization inputs - either traffic interfaces or external
frequency sources.
The EEC can synchronize its frequency (syntonize) to any of those sources.
It is also able to select the synchronization source through priority
tables and synchronization status messaging. It also provides the
necessary filtering and holdover capabilities.
This patch series introduces a basic interface for reading the Ethernet
Equipment Clock (EEC) state on a SyncE capable device. This state gives
information about the source of the syntonization signal and the state
of the EEC. This interface is required to implement Synchronization
Status Messaging in upper layers.
Maciej Machnikowski (2):
rtnetlink: Add new RTM_GETEECSTATE message to get SyncE status
ice: add support for reading SyncE DPLL state
drivers/net/ethernet/intel/ice/ice.h | 5 ++
.../net/ethernet/intel/ice/ice_adminq_cmd.h | 34 ++++++++++
drivers/net/ethernet/intel/ice/ice_common.c | 62 ++++++++++++++++++
drivers/net/ethernet/intel/ice/ice_common.h | 4 ++
drivers/net/ethernet/intel/ice/ice_devids.h | 3 +
drivers/net/ethernet/intel/ice/ice_main.c | 57 +++++++++++++++++
drivers/net/ethernet/intel/ice/ice_ptp.c | 35 ++++++++++
drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 44 +++++++++++++
drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 22 +++++++
include/linux/netdevice.h | 5 ++
include/uapi/linux/if_link.h | 46 +++++++++++++
include/uapi/linux/rtnetlink.h | 3 +
net/core/rtnetlink.c | 64 +++++++++++++++++++
security/selinux/nlmsgtab.c | 3 +-
14 files changed, 386 insertions(+), 1 deletion(-)
--
2.26.3
Synchronous Ethernet networks use a physical layer clock to syntonize
the frequency across different network elements.
A basic SyncE node, as defined in ITU-T G.8264, consists of an Ethernet
Equipment Clock (EEC) and has the ability to recover synchronization
from the synchronization inputs - either traffic interfaces or external
frequency sources.
The EEC can synchronize its frequency (syntonize) to any of those sources.
It is also able to select the synchronization source through priority
tables and synchronization status messaging. It also provides the
necessary filtering and holdover capabilities.
This patch series introduces a basic interface for reading the Ethernet
Equipment Clock (EEC) state on a SyncE capable device. This state gives
information about the source of the syntonization signal and the state
of the EEC. This interface is required to implement Synchronization
Status Messaging in upper layers.
Next steps:
- add interface to enable source clocks and get information about them
v2:
- removed whitespace changes
- fix issues reported by test robot
v3:
- Changed naming from SyncE to EEC
- Clarify cover letter and commit message for patch 1
Maciej Machnikowski (2):
rtnetlink: Add new RTM_GETEECSTATE message to get SyncE status
ice: add support for reading SyncE DPLL state
drivers/net/ethernet/intel/ice/ice.h | 5 ++
.../net/ethernet/intel/ice/ice_adminq_cmd.h | 34 +++++++++
drivers/net/ethernet/intel/ice/ice_common.c | 62 ++++++++++++++++
drivers/net/ethernet/intel/ice/ice_common.h | 4 +
drivers/net/ethernet/intel/ice/ice_devids.h | 3 +
drivers/net/ethernet/intel/ice/ice_main.c | 55 ++++++++++++++
drivers/net/ethernet/intel/ice/ice_ptp.c | 35 +++++++++
drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 44 +++++++++++
drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 22 ++++++
include/linux/netdevice.h | 6 ++
include/uapi/linux/if_link.h | 43 +++++++++++
include/uapi/linux/rtnetlink.h | 3 +
net/core/rtnetlink.c | 74 +++++++++++++++++++
security/selinux/nlmsgtab.c | 3 +-
14 files changed, 392 insertions(+), 1 deletion(-)
--
2.26.3
SyncE - Synchronous Ethernet is defined in ITU-T Rec. G.8264
(https://www.itu.int/rec/T-REC-G.8264)
SyncE allows synchronizing the frequency of the Ethernet PHY clock
signal (the frequency used to send the data onto the wire) to some
reference clock signal.
Multiple reference clock sources can be available: on the RX side, PHY
ports recover the frequency at which the transmitter sent the data;
alternatively, we can use external sources like 1PPS GPS, etc.
This patch series introduces basic interfaces for communication
with a SyncE capable device.
The first part of the interface allows acquiring the synchronization
state of the DPLL (Digital Phase Locked Loop). The DPLL LOCKED state means
that the frequency generated by it is locked to the input frequency.
As a result, PHYs connected to it are synchronized to the chosen input
frequency signal.
The second part can be used to select the port from which the clock
gets recovered. Each PHY chip can have multiple pins on which the
recovered clock can be propagated. For example, a SyncE-capable PHY
can recover the carrier frequency of the first port, divide it
internally, and output it as a reference clock on PIN 0.
When such a signal is enabled, the DPLL can LOCK to the frequency
recovered on PIN 0.
Next steps:
- Add CONFIG_SYNCE definition into Kconfig
- Add more configuration interfaces. Aiming at devlink, since this
would be device-wide configuration
Arkadiusz Kubalewski (7):
ptp: Add interface for acquiring DPLL state
selftests/ptp: Add usage of PTP_DPLL_GETSTATE ioctl in testptp
ice: add get_dpll_state ptp interface usage
net: add ioctl interface for recover reference clock on netdev
selftests/net: Add test app for SIOC{S|G}SYNCE
ice: add SIOC{S|G}SYNCE interface usage to recover reference signal
ice: add sysfs interface to configure PHY recovered reference signal
.../net/ethernet/intel/ice/ice_adminq_cmd.h | 62 +++++
drivers/net/ethernet/intel/ice/ice_common.c | 101 ++++++++
drivers/net/ethernet/intel/ice/ice_common.h | 9 +
drivers/net/ethernet/intel/ice/ice_main.c | 4 +
drivers/net/ethernet/intel/ice/ice_ptp.c | 234 +++++++++++++++++-
drivers/net/ethernet/intel/ice/ice_ptp.h | 9 +
drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 6 +
drivers/ptp/ptp_chardev.c | 15 ++
drivers/ptp/ptp_clockmatrix.h | 12 -
drivers/ptp/ptp_private.h | 2 +
drivers/ptp/ptp_sysfs.c | 48 ++++
include/linux/ptp_clock_kernel.h | 9 +
include/uapi/linux/net_synce.h | 21 ++
include/uapi/linux/ptp_clock.h | 27 ++
include/uapi/linux/sockios.h | 4 +
net/core/dev_ioctl.c | 6 +-
tools/testing/selftests/net/Makefile | 1 +
tools/testing/selftests/net/phy_ref_clk.c | 138 +++++++++++
tools/testing/selftests/ptp/testptp.c | 27 +-
19 files changed, 720 insertions(+), 15 deletions(-)
create mode 100644 include/uapi/linux/net_synce.h
create mode 100644 tools/testing/selftests/net/phy_ref_clk.c
base-commit: aba1e4adb54e020d3ca85a4df3ef0f8febe87548
--
2.24.0
This series of patches updates the format of kselftest TAP results to improve
compatibility with the proposed KTAP specification
(https://lore.kernel.org/linux-kselftest/CA+GJov6tdjvY9x12JsJT14qn6c7NViJxqa…).
Three changes:
- Change from "# " to " " for indentation of nested tests
- Add subtest header line at start of tests with subtests. Line format
is "# Subtest: [name of test]".
- Remove TAP header in nested tests
Standardizing TAP results would not only allow for clearer documentation and easier reading; a common format across different testing frameworks would also let the frameworks share tooling.
As an example:
This is a truncated version of the TAP results from the kselftest ptrace test with the new format changes:
TAP version 13
1..1
# selftests: ptrace: get_syscall_info
# Subtest: selftests: ptrace: get_syscall_info
1..1
# Starting 1 tests from 1 test cases.
# RUN global.get_syscall_info ...
# OK global.get_syscall_info
ok 1 global.get_syscall_info
# PASSED: 1 / 1 tests passed.
# Totals: pass:1 fail:0 xfail:0 xpass:0 skip:0 error:0
ok 1 selftests: ptrace: get_syscall_info
With the new patch that updates the KUnit parser to improve compatibility with the proposed KTAP specification (https://lore.kernel.org/linux-kselftest/20210826195505.3066755-1-rmoar@goog…), the above TAP results would be parsed as follows:
[20:46:09] ============================================================
[20:46:09] ===== selftests: ptrace: get_syscall_info (1 subtest) ======
[20:46:09] [PASSED] global.get_syscall_info
[20:46:09] ======= [PASSED] selftests: ptrace: get_syscall_info =======
[20:46:09] ============================================================
[20:46:09] Testing complete. Passed: 1, Failed: 0, Crashed: 0, Skipped: 0, Errors: 0
Thus, the kunit parser could become a useful tool for kselftest users.
Rae Moar (2):
selftests: tool: Add subtest header line and change indentation format
in TAP results
Revert "selftests: Remove KSFT_TAP_LEVEL"
tools/testing/selftests/Makefile | 6 ++++++
tools/testing/selftests/kselftest/prefix.pl | 2 +-
tools/testing/selftests/kselftest/runner.sh | 7 ++++---
3 files changed, 11 insertions(+), 4 deletions(-)
--
2.33.0.259.gc128427fd7-goog
Update the kunit_parser to improve compatibility with the KTAP
specification, including arbitrarily nested tests. This patch
accomplishes three major changes:
- Use a general Test object to represent all tests rather than TestCase
and TestSuite objects. This allows for easier implementation of arbitrary
levels of nested tests and promotes the idea that both test suites and test
cases are tests.
- Print errors incrementally rather than all at once after the
parsing finishes, to maximize the information given to the user when
the parser is given invalid input and to increase the helpfulness of
the timestamps given during printing. Note that kunit.py parse does
not print incrementally yet; however, this fix brings us closer to
that feature.
- Increase compatibility with different formats of input. Arbitrary
levels of nested tests are supported, and test cases and test suites
may now be present at the same level of testing (see the sketch after
this list).
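For illustration only (this sketch is not part of the patch), the
unified representation lets callers walk results recursively; it
assumes only the Test/TestResult shapes and the parse_run_tests()
entry point shown in the diff below:

import kunit_parser

def print_tree(test: kunit_parser.Test, depth: int = 0) -> None:
	# Every node is a Test; suites and cases differ only in whether
	# test.subtests is empty, so no special-casing is needed.
	print('  ' * depth + test.name + ': ' + test.status.name)
	for subtest in test.subtests:
		print_tree(subtest, depth + 1)

with open('.kunit/test.log') as f:
	result = kunit_parser.parse_run_tests(f.readlines())
print_tree(result.test)

Since every level of nesting has the same type, tools built on top of
the parser (like kunit_json.py below) can recurse without separate
TestSuite/TestCase code paths.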
This patch now implements the KTAP specification as described here:
https://lore.kernel.org/linux-kselftest/CA+GJov6tdjvY9x12JsJT14qn6c7NViJxqa….
This patch adjusts the kunit_tool_test.py file to check for the
correct outputs from the new parser and adds a new test to check the
parsing of a correctly formatted KTAP result log with multiple nested
subtests (test_is_test_passed-all_passed_nested.log).
This patch also alters the kunit_json.py file to allow for arbitrarily
nested tests.
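As a usage sketch (again not part of the diff; the 'um' and '.kunit'
values are placeholders), the nested JSON can be produced from a saved
log like so:

import kunit_json
import kunit_parser

with open('.kunit/test.log') as f:
	result = kunit_parser.parse_run_tests(f.readlines())
# json_path='stdout' makes get_json_result() return the JSON string
# without writing a file; nested suites appear as nested "sub_groups".
print(kunit_json.get_json_result(result, def_config='um',
				 build_dir='.kunit', json_path='stdout'))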
Signed-off-by: Rae Moar <rmoar(a)google.com>
Reviewed-by: Brendan Higgins <brendanhiggins(a)google.com>
---
Change log from v1:
https://lore.kernel.org/linux-kselftest/20210820200032.2178134-1-rmoar@goog…
- Rebase onto kselftest/kunit branch
- Add tests to kunit_tool_test.py to check that the parser correctly
strips the hyphen, produces correct JSON objects with nested tests,
parses kselftest TAP output, and deals with a missing test plan.
- Fix bug to correctly match test name in instance of a missing test plan.
- Fix a bug in kunit_tool_test.py, pointed out by Daniel, where it was
not correctly checking for a proper match to the '0 tests run!' error
message. Reverts the checks back to the original.
- A few minor changes to commit message using Daniel's comments.
- Change docstrings, per Daniel's comments, to reduce verbosity:
- Shorten some docstrings to one line, or to just a description, if
they are self-explanatory.
- Remove explicit respecification of types of parameters and returns
because this is already specified in the function annotations. However,
some descriptions of the parameters and returns remain and some contain
the type for context. Additionally, the types of public attributes of
classes remain.
- Remove any documentation of 'Return: None'
- Remove docstrings of helper methods within other methods.
---
tools/testing/kunit/kunit_json.py | 55 +-
tools/testing/kunit/kunit_parser.py | 1056 ++++++++++++-----
tools/testing/kunit/kunit_tool_test.py | 134 ++-
.../test_is_test_passed-all_passed_nested.log | 34 +
.../test_is_test_passed-kselftest.log | 14 +
.../test_is_test_passed-missing_plan.log | 31 +
.../kunit/test_data/test_strip_hyphen.log | 16 +
7 files changed, 951 insertions(+), 389 deletions(-)
create mode 100644 tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log
create mode 100644 tools/testing/kunit/test_data/test_is_test_passed-kselftest.log
create mode 100644 tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log
create mode 100644 tools/testing/kunit/test_data/test_strip_hyphen.log
diff --git a/tools/testing/kunit/kunit_json.py b/tools/testing/kunit/kunit_json.py
index f5cca5c38cac..e7317b4fad9d 100644
--- a/tools/testing/kunit/kunit_json.py
+++ b/tools/testing/kunit/kunit_json.py
@@ -11,47 +11,46 @@ import os
import kunit_parser
-from kunit_parser import TestStatus
-
-def get_json_result(test_result, def_config, build_dir, json_path) -> str:
- sub_groups = []
-
- # Each test suite is mapped to a KernelCI sub_group
- for test_suite in test_result.suites:
- sub_group = {
- "name": test_suite.name,
- "arch": "UM",
- "defconfig": def_config,
- "build_environment": build_dir,
- "test_cases": [],
- "lab_name": None,
- "kernel": None,
- "job": None,
- "git_branch": "kselftest",
- }
- test_cases = []
- # TODO: Add attachments attribute in test_case with detailed
- # failure message, see https://api.kernelci.org/schema-test-case.html#get
- for case in test_suite.cases:
- test_case = {"name": case.name, "status": "FAIL"}
- if case.status == TestStatus.SUCCESS:
+from kunit_parser import Test, TestResult, TestStatus
+from typing import Any, Dict
+
+JsonObj = Dict[str, Any]
+
+def _get_group_json(test: Test, def_config: str, build_dir: str) -> JsonObj:
+ sub_groups = [] # List[JsonObj]
+ test_cases = [] # List[JsonObj]
+
+ for subtest in test.subtests:
+ if len(subtest.subtests):
+ sub_group = _get_group_json(subtest, def_config,
+ build_dir)
+ sub_groups.append(sub_group)
+ else:
+ test_case = {"name": subtest.name, "status": "FAIL"}
+ if subtest.status == TestStatus.SUCCESS:
test_case["status"] = "PASS"
- elif case.status == TestStatus.TEST_CRASHED:
+ elif subtest.status == TestStatus.TEST_CRASHED:
test_case["status"] = "ERROR"
test_cases.append(test_case)
- sub_group["test_cases"] = test_cases
- sub_groups.append(sub_group)
+
test_group = {
- "name": "KUnit Test Group",
+ "name": test.name,
"arch": "UM",
"defconfig": def_config,
"build_environment": build_dir,
"sub_groups": sub_groups,
+ "test_cases": test_cases,
"lab_name": None,
"kernel": None,
"job": None,
"git_branch": "kselftest",
}
+ return test_group
+
+def get_json_result(test_result: TestResult, def_config: str, build_dir: str,
+ json_path: str) -> str:
+ test_group = _get_group_json(test_result.test, def_config, build_dir)
+ test_group["name"] = "KUnit Test Group"
json_obj = json.dumps(test_group, indent=4)
if json_path != 'stdout':
with open(json_path, 'w') as result_path:
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index 6310a641b151..4b6086159c7f 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -1,11 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
#
-# Parses test results from a kernel dmesg log.
+# Parses KTAP test results from a kernel dmesg log and incrementally prints
+# results with reader-friendly format. Stores and returns test results in a
+# Test object.
#
# Copyright (C) 2019, Google LLC.
# Author: Felix Guo <felixguoxiuping(a)gmail.com>
# Author: Brendan Higgins <brendanhiggins(a)google.com>
+# Author: Rae Moar <rmoar(a)google.com>
+from __future__ import annotations
import re
from collections import namedtuple
@@ -14,33 +18,55 @@ from enum import Enum, auto
from functools import reduce
from typing import Iterable, Iterator, List, Optional, Tuple
-TestResult = namedtuple('TestResult', ['status','suites','log'])
-
-class TestSuite(object):
- def __init__(self) -> None:
- self.status = TestStatus.SUCCESS
- self.name = ''
- self.cases = [] # type: List[TestCase]
-
- def __str__(self) -> str:
- return 'TestSuite(' + str(self.status) + ',' + self.name + ',' + str(self.cases) + ')'
-
- def __repr__(self) -> str:
- return str(self)
-
-class TestCase(object):
+TestResult = namedtuple('TestResult', ['status','test','log'])
+
+class Test(object):
+ """
+ A class to represent a test parsed from KTAP results. All KTAP
+ results within a test log are stored in a main Test object as
+ subtests.
+
+ Attributes:
+ status : TestStatus - status of the test
+ name : str - name of the test
+ expected_count : int - expected number of subtests (0 if single
+ test case and None if unknown expected number of subtests)
+ subtests : List[Test] - list of subtests
+ log : List[str] - log of KTAP lines that correspond to the test
+ counts : TestCounts - counts of the test statuses and errors of
+ subtests or of the test itself if the test is a single
+ test case.
+ """
def __init__(self) -> None:
+ """Constructs the default attributes of a Test class object.
+ """
self.status = TestStatus.SUCCESS
self.name = ''
+ self.expected_count = 0 # type: Optional[int]
+ self.subtests = [] # type: List[Test]
self.log = [] # type: List[str]
+ self.counts = TestCounts()
def __str__(self) -> str:
- return 'TestCase(' + str(self.status) + ',' + self.name + ',' + str(self.log) + ')'
+ """Returns string representation of a Test class object."""
+ return ('Test(' + str(self.status) + ', ' + self.name +
+ ', ' + str(self.expected_count) + ', ' +
+ str(self.subtests) + ', ' + str(self.log) + ', ' +
+ str(self.counts) + ')')
def __repr__(self) -> str:
+ """Returns string representation of a Test class object."""
return str(self)
+ def add_error(self, error_message: str) -> None:
+ """Adds error to test object by incrementing the error count
+ and printing the error message.
+ """
+ self.counts.errors += 1
+ print_error('Test ' + self.name + ': ' + error_message)
+
class TestStatus(Enum):
+ """An enumeration class to represent the status of a test."""
SUCCESS = auto()
FAILURE = auto()
SKIPPED = auto()
@@ -48,381 +74,769 @@ class TestStatus(Enum):
NO_TESTS = auto()
FAILURE_TO_PARSE_TESTS = auto()
+class TestCounts:
+ """
+ A class to represent the counts of statuses and test errors of
+	no subtests. Note that the counts of passed, failed, crashed, and
+	skipped tests should sum to the total number of subtests for
+	the test.
+ the test.
+
+ Attributes:
+ passed : int - the number of tests that have passed
+ failed : int - the number of tests that have failed
+ crashed : int - the number of tests that have crashed
+ skipped : int - the number of tests that have skipped
+ errors : int - the number of errors in the test and subtests
+ """
+ def __init__(self):
+		"""Constructs the default attributes of a TestCounts class
+ object. Sets the counts of all test statuses and test
+ errors to be 0.
+ """
+ self.passed = 0
+ self.failed = 0
+ self.crashed = 0
+ self.skipped = 0
+ self.errors = 0
+
+ def __str__(self) -> str:
+ """Returns the string representation of a TestCounts object.
+ """
+ return ('Passed: ' + str(self.passed) +
+ ', Failed: ' + str(self.failed) +
+ ', Crashed: ' + str(self.crashed) +
+ ', Skipped: ' + str(self.skipped) +
+ ', Errors: ' + str(self.errors))
+
+ def total(self) -> int:
+		"""Returns the total number of subtests, or 1 if the test object
+		has no subtests, representing the test itself. This number is
+ calculated by the sum of the passed, failed, crashed, and
+ skipped subtests.
+ """
+ return (self.passed + self.failed + self.crashed +
+ self.skipped)
+
+ def add_subtest_counts(self, counts: TestCounts) -> None:
+ """
+ Adds the counts of another TestCounts object to the current
+ TestCounts object. Used to add the counts of a subtest to the
+ parent test.
+
+ Parameters:
+ counts - a different TestCounts object whose counts
+ will be added to the counts of the TestCounts object
+ """
+ self.passed += counts.passed
+ self.failed += counts.failed
+ self.crashed += counts.crashed
+ self.skipped += counts.skipped
+ self.errors += counts.errors
+
+ def get_status(self) -> TestStatus:
+ """Returns the expected status of a Test using test counts."""
+ if self.crashed:
+ # If one of the subtests crash, the expected status
+ # of the Test is crashed.
+ return TestStatus.TEST_CRASHED
+ elif self.failed:
+ # Otherwise if one of the subtests fail, the
+ # expected status of the Test is failed.
+ return TestStatus.FAILURE
+ elif self.passed:
+ # Otherwise if one of the subtests pass, the
+ # expected status of the Test is passed.
+ return TestStatus.SUCCESS
+ else:
+ # Finally, if none of the subtests have failed,
+ # crashed, or passed, the expected status of the
+ # Test is skipped.
+ return TestStatus.SKIPPED
+
+ def add_status(self, status: TestStatus) -> None:
+ """
+ Given inputted status, increments corresponding attribute of
+ TestCounts object.
+
+ Parameters:
+ status - status to be added to the TestCounts object
+ """
+ if status == TestStatus.SUCCESS or \
+ status == TestStatus.NO_TESTS:
+ # if status is NO_TESTS the most appropriate
+ # attribute to increment is passed because
+ # the test did not fail, crash or get skipped.
+ self.passed += 1
+ elif status == TestStatus.FAILURE:
+ self.failed += 1
+ elif status == TestStatus.SKIPPED:
+ self.skipped += 1
+ else:
+ self.crashed += 1
+
class LineStream:
- """Provides a peek()/pop() interface over an iterator of (line#, text)."""
+ """
+ A class to represent the lines of kernel output.
+ Provides a peek()/pop() interface over an iterator of
+ (line#, text).
+ """
_lines: Iterator[Tuple[int, str]]
_next: Tuple[int, str]
_done: bool
def __init__(self, lines: Iterator[Tuple[int, str]]):
+		"""Sets defaults for the LineStream object and sets the _lines
+		attribute to the lines parameter.
+ """
self._lines = lines
self._done = False
self._next = (0, '')
self._get_next()
def _get_next(self) -> None:
+		"""Advances the LineStream to the next line or sets the _done
+ attribute if the LineStream has reached the end of the lines.
+ """
try:
self._next = next(self._lines)
except StopIteration:
self._done = True
def peek(self) -> str:
+ """Returns the next line in the LineStream without advancing
+ the LineStream.
+ """
return self._next[1]
def pop(self) -> str:
+ """Returns the next line in the LineStream and advances the
+ LineStream to the next line.
+ """
n = self._next
self._get_next()
return n[1]
def __bool__(self) -> bool:
+ """Returns whether the LineStream has reached the end of the
+ lines.
+ """
return not self._done
# Only used by kunit_tool_test.py.
def __iter__(self) -> Iterator[str]:
+ """Empties all lines stored in LineStream object into
+ Iterator object and returns the Iterator object.
+ """
while bool(self):
yield self.pop()
def line_number(self) -> int:
+ """Returns the line number of the next line in the
+ LineStream.
+ """
return self._next[0]
-kunit_start_re = re.compile(r'TAP version [0-9]+$')
-kunit_end_re = re.compile('(List of all partitions:|'
- 'Kernel panic - not syncing: VFS:|reboot: System halted)')
+# Parsing helper methods:
+
+KTAP_START = re.compile(r'KTAP version ([0-9]+)$')
+TAP_START = re.compile(r'TAP version ([0-9]+)$')
+KTAP_END = re.compile('(List of all partitions:|'
+ 'Kernel panic - not syncing: VFS:|reboot: System halted)')
def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream:
- def isolate_kunit_output(kernel_output: Iterable[str]) -> Iterator[Tuple[int, str]]:
+	"""Extracts KTAP lines from the kernel output and returns them in a
+	LineStream object."""
+ def isolate_ktap_output(kernel_output: Iterable[str]) \
+ -> Iterator[Tuple[int, str]]:
line_num = 0
started = False
for line in kernel_output:
line_num += 1
- line = line.rstrip() # line always has a trailing \n
- if kunit_start_re.search(line):
+ line = line.rstrip() # remove trailing \n
+ if not started and KTAP_START.search(line):
+ # start extracting KTAP lines and set prefix
+ # to number of characters before version line
+ prefix_len = len(
+ line.split('KTAP version')[0])
+ started = True
+ yield line_num, line[prefix_len:]
+ elif not started and TAP_START.search(line):
+ # start extracting KTAP lines and set prefix
+ # to number of characters before version line
prefix_len = len(line.split('TAP version')[0])
started = True
yield line_num, line[prefix_len:]
- elif kunit_end_re.search(line):
+ elif started and KTAP_END.search(line):
+ # stop extracting KTAP lines
break
elif started:
- yield line_num, line[prefix_len:]
- return LineStream(lines=isolate_kunit_output(kernel_output))
-
-DIVIDER = '=' * 60
-
-RESET = '\033[0;0m'
-
-def red(text) -> str:
- return '\033[1;31m' + text + RESET
-
-def yellow(text) -> str:
- return '\033[1;33m' + text + RESET
-
-def green(text) -> str:
- return '\033[1;32m' + text + RESET
-
-def print_with_timestamp(message) -> None:
- print('[%s] %s' % (datetime.now().strftime('%H:%M:%S'), message))
-
-def format_suite_divider(message) -> str:
- return '======== ' + message + ' ========'
-
-def print_suite_divider(message) -> None:
- print_with_timestamp(DIVIDER)
- print_with_timestamp(format_suite_divider(message))
-
-def print_log(log) -> None:
- for m in log:
- print_with_timestamp(m)
-
-TAP_ENTRIES = re.compile(r'^(TAP|[\s]*ok|[\s]*not ok|[\s]*[0-9]+\.\.[0-9]+|[\s]*# (Subtest:|.*: kunit test case crashed!)).*$')
-
-def consume_non_diagnostic(lines: LineStream) -> None:
- while lines and not TAP_ENTRIES.match(lines.peek()):
- lines.pop()
-
-def save_non_diagnostic(lines: LineStream, test_case: TestCase) -> None:
- while lines and not TAP_ENTRIES.match(lines.peek()):
- test_case.log.append(lines.peek())
- lines.pop()
-
-OkNotOkResult = namedtuple('OkNotOkResult', ['is_ok','description', 'text'])
-
-OK_NOT_OK_SKIP = re.compile(r'^[\s]*(ok|not ok) [0-9]+ - (.*) # SKIP(.*)$')
-
-OK_NOT_OK_SUBTEST = re.compile(r'^[\s]+(ok|not ok) [0-9]+ - (.*)$')
-
-OK_NOT_OK_MODULE = re.compile(r'^(ok|not ok) ([0-9]+) - (.*)$')
-
-def parse_ok_not_ok_test_case(lines: LineStream, test_case: TestCase) -> bool:
- save_non_diagnostic(lines, test_case)
- if not lines:
- test_case.status = TestStatus.TEST_CRASHED
- return True
- line = lines.peek()
- match = OK_NOT_OK_SUBTEST.match(line)
- while not match and lines:
- line = lines.pop()
- match = OK_NOT_OK_SUBTEST.match(line)
- if match:
- test_case.log.append(lines.pop())
- test_case.name = match.group(2)
- skip_match = OK_NOT_OK_SKIP.match(line)
- if skip_match:
- test_case.status = TestStatus.SKIPPED
- return True
- if test_case.status == TestStatus.TEST_CRASHED:
- return True
- if match.group(1) == 'ok':
- test_case.status = TestStatus.SUCCESS
- else:
- test_case.status = TestStatus.FAILURE
- return True
+				# remove prefix and any indentation and yield
+ # line with line number
+ line = line[prefix_len:].lstrip()
+ yield line_num, line
+ return LineStream(lines=isolate_ktap_output(kernel_output))
+
+def raw_output(kernel_output: Iterable[str]) -> None:
+ """Prints all lines of kernel output."""
+ for line in kernel_output:
+ print(line.rstrip())
+
+KTAP_VERSIONS = [1]
+TAP_VERSIONS = [13, 14]
+
+def check_version(version_num: int, accepted_versions: List[int],
+ version_type: str, test: Test) -> None:
+ """
+ Adds error to test object if version number is too high or too
+ low.
+
+ Parameters:
+ version_num - The inputted version number from the parsed KTAP or TAP
+ header line
+	accepted_versions - List of accepted KTAP or TAP versions
+ version_type - 'KTAP' or 'TAP' depending on the type of
+ version line.
+ test - Test object for current test being parsed
+ """
+ if version_num < min(accepted_versions):
+ test.add_error(version_type +
+ ' version lower than expected!')
+ elif version_num > max(accepted_versions):
+ test.add_error(
+ version_type + ' version higher than expected!')
+
+def parse_ktap_header(lines: LineStream, test: Test) -> bool:
+ """
+ If the next line in LineStream matches the format of KTAP or TAP
+ header line, the version number is checked, the line is popped,
+ and returns True. Otherwise the method returns False.
+
+ Accepted formats:
+ - 'KTAP version [version number]'
+ - 'TAP version [version number]'
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+ test - Test object for current test being parsed
+
+ Return:
+ Boolean that represents if the next line in the LineStream was parsed
+ as the KTAP or TAP header line
+ """
+ ktap_match = KTAP_START.match(lines.peek())
+ tap_match = TAP_START.match(lines.peek())
+ if ktap_match:
+ version_num = int(ktap_match.group(1))
+ check_version(version_num, KTAP_VERSIONS, 'KTAP', test)
+ elif tap_match:
+ version_num = int(tap_match.group(1))
+ check_version(version_num, TAP_VERSIONS, 'TAP', test)
else:
return False
-
-SUBTEST_DIAGNOSTIC = re.compile(r'^[\s]+# (.*)$')
-DIAGNOSTIC_CRASH_MESSAGE = re.compile(r'^[\s]+# .*?: kunit test case crashed!$')
-
-def parse_diagnostic(lines: LineStream, test_case: TestCase) -> bool:
- save_non_diagnostic(lines, test_case)
- if not lines:
+ test.log.append(lines.pop())
+ return True
+
+TEST_HEADER = re.compile(r'^# Subtest: (.*)$')
+
+def parse_test_header(lines: LineStream, test: Test) -> bool:
+ """
+ If the next line in LineStream matches the format of a test
+ header line, the name of test is set, the line is popped,
+ and returns True. Otherwise the method returns False.
+
+ Accepted format:
+ - '# Subtest: [test name]'
+
+ Parameters:
+ lines - LineStream of ktap output to parse
+ test - Test object for current test being parsed
+
+ Return:
+ Boolean that represents if the next line in the LineStream was parsed
+ as a test header
+ """
+ match = TEST_HEADER.match(lines.peek())
+ if not match:
return False
+ test.log.append(lines.pop())
+ test.name = match.group(1)
+ return True
+
+TEST_PLAN = re.compile(r'1\.\.([0-9]+)')
+
+def parse_test_plan(lines: LineStream, test: Test) -> bool:
+ """
+ If the next line in LineStream matches the format of a test
+ plan line, the expected number of subtests is set in test object, an
+ error is thrown if there are 0 tests, the line is popped,
+ and returns True. Otherwise the method adds an error that the test
+ plan is missing to the test object and returns False.
+
+ Accepted format:
+ - '1..[number of subtests]'
+
+ Parameters:
+ lines - LineStream of ktap output to parse
+ test - Test object for current test being parsed
+
+ Return:
+ Boolean that represents if the next line in the LineStream was parsed
+ as a test plan
+ """
+ match = TEST_PLAN.match(lines.peek())
+ if not match:
+ test.expected_count = None
+ test.add_error('missing plan line!')
+ return False
+ test.log.append(lines.pop())
+ expected_count = int(match.group(1))
+ test.expected_count = expected_count
+ if expected_count == 0:
+ test.status = TestStatus.NO_TESTS
+ test.add_error('0 tests run!')
+ return True
+
+TEST_RESULT = re.compile(r'^(ok|not ok) ([0-9]+) (- )?(.*)$')
+
+TEST_RESULT_SKIP = re.compile(r'^(ok|not ok) ([0-9]+) (- )?(.*) # SKIP(.*)$')
+
+def peek_test_name_match(lines: LineStream, test: Test) -> bool:
+ """
+ If the next line in LineStream matches the format of a test
+ result line and the name of the result line matches the name of the
+ current test, the method returns True. Otherwise it returns False.
+
+ Accepted format:
+ - '[ok|not ok] [test number] [-] [test name] [optional skip
+ directive]'
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+ test - Test object for current test being parsed
+
+ Return:
+ Boolean that represents if the next line in the LineStream matched a
+ test result line and the name matched the expected test name
+ """
line = lines.peek()
- match = SUBTEST_DIAGNOSTIC.match(line)
- if match:
- test_case.log.append(lines.pop())
- crash_match = DIAGNOSTIC_CRASH_MESSAGE.match(line)
- if crash_match:
- test_case.status = TestStatus.TEST_CRASHED
- return True
- else:
+ match = TEST_RESULT.match(line)
+ if not match:
return False
+ name = match.group(4)
+ return (name == test.name)
+
+def parse_test_result(lines: LineStream, test: Test,
+ expected_num: int) -> bool:
+ """
+ If the next line in LineStream matches the format of a test
+ result line, the status in the result line is added to the test
+	object, the test number is checked against the expected test number
+	(an error is added to the test object on mismatch), and True is returned.
+ Otherwise it returns False.
+
+	Note that the skip directive is the only
+ directive that causes a change in status and otherwise the directive
+ is included in the name of the test.
+
+ Accepted format:
+ - '[ok|not ok] [test number] [-] [test name] [optional skip
+ directive]'
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+ test - Test object for current test being parsed
+ expected_num - expected test number for current test
+
+ Return:
+ Boolean that represents if the next line in the LineStream was parsed
+ as a test result line.
+ """
+ line = lines.peek()
+ match = TEST_RESULT.match(line)
+ skip_match = TEST_RESULT_SKIP.match(line)
-def parse_test_case(lines: LineStream) -> Optional[TestCase]:
- test_case = TestCase()
- save_non_diagnostic(lines, test_case)
- while parse_diagnostic(lines, test_case):
- pass
- if parse_ok_not_ok_test_case(lines, test_case):
- return test_case
- else:
- return None
-
-SUBTEST_HEADER = re.compile(r'^[\s]+# Subtest: (.*)$')
+ # Check if line matches test result line format
+ if not match:
+ return False
+ test.log.append(lines.pop())
-def parse_subtest_header(lines: LineStream) -> Optional[str]:
- consume_non_diagnostic(lines)
- if not lines:
- return None
- match = SUBTEST_HEADER.match(lines.peek())
- if match:
- lines.pop()
- return match.group(1)
+ # Set name of test object
+ if skip_match:
+ test.name = skip_match.group(4)
else:
- return None
+ test.name = match.group(4)
-SUBTEST_PLAN = re.compile(r'[\s]+[0-9]+\.\.([0-9]+)')
+ # Check test num
+ num = int(match.group(2))
+ if num != expected_num:
+ test.add_error('Expected test number ' +
+ str(expected_num) + ' but found ' + str(num))
-def parse_subtest_plan(lines: LineStream) -> Optional[int]:
- consume_non_diagnostic(lines)
- match = SUBTEST_PLAN.match(lines.peek())
- if match:
- lines.pop()
- return int(match.group(1))
- else:
- return None
-
-def max_status(left: TestStatus, right: TestStatus) -> TestStatus:
- if left == right:
- return left
- elif left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED:
- return TestStatus.TEST_CRASHED
- elif left == TestStatus.FAILURE or right == TestStatus.FAILURE:
- return TestStatus.FAILURE
- elif left == TestStatus.SKIPPED:
- return right
- else:
- return left
-
-def parse_ok_not_ok_test_suite(lines: LineStream,
- test_suite: TestSuite,
- expected_suite_index: int) -> bool:
- consume_non_diagnostic(lines)
- if not lines:
- test_suite.status = TestStatus.TEST_CRASHED
- return False
- line = lines.peek()
- match = OK_NOT_OK_MODULE.match(line)
- if match:
- lines.pop()
- if match.group(1) == 'ok':
- test_suite.status = TestStatus.SUCCESS
- else:
- test_suite.status = TestStatus.FAILURE
- skip_match = OK_NOT_OK_SKIP.match(line)
- if skip_match:
- test_suite.status = TestStatus.SKIPPED
- suite_index = int(match.group(2))
- if suite_index != expected_suite_index:
- print_with_timestamp(
- red('[ERROR] ') + 'expected_suite_index ' +
- str(expected_suite_index) + ', but got ' +
- str(suite_index))
+ # Set status of test object
+ status = match.group(1)
+ if test.status == TestStatus.TEST_CRASHED:
return True
+ elif skip_match:
+ test.status = TestStatus.SKIPPED
+ elif status == 'ok':
+ test.status = TestStatus.SUCCESS
else:
- return False
-
-def bubble_up_errors(status_list: Iterable[TestStatus]) -> TestStatus:
- return reduce(max_status, status_list, TestStatus.SKIPPED)
+ test.status = TestStatus.FAILURE
+ return True
+
+def parse_diagnostic(lines: LineStream) -> List[str]:
+ """
+	If the next line in LineStream does not match the format of a test
+	result line or test header line, the line is popped and added to the
+	log; this repeats until a result line, a header line, or the end of
+	the stream is reached.
+
+ Line formats that are not parsed:
+ - '# Subtest: [test name]'
+ - '[ok|not ok] [test number] [-] [test name] [optional skip
+ directive]'
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+
+ Return:
+ Log of diagnostic lines
+ """
+ log = [] # type: List[str]
+ while lines and not TEST_RESULT.match(lines.peek()) and not \
+ TEST_HEADER.match(lines.peek()):
+ log.append(lines.pop())
+ return log
+
+DIAGNOSTIC_CRASH_MESSAGE = re.compile(r'^# .*?: kunit test case crashed!$')
+
+def parse_crash_in_log(test: Test) -> bool:
+ """
+ Iterate through the lines of the log to parse for crash message.
+ If crash message found, set status to crashed and return True.
+ Otherwise return False.
+
+ Parameters:
+ test - Test object for current test being parsed
+
+ Return:
+ Boolean that represents if crash message found in log
+ """
+ for line in test.log:
+ if DIAGNOSTIC_CRASH_MESSAGE.match(line):
+ test.status = TestStatus.TEST_CRASHED
+ return True
+ return False
-def bubble_up_test_case_errors(test_suite: TestSuite) -> TestStatus:
- max_test_case_status = bubble_up_errors(x.status for x in test_suite.cases)
- return max_status(max_test_case_status, test_suite.status)
+# Printing helper methods:
-def parse_test_suite(lines: LineStream, expected_suite_index: int) -> Optional[TestSuite]:
- if not lines:
- return None
- consume_non_diagnostic(lines)
- test_suite = TestSuite()
- test_suite.status = TestStatus.SUCCESS
- name = parse_subtest_header(lines)
- if not name:
- return None
- test_suite.name = name
- expected_test_case_num = parse_subtest_plan(lines)
- if expected_test_case_num is None:
- return None
- while expected_test_case_num > 0:
- test_case = parse_test_case(lines)
- if not test_case:
- break
- test_suite.cases.append(test_case)
- expected_test_case_num -= 1
- if parse_ok_not_ok_test_suite(lines, test_suite, expected_suite_index):
- test_suite.status = bubble_up_test_case_errors(test_suite)
- return test_suite
- elif not lines:
- print_with_timestamp(red('[ERROR] ') + 'ran out of lines before end token')
- return test_suite
- else:
- print(f'failed to parse end of suite "{name}", at line {lines.line_number()}: {lines.peek()}')
- return None
+DIVIDER = '=' * 60
-TAP_HEADER = re.compile(r'^TAP version 14$')
+RESET = '\033[0;0m'
-def parse_tap_header(lines: LineStream) -> bool:
- consume_non_diagnostic(lines)
- if TAP_HEADER.match(lines.peek()):
- lines.pop()
- return True
- else:
- return False
+def red(text: str) -> str:
+ """Returns inputted string with red color code."""
+ return '\033[1;31m' + text + RESET
-TEST_PLAN = re.compile(r'[0-9]+\.\.([0-9]+)')
+def yellow(text: str) -> str:
+ """Returns inputted string with yellow color code."""
+ return '\033[1;33m' + text + RESET
-def parse_test_plan(lines: LineStream) -> Optional[int]:
- consume_non_diagnostic(lines)
- match = TEST_PLAN.match(lines.peek())
- if match:
- lines.pop()
- return int(match.group(1))
- else:
- return None
-
-def bubble_up_suite_errors(test_suites: Iterable[TestSuite]) -> TestStatus:
- return bubble_up_errors(x.status for x in test_suites)
-
-def parse_test_result(lines: LineStream) -> TestResult:
- consume_non_diagnostic(lines)
- if not lines or not parse_tap_header(lines):
- return TestResult(TestStatus.FAILURE_TO_PARSE_TESTS, [], lines)
- expected_test_suite_num = parse_test_plan(lines)
- if expected_test_suite_num == 0:
- return TestResult(TestStatus.NO_TESTS, [], lines)
- elif expected_test_suite_num is None:
- return TestResult(TestStatus.FAILURE_TO_PARSE_TESTS, [], lines)
- test_suites = []
- for i in range(1, expected_test_suite_num + 1):
- test_suite = parse_test_suite(lines, i)
- if test_suite:
- test_suites.append(test_suite)
- else:
- print_with_timestamp(
- red('[ERROR] ') + ' expected ' +
- str(expected_test_suite_num) +
- ' test suites, but got ' + str(i - 2))
- break
- test_suite = parse_test_suite(lines, -1)
- if test_suite:
- print_with_timestamp(red('[ERROR] ') +
- 'got unexpected test suite: ' + test_suite.name)
- if test_suites:
- return TestResult(bubble_up_suite_errors(test_suites), test_suites, lines)
- else:
- return TestResult(TestStatus.NO_TESTS, [], lines)
+def green(text: str) -> str:
+ """Returns inputted string with green color code."""
+ return '\033[1;32m' + text + RESET
-class TestCounts:
- passed: int
- failed: int
- crashed: int
- skipped: int
+ANSI_LEN = len(red(''))
- def __init__(self):
- self.passed = 0
- self.failed = 0
- self.crashed = 0
- self.skipped = 0
+def print_with_timestamp(message: str) -> None:
+ """Prints message with timestamp at beginning."""
+ print('[%s] %s' % (datetime.now().strftime('%H:%M:%S'), message))
- def total(self) -> int:
- return self.passed + self.failed + self.crashed + self.skipped
-
-def print_and_count_results(test_result: TestResult) -> TestCounts:
- counts = TestCounts()
- for test_suite in test_result.suites:
- if test_suite.status == TestStatus.SUCCESS:
- print_suite_divider(green('[PASSED] ') + test_suite.name)
- elif test_suite.status == TestStatus.SKIPPED:
- print_suite_divider(yellow('[SKIPPED] ') + test_suite.name)
- elif test_suite.status == TestStatus.TEST_CRASHED:
- print_suite_divider(red('[CRASHED] ' + test_suite.name))
+def format_test_divider(message: str, len_message: int) -> str:
+ """
+ Returns string with message centered in fixed width divider.
+
+ Example:
+ '===================== message example ====================='
+
+ Parameters:
+ message - message to be centered in divider line
+ len_message - length of the message to be printed such that
+ any characters of the color codes are not counted
+
+ Return:
+ String containing message centered in fixed width divider
+ """
+ default_count = 3 # default number of dashes
+ len_1 = default_count
+ len_2 = default_count
+ difference = len(DIVIDER) - len_message - 2 # 2 spaces added
+ if difference > 0:
+ # calculate number of dashes for each side of the divider
+ len_1 = int(difference / 2)
+ len_2 = difference - len_1
+ return ('=' * len_1) + ' ' + message + ' ' + ('=' * len_2)
+
+def print_test_header(test: Test) -> None:
+ """
+ Prints test header with test name and optionally the expected number
+ of subtests.
+
+ Example:
+ '=================== example (2 subtests) ==================='
+
+ Parameters:
+ test - Test object representing current test being printed
+ """
+ message = test.name
+ if test.expected_count:
+ message += ' (' + str(test.expected_count) + ' subtests)'
+ print_with_timestamp(format_test_divider(message, len(message)))
+
+def print_log(log: Iterable[str]) -> None:
+ """
+ Prints all strings in saved log for test in yellow.
+
+ Parameters:
+ log - Iterable object with all strings saved in log for test
+ """
+ for m in log:
+ print_with_timestamp(yellow(m))
+ print_with_timestamp('')
+
+def format_test_result(test: Test) -> str:
+ """
+ Returns string with formatted test result with colored status and test
+ name.
+
+ Example:
+ '[PASSED] example'
+
+ Parameters:
+ test - Test object representing current test being printed
+
+ Return:
+ String containing formatted test result
+ """
+ if test.status == TestStatus.SUCCESS:
+ return (green('[PASSED] ') + test.name)
+ elif test.status == TestStatus.SKIPPED:
+ return (yellow('[SKIPPED] ') + test.name)
+ elif test.status == TestStatus.TEST_CRASHED:
+ print_log(test.log)
+ return (red('[CRASHED] ') + test.name)
+ else:
+ print_log(test.log)
+ return (red('[FAILED] ') + test.name)
+
+def print_test_result(test: Test) -> None:
+ """
+ Prints result line with status of test.
+
+ Example:
+ '[PASSED] example'
+
+ Parameters:
+ test - Test object representing current test being printed
+ """
+ print_with_timestamp(format_test_result(test))
+
+def print_test_footer(test: Test) -> None:
+ """
+ Prints test footer with status of test.
+
+ Example:
+ '===================== [PASSED] example ====================='
+
+ Parameters:
+ test - Test object representing current test being printed
+ """
+ message = format_test_result(test)
+ print_with_timestamp(format_test_divider(message,
+ len(message) - ANSI_LEN))
+
+def print_summary_line(test: Test) -> None:
+ """
+ Prints summary line of test object. Color of line is dependent on
+ status of test. Color is green if test passes, yellow if test is
+ skipped, and red if the test fails or crashes. Summary line contains
+	counts of the statuses of the test's subtests, or of the test itself if it
+ has no subtests.
+
+ Example:
+ "Testing complete. Passed: 2, Failed: 0, Crashed: 0, Skipped: 0,
+ Errors: 0"
+
+ test - Test object representing current test being printed
+ """
+ if test.status == TestStatus.SUCCESS or \
+ test.status == TestStatus.NO_TESTS:
+ color = green
+ elif test.status == TestStatus.SKIPPED:
+ color = yellow
+ else:
+ color = red
+ counts = test.counts
+ print_with_timestamp(color('Testing complete. ' + str(counts)))
+
+def print_error(error_message: str) -> None:
+ """
+ Prints error message with error format.
+
+ Example:
+ "[ERROR] Test example: missing test plan!"
+
+ Parameters:
+ error_message - message describing error
+ """
+ print_with_timestamp(red('[ERROR] ') + error_message)
+
+# Other methods:
+
+def bubble_up_test_results(test: Test) -> None:
+ """
+ If the test has subtests, add the test counts of the subtests to the
+ test and, if any of the subtests crashed, set the test status to
+ crashed. Otherwise, if the test has no subtests, add the status of
+ the test to the test counts.
+
+ Parameters:
+ test - Test object for current test being parsed
+ """
+ parse_crash_in_log(test)
+ subtests = test.subtests
+ counts = test.counts
+ status = test.status
+ for t in subtests:
+ counts.add_subtest_counts(t.counts)
+ if counts.total() == 0:
+ counts.add_status(status)
+ elif test.counts.get_status() == TestStatus.TEST_CRASHED:
+ test.status = TestStatus.TEST_CRASHED
+
+def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
+ """
+ Finds next test to parse in LineStream, creates new Test object,
+ parses any subtests of the test, populates Test object with all
+ information (status, name) about the test and the Test objects for
+ any subtests, and then returns the Test object. The method accepts
+ three formats of tests:
+
+ Accepted test formats:
+
+ - Main KTAP/TAP header
+
+ Example:
+
+ KTAP version 1
+ 1..4
+ [subtests]
+
+ - Subtest header line
+
+ Example:
+
+ # Subtest: name
+ 1..3
+ [subtests]
+ ok 1 name
+
+ - Test result line
+
+ Example:
+
+ ok 1 - test
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+ expected_num - expected test number for test to be parsed
+ log - list of strings containing any preceding diagnostic lines
+ corresponding to the current test
+
+ Return:
+ Test object populated with characteristics and any subtests
+ """
+ test = Test()
+ test.log.extend(log)
+ parent_test = False
+ main = parse_ktap_header(lines, test)
+ if main:
+ # If KTAP/TAP header is found, attempt to parse
+ # test plan
+ test.name = "main"
+ parse_test_plan(lines, test)
+ else:
+ # If KTAP/TAP header is not found, test must be a subtest
+ # header or a test result line, so attempt to parse a
+ # subtest header
+ parent_test = parse_test_header(lines, test)
+ if parent_test:
+ # If subtest header is found, attempt to parse
+ # test plan and print header
+ parse_test_plan(lines, test)
+ print_test_header(test)
+ expected_count = test.expected_count
+ subtests = []
+ test_num = 1
+ while main or expected_count is None or test_num <= expected_count:
+ # Loop to parse any subtests.
+ # If test is main test, do not break until no lines are left.
+ # Otherwise, break after parsing the expected number of tests,
+ # or if the expected number of tests is unknown, break when a
+ # test result line matching the subtest header name is found.
+ if not lines:
+ if expected_count and test_num <= expected_count:
+ test.add_error('missing expected subtests!')
+ break
+ sub_log = parse_diagnostic(lines)
+ if not expected_count and not main and \
+ peek_test_name_match(lines, test):
+ test.log.extend(sub_log)
+ break
+ subtests.append(parse_test(lines, test_num, sub_log))
+ test_num += 1
+ test.subtests = subtests
+ if not main:
+ # If not main test, look for test result line
+ test.log.extend(parse_diagnostic(lines))
+ if (parent_test and peek_test_name_match(lines, test)) or \
+ not parent_test:
+ parse_test_result(lines, test, expected_num)
else:
- print_suite_divider(red('[FAILED] ') + test_suite.name)
- for test_case in test_suite.cases:
- if test_case.status == TestStatus.SUCCESS:
- counts.passed += 1
- print_with_timestamp(green('[PASSED] ') + test_case.name)
- elif test_case.status == TestStatus.SKIPPED:
- counts.skipped += 1
- print_with_timestamp(yellow('[SKIPPED] ') + test_case.name)
- elif test_case.status == TestStatus.TEST_CRASHED:
- counts.crashed += 1
- print_with_timestamp(red('[CRASHED] ' + test_case.name))
- print_log(map(yellow, test_case.log))
- print_with_timestamp('')
- else:
- counts.failed += 1
- print_with_timestamp(red('[FAILED] ') + test_case.name)
- print_log(map(yellow, test_case.log))
- print_with_timestamp('')
- return counts
+ test.add_error('missing subtest result line!')
+ # Add statuses to TestCounts attribute in Test object
+ bubble_up_test_results(test)
+ if parent_test:
+ # If test has subtests and is not the main test object, print
+ # footer.
+ print_test_footer(test)
+ elif not main:
+ print_test_result(test)
+ return test
def parse_run_tests(kernel_output: Iterable[str]) -> TestResult:
- counts = TestCounts()
+ """
+ Using kernel output, extract KTAP lines, parse the lines for test
+ results, and print condensed test results and a summary line.
+
+ Parameters:
+ kernel_output - Iterable object containing lines of kernel output
+
+ Return:
+ TestResult - Tuple containing status of main test object, main test
+ object with all subtests, and log of all KTAP lines.
+ """
+ print_with_timestamp(DIVIDER)
lines = extract_tap_lines(kernel_output)
- test_result = parse_test_result(lines)
- if test_result.status == TestStatus.NO_TESTS:
- print(red('[ERROR] ') + yellow('no tests run!'))
- elif test_result.status == TestStatus.FAILURE_TO_PARSE_TESTS:
- print(red('[ERROR] ') + yellow('could not parse test results!'))
+ test = Test()
+ if not lines:
+ test.add_error('invalid KTAP input!')
+ test.status = TestStatus.FAILURE_TO_PARSE_TESTS
else:
- counts = print_and_count_results(test_result)
+ test = parse_test(lines, 0, [])
+ if test.status != TestStatus.NO_TESTS:
+ test.status = test.counts.get_status()
print_with_timestamp(DIVIDER)
- if test_result.status == TestStatus.SUCCESS:
- fmt = green
- elif test_result.status == TestStatus.SKIPPED:
- fmt = yellow
- else:
- fmt =red
- print_with_timestamp(
- fmt('Testing complete. %d tests run. %d failed. %d crashed. %d skipped.' %
- (counts.total(), counts.failed, counts.crashed, counts.skipped)))
- return test_result
+ print_summary_line(test)
+ return TestResult(test.status, test, lines)
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index 619c4554cbff..e527b90de8ea 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -106,10 +106,10 @@ class KUnitParserTest(unittest.TestCase):
with open(log_path) as file:
result = kunit_parser.extract_tap_lines(file.readlines())
self.assertContains('TAP version 14', result)
- self.assertContains(' # Subtest: example', result)
- self.assertContains(' 1..2', result)
- self.assertContains(' ok 1 - example_simple_test', result)
- self.assertContains(' ok 2 - example_mock_test', result)
+ self.assertContains('# Subtest: example', result)
+ self.assertContains('1..2', result)
+ self.assertContains('ok 1 - example_simple_test', result)
+ self.assertContains('ok 2 - example_mock_test', result)
self.assertContains('ok 1 - example', result)
def test_output_with_prefix_isolated_correctly(self):
@@ -117,28 +117,28 @@ class KUnitParserTest(unittest.TestCase):
with open(log_path) as file:
result = kunit_parser.extract_tap_lines(file.readlines())
self.assertContains('TAP version 14', result)
- self.assertContains(' # Subtest: kunit-resource-test', result)
- self.assertContains(' 1..5', result)
- self.assertContains(' ok 1 - kunit_resource_test_init_resources', result)
- self.assertContains(' ok 2 - kunit_resource_test_alloc_resource', result)
- self.assertContains(' ok 3 - kunit_resource_test_destroy_resource', result)
- self.assertContains(' foo bar #', result)
- self.assertContains(' ok 4 - kunit_resource_test_cleanup_resources', result)
- self.assertContains(' ok 5 - kunit_resource_test_proper_free_ordering', result)
+ self.assertContains('# Subtest: kunit-resource-test', result)
+ self.assertContains('1..5', result)
+ self.assertContains('ok 1 - kunit_resource_test_init_resources', result)
+ self.assertContains('ok 2 - kunit_resource_test_alloc_resource', result)
+ self.assertContains('ok 3 - kunit_resource_test_destroy_resource', result)
+ self.assertContains('foo bar #', result)
+ self.assertContains('ok 4 - kunit_resource_test_cleanup_resources', result)
+ self.assertContains('ok 5 - kunit_resource_test_proper_free_ordering', result)
self.assertContains('ok 1 - kunit-resource-test', result)
- self.assertContains(' foo bar # non-kunit output', result)
- self.assertContains(' # Subtest: kunit-try-catch-test', result)
- self.assertContains(' 1..2', result)
- self.assertContains(' ok 1 - kunit_test_try_catch_successful_try_no_catch',
+ self.assertContains('foo bar # non-kunit output', result)
+ self.assertContains('# Subtest: kunit-try-catch-test', result)
+ self.assertContains('1..2', result)
+ self.assertContains('ok 1 - kunit_test_try_catch_successful_try_no_catch',
result)
- self.assertContains(' ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch',
+ self.assertContains('ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch',
result)
self.assertContains('ok 2 - kunit-try-catch-test', result)
- self.assertContains(' # Subtest: string-stream-test', result)
- self.assertContains(' 1..3', result)
- self.assertContains(' ok 1 - string_stream_test_empty_on_creation', result)
- self.assertContains(' ok 2 - string_stream_test_not_empty_after_add', result)
- self.assertContains(' ok 3 - string_stream_test_get_string', result)
+ self.assertContains('# Subtest: string-stream-test', result)
+ self.assertContains('1..3', result)
+ self.assertContains('ok 1 - string_stream_test_empty_on_creation', result)
+ self.assertContains('ok 2 - string_stream_test_not_empty_after_add', result)
+ self.assertContains('ok 3 - string_stream_test_get_string', result)
self.assertContains('ok 3 - string-stream-test', result)
def test_parse_successful_test_log(self):
@@ -148,6 +148,13 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
+ def test_parse_successful_nested_tests_log(self):
+ all_passed_log = test_data_path('test_is_test_passed-all_passed_nested.log')
+ with open(all_passed_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines())
+ self.assertEqual(
+ kunit_parser.TestStatus.SUCCESS,
+ result.status)
def test_parse_failed_test_log(self):
failed_log = test_data_path('test_is_test_passed-failure.log')
@@ -162,17 +169,31 @@ class KUnitParserTest(unittest.TestCase):
with open(empty_log) as file:
result = kunit_parser.parse_run_tests(
kunit_parser.extract_tap_lines(file.readlines()))
- self.assertEqual(0, len(result.suites))
+ self.assertEqual(0, len(result.test.subtests))
self.assertEqual(
kunit_parser.TestStatus.FAILURE_TO_PARSE_TESTS,
result.status)
+ def test_missing_test_plan(self):
+ missing_plan_log = test_data_path('test_is_test_passed-'
+ 'missing_plan.log')
+ with open(missing_plan_log) as file:
+ result = kunit_parser.parse_run_tests(
+ kunit_parser.extract_tap_lines(
+ file.readlines()))
+ self.assertEqual(2, result.test.counts.errors)
+ self.assertEqual(
+ kunit_parser.TestStatus.SUCCESS,
+ result.status)
+
def test_no_tests(self):
- empty_log = test_data_path('test_is_test_passed-no_tests_run_with_header.log')
- with open(empty_log) as file:
+ header_log = test_data_path('test_is_test_passed-'
+ 'no_tests_run_with_header.log')
+ with open(header_log) as file:
result = kunit_parser.parse_run_tests(
- kunit_parser.extract_tap_lines(file.readlines()))
- self.assertEqual(0, len(result.suites))
+ kunit_parser.extract_tap_lines(
+ file.readlines()))
+ self.assertEqual(0, len(result.test.subtests))
self.assertEqual(
kunit_parser.TestStatus.NO_TESTS,
result.status)
@@ -182,15 +203,17 @@ class KUnitParserTest(unittest.TestCase):
print_mock = mock.patch('builtins.print').start()
with open(crash_log) as file:
result = kunit_parser.parse_run_tests(
- kunit_parser.extract_tap_lines(file.readlines()))
- print_mock.assert_any_call(StrContains('could not parse test results!'))
+ kunit_parser.extract_tap_lines(
+ file.readlines()))
+ print_mock.assert_any_call(StrContains('invalid KTAP input!'))
print_mock.stop()
file.close()
def test_crashed_test(self):
crashed_log = test_data_path('test_is_test_passed-crash.log')
with open(crashed_log) as file:
- result = kunit_parser.parse_run_tests(file.readlines())
+ result = kunit_parser.parse_run_tests(
+ file.readlines())
self.assertEqual(
kunit_parser.TestStatus.TEST_CRASHED,
result.status)
@@ -216,6 +239,23 @@ class KUnitParserTest(unittest.TestCase):
result.status)
file.close()
+ def test_ignores_hyphen(self):
+ hyphen_log = test_data_path('test_strip_hyphen.log')
+ file = open(hyphen_log)
+ result = kunit_parser.parse_run_tests(file.readlines())
+
+ # A skipped test does not fail the whole suite.
+ self.assertEqual(
+ kunit_parser.TestStatus.SUCCESS,
+ result.status)
+ self.assertEqual(
+ "sysctl_test",
+ result.test.subtests[0].name)
+ self.assertEqual(
+ "example",
+ result.test.subtests[1].name)
+ file.close()
+
def test_ignores_prefix_printk_time(self):
prefix_log = test_data_path('test_config_printk_time.log')
@@ -224,7 +264,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
def test_ignores_multiple_prefixes(self):
prefix_log = test_data_path('test_multiple_prefixes.log')
@@ -233,7 +273,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
def test_prefix_mixed_kernel_output(self):
mixed_prefix_log = test_data_path('test_interrupted_tap_output.log')
@@ -242,7 +282,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
def test_prefix_poundsign(self):
pound_log = test_data_path('test_pound_sign.log')
@@ -251,16 +291,16 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
def test_kernel_panic_end(self):
panic_log = test_data_path('test_kernel_panic_interrupt.log')
with open(panic_log) as file:
result = kunit_parser.parse_run_tests(file.readlines())
self.assertEqual(
- kunit_parser.TestStatus.TEST_CRASHED,
+ kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
def test_pound_no_prefix(self):
pound_log = test_data_path('test_pound_no_prefix.log')
@@ -269,7 +309,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
class LinuxSourceTreeTest(unittest.TestCase):
@@ -291,6 +331,14 @@ class LinuxSourceTreeTest(unittest.TestCase):
pass
tree = kunit_kernel.LinuxSourceTree('', kunitconfig_path=dir)
+ def test_kselftest_nested(self):
+ kselftest_log = test_data_path('test_is_test_passed-kselftest.log')
+ with open(kselftest_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines())
+ self.assertEqual(
+ kunit_parser.TestStatus.SUCCESS,
+ result.status)
+
# TODO: add more test cases.
@@ -322,6 +370,12 @@ class KUnitJsonTest(unittest.TestCase):
result = self._json_for('test_is_test_passed-no_tests_run_with_header.log')
self.assertEqual(0, len(result['sub_groups']))
+ def test_nested_json(self):
+ result = self._json_for('test_is_test_passed-all_passed_nested.log')
+ self.assertEqual(
+ {'name': 'example_simple_test', 'status': 'PASS'},
+ result["sub_groups"][0]["sub_groups"][0]["test_cases"][0])
+
class StrContains(str):
def __eq__(self, other):
return self in other
@@ -380,7 +434,7 @@ class KUnitMainTest(unittest.TestCase):
self.assertEqual(e.exception.code, 1)
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
- self.print_mock.assert_any_call(StrContains(' 0 tests run'))
+ self.print_mock.assert_any_call(StrContains('invalid KTAP input!'))
def test_exec_raw_output(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
@@ -388,7 +442,7 @@ class KUnitMainTest(unittest.TestCase):
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
for call in self.print_mock.call_args_list:
self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
- self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))
+ self.assertNotEqual(call, mock.call(StrContains(' 0 tests run!')))
def test_run_raw_output(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
@@ -397,7 +451,7 @@ class KUnitMainTest(unittest.TestCase):
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
for call in self.print_mock.call_args_list:
self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
- self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))
+ self.assertNotEqual(call, mock.call(StrContains(' 0 tests run!')))
def test_run_raw_output_kunit(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log b/tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log
new file mode 100644
index 000000000000..9d5b04fe43a6
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log
@@ -0,0 +1,34 @@
+TAP version 14
+1..2
+ # Subtest: sysctl_test
+ 1..4
+ # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed
+ ok 1 - sysctl_test_dointvec_null_tbl_data
+ # Subtest: example
+ 1..2
+ init_suite
+ # example_simple_test: initializing
+ # example_simple_test: example_simple_test passed
+ ok 1 - example_simple_test
+ # example_mock_test: initializing
+ # example_mock_test: example_mock_test passed
+ ok 2 - example_mock_test
+ kunit example: all tests passed
+ ok 2 - example
+ # sysctl_test_dointvec_table_len_is_zero: sysctl_test_dointvec_table_len_is_zero passed
+ ok 3 - sysctl_test_dointvec_table_len_is_zero
+ # sysctl_test_dointvec_table_read_but_position_set: sysctl_test_dointvec_table_read_but_position_set passed
+ ok 4 - sysctl_test_dointvec_table_read_but_position_set
+kunit sysctl_test: all tests passed
+ok 1 - sysctl_test
+ # Subtest: example
+ 1..2
+init_suite
+ # example_simple_test: initializing
+ # example_simple_test: example_simple_test passed
+ ok 1 - example_simple_test
+ # example_mock_test: initializing
+ # example_mock_test: example_mock_test passed
+ ok 2 - example_mock_test
+kunit example: all tests passed
+ok 2 - example
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-kselftest.log b/tools/testing/kunit/test_data/test_is_test_passed-kselftest.log
new file mode 100644
index 000000000000..65d3f27feaf2
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-kselftest.log
@@ -0,0 +1,14 @@
+TAP version 13
+1..2
+# selftests: membarrier: membarrier_test_single_thread
+# TAP version 13
+# 1..2
+# ok 1 sys_membarrier available
+# ok 2 sys membarrier invalid command test: command = -1, flags = 0, errno = 22. Failed as expected
+ok 1 selftests: membarrier: membarrier_test_single_thread
+# selftests: membarrier: membarrier_test_multi_thread
+# TAP version 13
+# 1..2
+# ok 1 sys_membarrier available
+# ok 2 sys membarrier invalid command test: command = -1, flags = 0, errno = 22. Failed as expected
+ok 2 selftests: membarrier: membarrier_test_multi_thread
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log b/tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log
new file mode 100644
index 000000000000..5cd17b7f818a
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log
@@ -0,0 +1,31 @@
+KTAP version 1
+ # Subtest: sysctl_test
+ # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed
+ ok 1 - sysctl_test_dointvec_null_tbl_data
+ # sysctl_test_dointvec_table_maxlen_unset: sysctl_test_dointvec_table_maxlen_unset passed
+ ok 2 - sysctl_test_dointvec_table_maxlen_unset
+ # sysctl_test_dointvec_table_len_is_zero: sysctl_test_dointvec_table_len_is_zero passed
+ ok 3 - sysctl_test_dointvec_table_len_is_zero
+ # sysctl_test_dointvec_table_read_but_position_set: sysctl_test_dointvec_table_read_but_position_set passed
+ ok 4 - sysctl_test_dointvec_table_read_but_position_set
+ # sysctl_test_dointvec_happy_single_positive: sysctl_test_dointvec_happy_single_positive passed
+ ok 5 - sysctl_test_dointvec_happy_single_positive
+ # sysctl_test_dointvec_happy_single_negative: sysctl_test_dointvec_happy_single_negative passed
+ ok 6 - sysctl_test_dointvec_happy_single_negative
+ # sysctl_test_dointvec_single_less_int_min: sysctl_test_dointvec_single_less_int_min passed
+ ok 7 - sysctl_test_dointvec_single_less_int_min
+ # sysctl_test_dointvec_single_greater_int_max: sysctl_test_dointvec_single_greater_int_max passed
+ ok 8 - sysctl_test_dointvec_single_greater_int_max
+kunit sysctl_test: all tests passed
+ok 1 - sysctl_test
+ # Subtest: example
+ 1..2
+init_suite
+ # example_simple_test: initializing
+ # example_simple_test: example_simple_test passed
+ ok 1 - example_simple_test
+ # example_mock_test: initializing
+ # example_mock_test: example_mock_test passed
+ ok 2 - example_mock_test
+kunit example: all tests passed
+ok 2 - example
diff --git a/tools/testing/kunit/test_data/test_strip_hyphen.log b/tools/testing/kunit/test_data/test_strip_hyphen.log
new file mode 100644
index 000000000000..92ac7c24b374
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_strip_hyphen.log
@@ -0,0 +1,16 @@
+KTAP version 1
+1..2
+ # Subtest: sysctl_test
+ 1..1
+ # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed
+ ok 1 - sysctl_test_dointvec_null_tbl_data
+kunit sysctl_test: all tests passed
+ok 1 - sysctl_test
+ # Subtest: example
+ 1..1
+init_suite
+ # example_simple_test: initializing
+ # example_simple_test: example_simple_test passed
+ ok 1 example_simple_test
+kunit example: all tests passed
+ok 2 example
--
2.33.0.259.gc128427fd7-goog
v7:
- Simplify the documentation patch (patch 5) as suggested by Tejun.
- Fix a typo in patch 2 and improper commit log in patch 3.
v6:
- Remove duplicated tmpmask from update_prstate() which should fix the
frame size too large problem reported by kernel test robot.
v5:
- Rebased to the latest for-5.15 branch of cgroup git tree and drop the
1st v4 patch as it has been merged.
- Update patch 1 to always allow changing partition root back to member
even if it invalidates child partitions underneath it.
- Adjust the empty effective cpu partition patch to not allow 0 effective
cpu for a terminal partition, which would make it invalid.
- Add a new patch to enable reading of cpuset.cpus.partition to display
the reason that causes invalid partition.
- Adjust the documentation and testing patch accordingly.
This patchset makes four enhancements to the cpuset v2 code.
Patch 1: Properly handle partition root tree and make partition
invalid in case changes to cpuset.cpus violate any of the partition
root constraints.
Patch 2: Enable the "cpuset.cpus.partition" file to show the reason
that causes invalid partition like "root invalid (No cpu available
due to hotplug)".
Patch 3: Add a new partition state "isolated" to create a partition
root without load balancing. This is for handling intermittent workloads
that have a strict low latency requirement.
Patch 4: Allow partition roots that are not the top cpuset to distribute
all their cpus to child partitions as long as there is no task associated
with that partition root. This allows more flexibility for middleware
to manage multiple partitions.
Patch 5 updates the cgroup-v2.rst file accordingly. Patch 6 adds a new
cpuset test to test the new cpuset partition code.
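For illustration, the new partition interface can be driven with plain
file I/O. Below is a minimal C sketch, not part of this series; the
cgroup v2 mount point and the child cgroup name "part1" are
assumptions. It requests the new "isolated" type (patch 3) and reads
back the state string, which with patch 2 reports the reason for an
invalid partition:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical cgroup path; adjust to the real hierarchy. */
	const char *path = "/sys/fs/cgroup/part1/cpuset.cpus.partition";
	char buf[64];
	ssize_t n;
	int fd;

	fd = open(path, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Request the new "isolated" partition type. */
	if (write(fd, "isolated", strlen("isolated")) < 0)
		perror("write");
	lseek(fd, 0, SEEK_SET);
	/* Reads back e.g. "isolated" on success, or a reason string
	 * such as "root invalid (No cpu available due to hotplug)".
	 */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("partition state: %s", buf);
	}
	close(fd);
	return 0;
}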
Waiman Long (6):
cgroup/cpuset: Properly transition to invalid partition
cgroup/cpuset: Show invalid partition reason string
cgroup/cpuset: Add a new isolated cpus.partition type
cgroup/cpuset: Allow non-top parent partition to distribute out all
CPUs
cgroup/cpuset: Update description of cpuset.cpus.partition in
cgroup-v2.rst
kselftest/cgroup: Add cpuset v2 partition root state test
Documentation/admin-guide/cgroup-v2.rst | 112 +--
kernel/cgroup/cpuset.c | 337 ++++++---
tools/testing/selftests/cgroup/Makefile | 5 +-
.../selftests/cgroup/test_cpuset_prs.sh | 663 ++++++++++++++++++
tools/testing/selftests/cgroup/wait_inotify.c | 86 +++
5 files changed, 1050 insertions(+), 153 deletions(-)
create mode 100755 tools/testing/selftests/cgroup/test_cpuset_prs.sh
create mode 100644 tools/testing/selftests/cgroup/wait_inotify.c
--
2.18.1
The current PTP driver exposes one PTP device to user space, which
binds one or more network interfaces to provide timestamping. In fact,
by utilizing timecounter/cyclecounter, we have a way to virtualize any
number of PTP clocks on top of the same free-running physical clock.
The purpose of having multiple PTP virtual clocks is to let user space
directly and easily use them to synchronize multiple domains.
user
space: ^ ^
| SO_TIMESTAMPING new flag: | Packets with
| SOF_TIMESTAMPING_BIND_PHC | TX/RX HW timestamps
v v
+--------------------------------------------+
sock: | sock (new member sk_bind_phc) |
+--------------------------------------------+
^ ^
| ethtool_get_phc_vclocks | Convert HW timestamps
| | to sk_bind_phc
v v
+--------------+--------------+--------------+
vclock: | ptp1 | ptp2 | ptpN |
+--------------+--------------+--------------+
pclock: | ptp0 free running |
+--------------------------------------------+
The block diagram above illustrates how this works. Besides the PTP
virtual clocks, the conversion of packet HW timestamps to the bound
PHC is also done in the sock driver. For user space, PTP virtual
clocks can be created via sysfs, and the extended SO_TIMESTAMPING API
(new flag SOF_TIMESTAMPING_BIND_PHC) can be used to bind one PTP
virtual clock for timestamping.
The test tool timestamping.c (together with linuxptp phc_ctl tool) can
be used to verify:
# echo 4 > /sys/class/ptp/ptp0/n_vclocks
[ 129.399472] ptp ptp0: new virtual clock ptp2
[ 129.404234] ptp ptp0: new virtual clock ptp3
[ 129.409532] ptp ptp0: new virtual clock ptp4
[ 129.413942] ptp ptp0: new virtual clock ptp5
[ 129.418257] ptp ptp0: guarantee physical clock free running
#
# phc_ctl /dev/ptp2 set 10000
# phc_ctl /dev/ptp3 set 20000
#
# timestamping eno0 2 SOF_TIMESTAMPING_TX_HARDWARE SOF_TIMESTAMPING_RAW_HARDWARE SOF_TIMESTAMPING_BIND_PHC
# timestamping eno0 2 SOF_TIMESTAMPING_RX_HARDWARE SOF_TIMESTAMPING_RAW_HARDWARE SOF_TIMESTAMPING_BIND_PHC
# timestamping eno0 3 SOF_TIMESTAMPING_TX_HARDWARE SOF_TIMESTAMPING_RAW_HARDWARE SOF_TIMESTAMPING_BIND_PHC
# timestamping eno0 3 SOF_TIMESTAMPING_RX_HARDWARE SOF_TIMESTAMPING_RAW_HARDWARE SOF_TIMESTAMPING_BIND_PHC
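As a rough sketch of the new socket API (not taken from the series;
the socket setup and the PHC index 2 for the ptp2 vclock are
assumptions for illustration, and the code needs the uapi headers
from this series), binding a socket to a virtual clock looks like:

#include <linux/net_tstamp.h>
#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_TIMESTAMPING
#define SO_TIMESTAMPING 37	/* value on most architectures */
#endif

static int bind_vclock(int sock)
{
	/* struct so_timestamping and SOF_TIMESTAMPING_BIND_PHC are
	 * introduced by this series in include/uapi/linux/net_tstamp.h.
	 */
	struct so_timestamping ts = {
		.flags = SOF_TIMESTAMPING_TX_HARDWARE |
			 SOF_TIMESTAMPING_RX_HARDWARE |
			 SOF_TIMESTAMPING_RAW_HARDWARE |
			 SOF_TIMESTAMPING_BIND_PHC,
		.bind_phc = 2,	/* PHC index of the ptp2 vclock */
	};

	if (setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
		       &ts, sizeof(ts)) < 0) {
		perror("setsockopt(SO_TIMESTAMPING)");
		return -1;
	}
	return 0;
}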
Changes for v2:
- Converted to num_vclocks for creating virtual clocks.
- Guaranteed physical clock free running when using virtual
clocks.
- Fixed build warning.
- Updated copyright.
Changes for v3:
- Supported PTP virtual clock in default in PTP driver.
- Protected concurrent access to ptp->num_vclocks.
- Supported PHC vclocks query via ethtool.
- Extended SO_TIMESTAMPING API for PHC binding.
- Converted HW timestamps to the bound PHC, instead of the previous
idea of binding a domain value to a PHC.
- Other minor fixes.
Changes for v4:
- Used do_aux_work callback for vclock refreshing instead.
- Used unsigned int for the number of vclocks, and max_vclocks
for the limit.
- Fixed mutex locking.
- Dynamically allocated memory for vclock index storage.
- Removed the ethtool ioctl command for getting vclocks.
- Updated doc for ethtool phc vclocks get.
- Converted to mptcp_setsockopt_sol_socket_timestamping().
- Passed so_timestamping for sock_set_timestamping.
- Fixed checkpatch/build.
- Other minor fixes.
Yangbo Lu (11):
ptp: add ptp virtual clock driver framework
ptp: support ptp physical/virtual clocks conversion
ptp: track available ptp vclocks information
ptp: add kernel API ptp_get_vclocks_index()
ethtool: add a new command for getting PHC virtual clocks
ptp: add kernel API ptp_convert_timestamp()
mptcp: setsockopt: convert to
mptcp_setsockopt_sol_socket_timestamping()
net: sock: extend SO_TIMESTAMPING for PHC binding
net: socket: support hardware timestamp conversion to PHC bound
selftests/net: timestamping: support binding PHC
MAINTAINERS: add entry for PTP virtual clock driver
Documentation/ABI/testing/sysfs-ptp | 20 ++
Documentation/networking/ethtool-netlink.rst | 22 ++
MAINTAINERS | 7 +
drivers/ptp/Makefile | 2 +-
drivers/ptp/ptp_clock.c | 41 +++-
drivers/ptp/ptp_private.h | 39 ++++
drivers/ptp/ptp_sysfs.c | 160 ++++++++++++++
drivers/ptp/ptp_vclock.c | 219 +++++++++++++++++++
include/linux/ethtool.h | 10 +
include/linux/ptp_clock_kernel.h | 31 ++-
include/net/sock.h | 8 +-
include/uapi/linux/ethtool_netlink.h | 15 ++
include/uapi/linux/net_tstamp.h | 17 +-
net/core/sock.c | 65 +++++-
net/ethtool/Makefile | 2 +-
net/ethtool/common.c | 14 ++
net/ethtool/netlink.c | 10 +
net/ethtool/netlink.h | 2 +
net/ethtool/phc_vclocks.c | 94 ++++++++
net/mptcp/sockopt.c | 69 ++++--
net/socket.c | 19 +-
tools/testing/selftests/net/timestamping.c | 62 ++++--
22 files changed, 875 insertions(+), 53 deletions(-)
create mode 100644 drivers/ptp/ptp_vclock.c
create mode 100644 net/ethtool/phc_vclocks.c
base-commit: 19938bafa7ae8fc0a4a2c1c1430abb1a04668da1
--
2.25.1
Synchronous Ethernet networks use a physical layer clock to syntonize
the frequency across different network elements.
Multiple reference clock sources can be used: clocks recovered from
PHY ports on the RX side, or external sources like 1PPS GPS, etc.
This patch series introduces basic interface for reading the DPLL
state on a SyncE capable device. This state gives us information
about the source of the syntonization signal and whether the DPLL
circuit is tuned to the incoming signal.
Next steps:
- add interface to enable recovered clocks and get information
about them
Maciej Machnikowski (2):
rtnetlink: Add new RTM_GETSYNCESTATE message to get SyncE status
ice: add support for reading SyncE DPLL state
drivers/net/ethernet/intel/ice/ice.h | 5 ++
.../net/ethernet/intel/ice/ice_adminq_cmd.h | 34 ++++++++
drivers/net/ethernet/intel/ice/ice_common.c | 62 +++++++++++++++
drivers/net/ethernet/intel/ice/ice_common.h | 4 +
drivers/net/ethernet/intel/ice/ice_devids.h | 3 +
drivers/net/ethernet/intel/ice/ice_main.c | 55 +++++++++++++
drivers/net/ethernet/intel/ice/ice_ptp.c | 35 +++++++++
drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 44 +++++++++++
drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 22 ++++++
include/linux/netdevice.h | 6 ++
include/uapi/linux/if_link.h | 43 +++++++++++
include/uapi/linux/rtnetlink.h | 11 ++-
net/core/rtnetlink.c | 77 +++++++++++++++++++
security/selinux/nlmsgtab.c | 3 +-
14 files changed, 399 insertions(+), 5 deletions(-)
--
2.26.3
From: Changcheng Deng <deng.changcheng(a)zte.com.cn>
tools/testing/selftests/move_mount_set_group/move_mount_set_group_test.c:
225:18-23:WARNING: conversion to bool not needed here
Because the function is defined as
"static int move_mount_set_group_supported(void)",
the returned values should be int, not bool.
Reported-by: Zeal Robot <zealci(a)zte.com.cn>
Signed-off-by: Changcheng Deng <deng.changcheng(a)zte.com.cn>
---
.../testing/selftests/move_mount_set_group/move_mount_set_group_test.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/testing/selftests/move_mount_set_group/move_mount_set_group_test.c b/tools/testing/selftests/move_mount_set_group/move_mount_set_group_test.c
index 860198f..beade21 100644
--- a/tools/testing/selftests/move_mount_set_group/move_mount_set_group_test.c
+++ b/tools/testing/selftests/move_mount_set_group/move_mount_set_group_test.c
@@ -222,7 +222,7 @@ static int move_mount_set_group_supported(void)
AT_FDCWD, SET_GROUP_TO, MOVE_MOUNT_SET_GROUP);
umount2("/tmp", MNT_DETACH);
- return ret < 0 ? false : true;
+ return ret < 0 ? 0 : 1;
}
FIXTURE(move_mount_set_group) {
--
1.8.3.1
Patch 1 fixes a KVM+rseq bug where KVM's handling of TIF_NOTIFY_RESUME,
e.g. for task migration, clears the flag without informing rseq and leads
to stale data in userspace's rseq struct.
Patch 2 is a cleanup to try and make future bugs less likely. It's also
a baby step towards moving and renaming tracehook_notify_resume() since
it has nothing to do with tracing.
Patch 3 is a fix/cleanup to stop overriding x86's unistd_{32,64}.h when
the include path (intentionally) omits tools' uapi headers. KVM's
selftests do exactly that so that they can pick up the uapi headers from
the installed kernel headers, and still use various tools/ headers that
mirror kernel code, e.g. linux/types.h. This allows the new test in
patch 4 to reference __NR_rseq without having to manually define it.
Patch 4 is a regression test for the KVM+rseq bug.
Patch 5 is a cleanup made possible by patch 3.
v2:
- Don't touch rseq_cs when handling KVM case so that rseq_syscall() will
still detect a naughty userspace. [Mathieu]
- Use a sequence counter + retry in the test to ensure the process isn't
migrated between sched_getcpu() and reading rseq.cpu_id, i.e. to
avoid a flaky test. [Mathieu]
- Add Mathieu's ack for patch 2.
- Add more comments in the test.
v1: https://lkml.kernel.org/r/20210818001210.4073390-1-seanjc@google.com
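For context on the anti-flake technique from the v2 notes, a hedged
sketch of the sequence counter + retry pattern follows. The names
(migration_seq, read_rseq_cpu) are illustrative stand-ins, not symbols
from the actual test; read_rseq_cpu() represents reading rseq.cpu_id
from the registered struct rseq:

#define _GNU_SOURCE
#include <assert.h>
#include <sched.h>
#include <stdatomic.h>

extern atomic_uint migration_seq;	/* bumped around sched_setaffinity()
					 * by a hypothetical migration thread;
					 * odd while a migration is in flight */
extern int read_rseq_cpu(void);		/* hypothetical: returns rseq.cpu_id */

static void check_cpu_consistency(void)
{
	unsigned int snap;
	int cpu, rseq_cpu;

	do {
		snap = atomic_load(&migration_seq);
		cpu = sched_getcpu();
		rseq_cpu = read_rseq_cpu();
		/* Retry if a migration raced with the two reads. */
	} while ((snap & 1) || snap != atomic_load(&migration_seq));

	/* With the bug, rseq.cpu_id can be stale after KVM_RUN. */
	assert(cpu == rseq_cpu);
}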
Sean Christopherson (5):
KVM: rseq: Update rseq when processing NOTIFY_RESUME on xfer to KVM
guest
entry: rseq: Call rseq_handle_notify_resume() in
tracehook_notify_resume()
tools: Move x86 syscall number fallbacks to .../uapi/
KVM: selftests: Add a test for KVM_RUN+rseq to detect task migration
bugs
KVM: selftests: Remove __NR_userfaultfd syscall fallback
arch/arm/kernel/signal.c | 1 -
arch/arm64/kernel/signal.c | 1 -
arch/csky/kernel/signal.c | 4 +-
arch/mips/kernel/signal.c | 4 +-
arch/powerpc/kernel/signal.c | 4 +-
arch/s390/kernel/signal.c | 1 -
include/linux/tracehook.h | 2 +
kernel/entry/common.c | 4 +-
kernel/rseq.c | 14 +-
.../x86/include/{ => uapi}/asm/unistd_32.h | 0
.../x86/include/{ => uapi}/asm/unistd_64.h | 3 -
tools/testing/selftests/kvm/.gitignore | 1 +
tools/testing/selftests/kvm/Makefile | 3 +
tools/testing/selftests/kvm/rseq_test.c | 154 ++++++++++++++++++
14 files changed, 175 insertions(+), 21 deletions(-)
rename tools/arch/x86/include/{ => uapi}/asm/unistd_32.h (100%)
rename tools/arch/x86/include/{ => uapi}/asm/unistd_64.h (83%)
create mode 100644 tools/testing/selftests/kvm/rseq_test.c
--
2.33.0.rc2.250.ged5fa647cd-goog
0Day will check whether all configs listed under selftests can be
enabled properly.
For the missing configs, it will report something like:
LKP WARN miss config CONFIG_SYNC= of sync/config
CC: kernel test robot <lkp(a)intel.com>
CC: "Jason A. Donenfeld" <Jason(a)zx2c4.com>
CC: Nick Desaulniers <ndesaulniers(a)google.com>
CC: Masahiro Yamada <masahiroy(a)kernel.org>
CC: wireguard(a)lists.zx2c4.com
CC: netdev(a)vger.kernel.org
CC: "Rafael J. Wysocki" <rjw(a)rjwysocki.net>
CC: Viresh Kumar <viresh.kumar(a)linaro.org>
CC: linux-pm(a)vger.kernel.org
Reported-by: kernel test robot <lkp(a)intel.com>
Li Zhijian (3):
selftests/sync: Remove the deprecated config SYNC
selftests/cpufreq: Rename DEBUG_PI_LIST to DEBUG_PLIST
selftests/wireguard: Rename DEBUG_PI_LIST to DEBUG_PLIST
tools/testing/selftests/cpufreq/config | 2 +-
tools/testing/selftests/sync/config | 1 -
tools/testing/selftests/wireguard/qemu/debug.config | 2 +-
3 files changed, 2 insertions(+), 3 deletions(-)
--
2.31.1
From: Colin Ian King <colin.king(a)canonical.com>
There is a spelling mistake in an error message. Fix it.
Signed-off-by: Colin Ian King <colin.king(a)canonical.com>
---
tools/testing/selftests/safesetid/safesetid-test.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/testing/selftests/safesetid/safesetid-test.c b/tools/testing/selftests/safesetid/safesetid-test.c
index 0c4d50644c13..4b809c93ba36 100644
--- a/tools/testing/selftests/safesetid/safesetid-test.c
+++ b/tools/testing/selftests/safesetid/safesetid-test.c
@@ -152,7 +152,7 @@ static void write_policies(void)
fd = open(add_whitelist_policy_file, O_WRONLY);
if (fd < 0)
- die("cant open add_whitelist_policy file\n");
+ die("can't open add_whitelist_policy file\n");
written = write(fd, policy_str, strlen(policy_str));
if (written != strlen(policy_str)) {
if (written >= 0) {
--
2.32.0
From: Colin Ian King <colin.king(a)canonical.com>
There is a spelling mistake in an error message. Fix it.
Signed-off-by: Colin Ian King <colin.king(a)canonical.com>
---
tools/testing/selftests/vm/mlock-random-test.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/testing/selftests/vm/mlock-random-test.c b/tools/testing/selftests/vm/mlock-random-test.c
index ff4d72eb74b9..782ea94dee2f 100644
--- a/tools/testing/selftests/vm/mlock-random-test.c
+++ b/tools/testing/selftests/vm/mlock-random-test.c
@@ -70,7 +70,7 @@ int get_proc_locked_vm_size(void)
}
}
- perror("cann't parse VmLck in /proc/self/status\n");
+ perror("cannot parse VmLck in /proc/self/status\n");
fclose(f);
return -1;
}
--
2.32.0
v6:
- Remove duplicated tmpmask from update_prstate() which should fix the
frame size too large problem reported by kernel test robot.
v5:
- Rebased to the latest for-5.15 branch of cgroup git tree and drop the
1st v4 patch as it has been merged.
- Update patch 1 to always allow changing partition root back to member
even if it invalidates child partitions underneath it.
- Adjust the empty effective cpu partition patch to not allow 0 effective
cpu for a terminal partition, which would make it invalid.
- Add a new patch to enable reading of cpuset.cpus.partition to display
the reason that causes invalid partition.
- Adjust the documentation and testing patch accordingly.
v4:
- Rebased to the for-5.15 branch of cgroup git tree and dropped the
first 3 patches of v3 series which have been merged.
- Beside prohibiting violation of cpu exclusivity rule, allow arbitrary
changes to cpuset.cpus of a partition root and force the partition root
to become invalid in case any of the partition root constraints
are violated. The documentation file and self test are modified
accordingly.
This patchset makes four enhancements to the cpuset v2 code.
Patch 1: Properly handle partition root tree and make partition
invalid in case changes to cpuset.cpus violate any of the partition
root constraints.
Patch 2: Enable the "cpuset.cpus.partition" file to show the reason
that causes invalid partition like "root invalid (No cpu available
due to hotplug)".
Patch 3: Add a new partition state "isolated" to create a partition
root without load balancing. This is for handling intermittent workloads
that have a strict low latency requirement.
Patch 4: Allow partition roots that are not the top cpuset to distribute
all their cpus to child partitions as long as there is no task associated
with that partition root. This allows more flexibility for middleware
to manage multiple partitions.
Patch 5 updates the cgroup-v2.rst file accordingly. Patch 6 adds a new
cpuset test to test the new cpuset partition code.
Waiman Long (6):
cgroup/cpuset: Properly transition to invalid partition
cgroup/cpuset: Show invalid partition reason string
cgroup/cpuset: Add a new isolated cpus.partition type
cgroup/cpuset: Allow non-top parent partition to distribute out all
CPUs
cgroup/cpuset: Update description of cpuset.cpus.partition in
cgroup-v2.rst
kselftest/cgroup: Add cpuset v2 partition root state test
Documentation/admin-guide/cgroup-v2.rst | 116 +--
kernel/cgroup/cpuset.c | 337 ++++++---
tools/testing/selftests/cgroup/Makefile | 5 +-
.../selftests/cgroup/test_cpuset_prs.sh | 663 ++++++++++++++++++
tools/testing/selftests/cgroup/wait_inotify.c | 86 +++
5 files changed, 1058 insertions(+), 149 deletions(-)
create mode 100755 tools/testing/selftests/cgroup/test_cpuset_prs.sh
create mode 100644 tools/testing/selftests/cgroup/wait_inotify.c
--
2.18.1
There are several test cases in the net directory that are still using
exit 0 or exit 1 when they need to be skipped. Use the kselftest
framework skip code instead to help us distinguish the return
status.
Criterion to filter out what should be fixed in net directory:
grep -r "exit [01]" -B1 | grep -i skip
This change might cause some false positives if people are running
these test scripts directly and only checking their return codes,
which will change from 0 to 4. However, I think the impact should be
small as most of our scripts here are already using this skip code.
And there will be no such issue if running them with the kselftest
framework.
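For comparison, C-based selftests get the same behavior from the
shared kselftest.h helpers instead of a hand-rolled constant. A
minimal sketch (not part of this series; the include path assumes a
test living one directory below tools/testing/selftests/):

#include <unistd.h>
#include "../kselftest.h"	/* provides KSFT_SKIP (4) and helpers */

int main(void)
{
	if (geteuid() != 0)
		ksft_exit_skip("Need root privileges\n");
	/* ... actual test body ... */
	return 0;
}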
Signed-off-by: Po-Hsu Lin <po-hsu.lin(a)canonical.com>
---
tools/testing/selftests/net/fcnal-test.sh | 5 +++-
tools/testing/selftests/net/fib_rule_tests.sh | 7 ++++--
.../selftests/net/forwarding/devlink_lib.sh | 15 +++++++-----
tools/testing/selftests/net/forwarding/lib.sh | 27 ++++++++++++----------
.../selftests/net/forwarding/router_mpath_nh.sh | 2 +-
.../net/forwarding/router_mpath_nh_res.sh | 2 +-
tools/testing/selftests/net/run_afpackettests | 5 +++-
.../selftests/net/srv6_end_dt46_l3vpn_test.sh | 9 +++++---
.../selftests/net/srv6_end_dt4_l3vpn_test.sh | 9 +++++---
.../selftests/net/srv6_end_dt6_l3vpn_test.sh | 9 +++++---
tools/testing/selftests/net/unicast_extensions.sh | 5 +++-
.../testing/selftests/net/vrf_strict_mode_test.sh | 9 +++++---
12 files changed, 67 insertions(+), 37 deletions(-)
diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh
index a8ad928..9074e25 100755
--- a/tools/testing/selftests/net/fcnal-test.sh
+++ b/tools/testing/selftests/net/fcnal-test.sh
@@ -37,6 +37,9 @@
#
# server / client nomenclature relative to ns-A
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
VERBOSE=0
NSA_DEV=eth1
@@ -3946,7 +3949,7 @@ fi
which nettest >/dev/null
if [ $? -ne 0 ]; then
echo "'nettest' command not found; skipping tests"
- exit 0
+ exit $ksft_skip
fi
declare -i nfail=0
diff --git a/tools/testing/selftests/net/fib_rule_tests.sh b/tools/testing/selftests/net/fib_rule_tests.sh
index a93e6b6..43ea840 100755
--- a/tools/testing/selftests/net/fib_rule_tests.sh
+++ b/tools/testing/selftests/net/fib_rule_tests.sh
@@ -3,6 +3,9 @@
# This test is for checking IPv4 and IPv6 FIB rules API
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
ret=0
PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no}
@@ -238,12 +241,12 @@ run_fibrule_tests()
if [ "$(id -u)" -ne 0 ];then
echo "SKIP: Need root privileges"
- exit 0
+ exit $ksft_skip
fi
if [ ! -x "$(command -v ip)" ]; then
echo "SKIP: Could not run test without ip tool"
- exit 0
+ exit $ksft_skip
fi
# start clean
diff --git a/tools/testing/selftests/net/forwarding/devlink_lib.sh b/tools/testing/selftests/net/forwarding/devlink_lib.sh
index 13d3d44..2c14a86 100644
--- a/tools/testing/selftests/net/forwarding/devlink_lib.sh
+++ b/tools/testing/selftests/net/forwarding/devlink_lib.sh
@@ -1,6 +1,9 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
##############################################################################
# Defines
@@ -9,11 +12,11 @@ if [[ ! -v DEVLINK_DEV ]]; then
| jq -r '.port | keys[]' | cut -d/ -f-2)
if [ -z "$DEVLINK_DEV" ]; then
echo "SKIP: ${NETIFS[p1]} has no devlink device registered for it"
- exit 1
+ exit $ksft_skip
fi
if [[ "$(echo $DEVLINK_DEV | grep -c pci)" -eq 0 ]]; then
echo "SKIP: devlink device's bus is not PCI"
- exit 1
+ exit $ksft_skip
fi
DEVLINK_VIDDID=$(lspci -s $(echo $DEVLINK_DEV | cut -d"/" -f2) \
@@ -22,7 +25,7 @@ elif [[ ! -z "$DEVLINK_DEV" ]]; then
devlink dev show $DEVLINK_DEV &> /dev/null
if [ $? -ne 0 ]; then
echo "SKIP: devlink device \"$DEVLINK_DEV\" not found"
- exit 1
+ exit $ksft_skip
fi
fi
@@ -32,19 +35,19 @@ fi
devlink help 2>&1 | grep resource &> /dev/null
if [ $? -ne 0 ]; then
echo "SKIP: iproute2 too old, missing devlink resource support"
- exit 1
+ exit $ksft_skip
fi
devlink help 2>&1 | grep trap &> /dev/null
if [ $? -ne 0 ]; then
echo "SKIP: iproute2 too old, missing devlink trap support"
- exit 1
+ exit $ksft_skip
fi
devlink dev help 2>&1 | grep info &> /dev/null
if [ $? -ne 0 ]; then
echo "SKIP: iproute2 too old, missing devlink dev info support"
- exit 1
+ exit $ksft_skip
fi
##############################################################################
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index 42e28c9..e7fc5c3 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -4,6 +4,9 @@
##############################################################################
# Defines
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
# Can be overridden by the configuration file.
PING=${PING:=ping}
PING6=${PING6:=ping6}
@@ -38,7 +41,7 @@ check_tc_version()
tc -j &> /dev/null
if [[ $? -ne 0 ]]; then
echo "SKIP: iproute2 too old; tc is missing JSON support"
- exit 1
+ exit $ksft_skip
fi
}
@@ -51,7 +54,7 @@ check_tc_mpls_support()
matchall action pipe &> /dev/null
if [[ $? -ne 0 ]]; then
echo "SKIP: iproute2 too old; tc is missing MPLS support"
- return 1
+ return $ksft_skip
fi
tc filter del dev $dev ingress protocol mpls_uc pref 1 handle 1 \
matchall
@@ -69,7 +72,7 @@ check_tc_mpls_lse_stats()
if [[ $? -ne 0 ]]; then
echo "SKIP: iproute2 too old; tc-flower is missing extended MPLS support"
- return 1
+ return $ksft_skip
fi
tc -j filter show dev $dev ingress protocol mpls_uc | jq . &> /dev/null
@@ -79,7 +82,7 @@ check_tc_mpls_lse_stats()
if [[ $ret -ne 0 ]]; then
echo "SKIP: iproute2 too old; tc-flower produces invalid json output for extended MPLS filters"
- return 1
+ return $ksft_skip
fi
}
@@ -88,7 +91,7 @@ check_tc_shblock_support()
tc filter help 2>&1 | grep block &> /dev/null
if [[ $? -ne 0 ]]; then
echo "SKIP: iproute2 too old; tc is missing shared block support"
- exit 1
+ exit $ksft_skip
fi
}
@@ -97,7 +100,7 @@ check_tc_chain_support()
tc help 2>&1|grep chain &> /dev/null
if [[ $? -ne 0 ]]; then
echo "SKIP: iproute2 too old; tc is missing chain support"
- exit 1
+ exit $ksft_skip
fi
}
@@ -106,7 +109,7 @@ check_tc_action_hw_stats_support()
tc actions help 2>&1 | grep -q hw_stats
if [[ $? -ne 0 ]]; then
echo "SKIP: iproute2 too old; tc is missing action hw_stats support"
- exit 1
+ exit $ksft_skip
fi
}
@@ -115,13 +118,13 @@ check_ethtool_lanes_support()
ethtool --help 2>&1| grep lanes &> /dev/null
if [[ $? -ne 0 ]]; then
echo "SKIP: ethtool too old; it is missing lanes support"
- exit 1
+ exit $ksft_skip
fi
}
if [[ "$(id -u)" -ne 0 ]]; then
echo "SKIP: need root privileges"
- exit 0
+ exit $ksft_skip
fi
if [[ "$CHECK_TC" = "yes" ]]; then
@@ -134,7 +137,7 @@ require_command()
if [[ ! -x "$(command -v "$cmd")" ]]; then
echo "SKIP: $cmd not installed"
- exit 1
+ exit $ksft_skip
fi
}
@@ -143,7 +146,7 @@ require_command $MZ
if [[ ! -v NUM_NETIFS ]]; then
echo "SKIP: importer does not define \"NUM_NETIFS\""
- exit 1
+ exit $ksft_skip
fi
##############################################################################
@@ -203,7 +206,7 @@ for ((i = 1; i <= NUM_NETIFS; ++i)); do
ip link show dev ${NETIFS[p$i]} &> /dev/null
if [[ $? -ne 0 ]]; then
echo "SKIP: could not find all required interfaces"
- exit 1
+ exit $ksft_skip
fi
done
diff --git a/tools/testing/selftests/net/forwarding/router_mpath_nh.sh b/tools/testing/selftests/net/forwarding/router_mpath_nh.sh
index 76efb1f..a0d612e 100755
--- a/tools/testing/selftests/net/forwarding/router_mpath_nh.sh
+++ b/tools/testing/selftests/net/forwarding/router_mpath_nh.sh
@@ -411,7 +411,7 @@ ping_ipv6()
ip nexthop ls >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "Nexthop objects not supported; skipping tests"
- exit 0
+ exit $ksft_skip
fi
trap cleanup EXIT
diff --git a/tools/testing/selftests/net/forwarding/router_mpath_nh_res.sh b/tools/testing/selftests/net/forwarding/router_mpath_nh_res.sh
index 4898dd4..cb08ffe 100755
--- a/tools/testing/selftests/net/forwarding/router_mpath_nh_res.sh
+++ b/tools/testing/selftests/net/forwarding/router_mpath_nh_res.sh
@@ -386,7 +386,7 @@ ping_ipv6()
ip nexthop ls >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "Nexthop objects not supported; skipping tests"
- exit 0
+ exit $ksft_skip
fi
trap cleanup EXIT
diff --git a/tools/testing/selftests/net/run_afpackettests b/tools/testing/selftests/net/run_afpackettests
index 8b42e8b..a59cb6a 100755
--- a/tools/testing/selftests/net/run_afpackettests
+++ b/tools/testing/selftests/net/run_afpackettests
@@ -1,9 +1,12 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
if [ $(id -u) != 0 ]; then
echo $msg must be run as root >&2
- exit 0
+ exit $ksft_skip
fi
ret=0
diff --git a/tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh b/tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh
index 75ada17..aebaab8 100755
--- a/tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh
+++ b/tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh
@@ -193,6 +193,9 @@
# +---------------------------------------------------+
#
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
readonly LOCALSID_TABLE_ID=90
readonly IPv6_RT_NETWORK=fd00
readonly IPv6_HS_NETWORK=cafe
@@ -543,18 +546,18 @@ host_vpn_isolation_tests()
if [ "$(id -u)" -ne 0 ];then
echo "SKIP: Need root privileges"
- exit 0
+ exit $ksft_skip
fi
if [ ! -x "$(command -v ip)" ]; then
echo "SKIP: Could not run test without ip tool"
- exit 0
+ exit $ksft_skip
fi
modprobe vrf &>/dev/null
if [ ! -e /proc/sys/net/vrf/strict_mode ]; then
echo "SKIP: vrf sysctl does not exist"
- exit 0
+ exit $ksft_skip
fi
cleanup &>/dev/null
diff --git a/tools/testing/selftests/net/srv6_end_dt4_l3vpn_test.sh b/tools/testing/selftests/net/srv6_end_dt4_l3vpn_test.sh
index ad7a9fc..1003119 100755
--- a/tools/testing/selftests/net/srv6_end_dt4_l3vpn_test.sh
+++ b/tools/testing/selftests/net/srv6_end_dt4_l3vpn_test.sh
@@ -163,6 +163,9 @@
# +---------------------------------------------------+
#
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
readonly LOCALSID_TABLE_ID=90
readonly IPv6_RT_NETWORK=fd00
readonly IPv4_HS_NETWORK=10.0.0
@@ -464,18 +467,18 @@ host_vpn_isolation_tests()
if [ "$(id -u)" -ne 0 ];then
echo "SKIP: Need root privileges"
- exit 0
+ exit $ksft_skip
fi
if [ ! -x "$(command -v ip)" ]; then
echo "SKIP: Could not run test without ip tool"
- exit 0
+ exit $ksft_skip
fi
modprobe vrf &>/dev/null
if [ ! -e /proc/sys/net/vrf/strict_mode ]; then
echo "SKIP: vrf sysctl does not exist"
- exit 0
+ exit $ksft_skip
fi
cleanup &>/dev/null
diff --git a/tools/testing/selftests/net/srv6_end_dt6_l3vpn_test.sh b/tools/testing/selftests/net/srv6_end_dt6_l3vpn_test.sh
index 68708f5..b9b06ef 100755
--- a/tools/testing/selftests/net/srv6_end_dt6_l3vpn_test.sh
+++ b/tools/testing/selftests/net/srv6_end_dt6_l3vpn_test.sh
@@ -164,6 +164,9 @@
# +---------------------------------------------------+
#
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
readonly LOCALSID_TABLE_ID=90
readonly IPv6_RT_NETWORK=fd00
readonly IPv6_HS_NETWORK=cafe
@@ -472,18 +475,18 @@ host_vpn_isolation_tests()
if [ "$(id -u)" -ne 0 ];then
echo "SKIP: Need root privileges"
- exit 0
+ exit $ksft_skip
fi
if [ ! -x "$(command -v ip)" ]; then
echo "SKIP: Could not run test without ip tool"
- exit 0
+ exit $ksft_skip
fi
modprobe vrf &>/dev/null
if [ ! -e /proc/sys/net/vrf/strict_mode ]; then
echo "SKIP: vrf sysctl does not exist"
- exit 0
+ exit $ksft_skip
fi
cleanup &>/dev/null
diff --git a/tools/testing/selftests/net/unicast_extensions.sh b/tools/testing/selftests/net/unicast_extensions.sh
index 66354cd..2d10cca 100755
--- a/tools/testing/selftests/net/unicast_extensions.sh
+++ b/tools/testing/selftests/net/unicast_extensions.sh
@@ -28,12 +28,15 @@
# These tests provide an easy way to flip the expected result of any
# of these behaviors for testing kernel patches that change them.
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
# nettest can be run from PATH or from same directory as this selftest
if ! which nettest >/dev/null; then
PATH=$PWD:$PATH
if ! which nettest >/dev/null; then
echo "'nettest' command not found; skipping tests"
- exit 0
+ exit $ksft_skip
fi
fi
diff --git a/tools/testing/selftests/net/vrf_strict_mode_test.sh b/tools/testing/selftests/net/vrf_strict_mode_test.sh
index 18b982d..865d53c 100755
--- a/tools/testing/selftests/net/vrf_strict_mode_test.sh
+++ b/tools/testing/selftests/net/vrf_strict_mode_test.sh
@@ -3,6 +3,9 @@
# This test is designed for testing the new VRF strict_mode functionality.
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
ret=0
# identifies the "init" network namespace which is often called root network
@@ -371,18 +374,18 @@ vrf_strict_mode_check_support()
if [ "$(id -u)" -ne 0 ];then
echo "SKIP: Need root privileges"
- exit 0
+ exit $ksft_skip
fi
if [ ! -x "$(command -v ip)" ]; then
echo "SKIP: Could not run test without ip tool"
- exit 0
+ exit $ksft_skip
fi
modprobe vrf &>/dev/null
if [ ! -e /proc/sys/net/vrf/strict_mode ]; then
echo "SKIP: vrf sysctl does not exist"
- exit 0
+ exit $ksft_skip
fi
cleanup &> /dev/null
--
2.7.4