Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kunit: tool: Support skipped tests in kunit_tool
Add support for the SKIP directive to kunit_tool's TAP parser. Skipped tests
now show up as such in the printed summary. The number of skipped tests is
counted, and if all tests in a suite are skipped, the suite is also marked as
skipped. Otherwise, skipped tests do not affect the suite result.

Example output:

[00:22:34] ======== [SKIPPED] example_skip ========
[00:22:34] [SKIPPED] example_skip_test # SKIP this test should be skipped
[00:22:34] [SKIPPED] example_mark_skipped_test # SKIP this test should be skipped
[00:22:34] ============================================================
[00:22:34] Testing complete. 2 tests run. 0 failed. 0 crashed. 2 skipped.

Signed-off-by: David Gow <davidgow@google.com>
Reviewed-by: Daniel Latypov <dlatypov@google.com>
Reviewed-by: Brendan Higgins <brendanhiggins@google.com>
Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
Parent: 6d2426b2f2
Commit: 5acaf6031f
4 changed files with 105 additions and 24 deletions
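As background for the diff below, the parser's entry point is kunit_parser.parse_run_tests(), which the new test cases exercise with captured TAP logs. A minimal illustrative sketch (not part of the patch; it assumes tools/testing/kunit is on the Python path) of feeding it a log in which every test is skipped:

    import kunit_parser

    # A tiny TAP log in the same shape as the new test_skip_all_tests.log below.
    tap_log = [
        'TAP version 14\n',
        '1..1\n',
        '    # Subtest: example\n',
        '    1..1\n',
        '    ok 1 - example_skip_test # SKIP this test should be skipped\n',
        'ok 1 - example # SKIP\n',
    ]

    result = kunit_parser.parse_run_tests(tap_log)
    print(result.status)  # expected: TestStatus.SKIPPED, since every test was skipped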
tools/testing/kunit/kunit_parser.py
@@ -43,6 +43,7 @@ class TestCase(object):
 class TestStatus(Enum):
     SUCCESS = auto()
     FAILURE = auto()
+    SKIPPED = auto()
     TEST_CRASHED = auto()
     NO_TESTS = auto()
     FAILURE_TO_PARSE_TESTS = auto()
@@ -149,6 +150,8 @@ def save_non_diagnostic(lines: LineStream, test_case: TestCase) -> None:
 
 OkNotOkResult = namedtuple('OkNotOkResult', ['is_ok','description', 'text'])
 
+OK_NOT_OK_SKIP = re.compile(r'^[\s]*(ok|not ok) [0-9]+ - (.*) # SKIP(.*)$')
+
 OK_NOT_OK_SUBTEST = re.compile(r'^[\s]+(ok|not ok) [0-9]+ - (.*)$')
 
 OK_NOT_OK_MODULE = re.compile(r'^(ok|not ok) ([0-9]+) - (.*)$')
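For illustration only (not part of the patch), the new OK_NOT_OK_SKIP pattern splits a SKIP line into the test description and the skip reason:

    import re

    OK_NOT_OK_SKIP = re.compile(r'^[\s]*(ok|not ok) [0-9]+ - (.*) # SKIP(.*)$')

    line = 'ok 2 - example_skip_test # SKIP this test should be skipped'
    match = OK_NOT_OK_SKIP.match(line)
    print(match.group(2))  # 'example_skip_test'
    print(match.group(3))  # ' this test should be skipped'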
@@ -166,6 +169,10 @@ def parse_ok_not_ok_test_case(lines: LineStream, test_case: TestCase) -> bool:
     if match:
         test_case.log.append(lines.pop())
         test_case.name = match.group(2)
+        skip_match = OK_NOT_OK_SKIP.match(line)
+        if skip_match:
+            test_case.status = TestStatus.SKIPPED
+            return True
         if test_case.status == TestStatus.TEST_CRASHED:
             return True
         if match.group(1) == 'ok':
@@ -229,16 +236,16 @@ def parse_subtest_plan(lines: LineStream) -> Optional[int]:
         return None
 
 def max_status(left: TestStatus, right: TestStatus) -> TestStatus:
-    if left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED:
+    if left == right:
+        return left
+    elif left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED:
         return TestStatus.TEST_CRASHED
     elif left == TestStatus.FAILURE or right == TestStatus.FAILURE:
         return TestStatus.FAILURE
-    elif left != TestStatus.SUCCESS:
-        return left
-    elif right != TestStatus.SUCCESS:
+    elif left == TestStatus.SKIPPED:
         return right
     else:
-        return TestStatus.SUCCESS
+        return left
 
 def parse_ok_not_ok_test_suite(lines: LineStream,
                                test_suite: TestSuite,
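A few spot checks of the reworked ordering (illustrative only, assuming TestStatus and max_status are imported from kunit_parser): SKIPPED loses to every other status except another SKIPPED, so a lone skipped case no longer pulls a suite away from SUCCESS or FAILURE.

    from kunit_parser import TestStatus, max_status

    assert max_status(TestStatus.SKIPPED, TestStatus.SKIPPED) == TestStatus.SKIPPED
    assert max_status(TestStatus.SUCCESS, TestStatus.SKIPPED) == TestStatus.SUCCESS
    assert max_status(TestStatus.FAILURE, TestStatus.SKIPPED) == TestStatus.FAILURE
    assert max_status(TestStatus.TEST_CRASHED, TestStatus.SUCCESS) == TestStatus.TEST_CRASHED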
@@ -255,6 +262,9 @@ def parse_ok_not_ok_test_suite(lines: LineStream,
             test_suite.status = TestStatus.SUCCESS
         else:
             test_suite.status = TestStatus.FAILURE
+        skip_match = OK_NOT_OK_SKIP.match(line)
+        if skip_match:
+            test_suite.status = TestStatus.SKIPPED
         suite_index = int(match.group(2))
         if suite_index != expected_suite_index:
             print_with_timestamp(
@@ -265,8 +275,8 @@ def parse_ok_not_ok_test_suite(lines: LineStream,
     else:
         return False
 
-def bubble_up_errors(statuses: Iterable[TestStatus]) -> TestStatus:
-    return reduce(max_status, statuses, TestStatus.SUCCESS)
+def bubble_up_errors(status_list: Iterable[TestStatus]) -> TestStatus:
+    return reduce(max_status, status_list, TestStatus.SKIPPED)
 
 def bubble_up_test_case_errors(test_suite: TestSuite) -> TestStatus:
     max_test_case_status = bubble_up_errors(x.status for x in test_suite.cases)
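The reduce() seed changes from SUCCESS to SKIPPED so that a suite whose cases are all skipped bubbles up as SKIPPED, while any real result still wins because max_status() ranks SKIPPED below everything else. An illustrative check (not part of the patch):

    from kunit_parser import TestStatus, bubble_up_errors

    print(bubble_up_errors([TestStatus.SKIPPED, TestStatus.SKIPPED]))  # TestStatus.SKIPPED
    print(bubble_up_errors([TestStatus.SUCCESS, TestStatus.SKIPPED]))  # TestStatus.SUCCESS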
@@ -352,37 +362,53 @@ def parse_test_result(lines: LineStream) -> TestResult:
     else:
         return TestResult(TestStatus.NO_TESTS, [], lines)
 
-def print_and_count_results(test_result: TestResult) -> Tuple[int, int, int]:
-    total_tests = 0
-    failed_tests = 0
-    crashed_tests = 0
+class TestCounts:
+    passed: int
+    failed: int
+    crashed: int
+    skipped: int
+
+    def __init__(self):
+        self.passed = 0
+        self.failed = 0
+        self.crashed = 0
+        self.skipped = 0
+
+    def total(self) -> int:
+        return self.passed + self.failed + self.crashed + self.skipped
+
+def print_and_count_results(test_result: TestResult) -> TestCounts:
+    counts = TestCounts()
     for test_suite in test_result.suites:
         if test_suite.status == TestStatus.SUCCESS:
             print_suite_divider(green('[PASSED] ') + test_suite.name)
+        elif test_suite.status == TestStatus.SKIPPED:
+            print_suite_divider(yellow('[SKIPPED] ') + test_suite.name)
         elif test_suite.status == TestStatus.TEST_CRASHED:
             print_suite_divider(red('[CRASHED] ' + test_suite.name))
         else:
             print_suite_divider(red('[FAILED] ') + test_suite.name)
         for test_case in test_suite.cases:
-            total_tests += 1
             if test_case.status == TestStatus.SUCCESS:
+                counts.passed += 1
                 print_with_timestamp(green('[PASSED] ') + test_case.name)
+            elif test_case.status == TestStatus.SKIPPED:
+                counts.skipped += 1
+                print_with_timestamp(yellow('[SKIPPED] ') + test_case.name)
             elif test_case.status == TestStatus.TEST_CRASHED:
-                crashed_tests += 1
+                counts.crashed += 1
                 print_with_timestamp(red('[CRASHED] ' + test_case.name))
                 print_log(map(yellow, test_case.log))
                 print_with_timestamp('')
             else:
-                failed_tests += 1
+                counts.failed += 1
                 print_with_timestamp(red('[FAILED] ') + test_case.name)
                 print_log(map(yellow, test_case.log))
                 print_with_timestamp('')
-    return total_tests, failed_tests, crashed_tests
+    return counts
 
 def parse_run_tests(kernel_output: Iterable[str]) -> TestResult:
-    total_tests = 0
-    failed_tests = 0
-    crashed_tests = 0
+    counts = TestCounts()
     lines = extract_tap_lines(kernel_output)
     test_result = parse_test_result(lines)
     if test_result.status == TestStatus.NO_TESTS:
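A small usage sketch of the new TestCounts container (illustrative only, assuming it is imported from kunit_parser):

    from kunit_parser import TestCounts

    counts = TestCounts()
    counts.passed += 1
    counts.skipped += 2
    print(counts.total())  # 3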
@@ -390,12 +416,15 @@ def parse_run_tests(kernel_output: Iterable[str]) -> TestResult:
     elif test_result.status == TestStatus.FAILURE_TO_PARSE_TESTS:
         print(red('[ERROR] ') + yellow('could not parse test results!'))
     else:
-        (total_tests,
-         failed_tests,
-         crashed_tests) = print_and_count_results(test_result)
+        counts = print_and_count_results(test_result)
     print_with_timestamp(DIVIDER)
-    fmt = green if test_result.status == TestStatus.SUCCESS else red
+    if test_result.status == TestStatus.SUCCESS:
+        fmt = green
+    elif test_result.status == TestStatus.SKIPPED:
+        fmt = yellow
+    else:
+        fmt =red
     print_with_timestamp(
-        fmt('Testing complete. %d tests run. %d failed. %d crashed.' %
-            (total_tests, failed_tests, crashed_tests)))
+        fmt('Testing complete. %d tests run. %d failed. %d crashed. %d skipped.' %
+            (counts.total(), counts.failed, counts.crashed, counts.skipped)))
     return test_result
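Plugging in the numbers from the example output in the commit message (two skipped tests, nothing failed or crashed), the new summary format produces:

    summary = ('Testing complete. %d tests run. %d failed. %d crashed. %d skipped.'
               % (2, 0, 0, 2))
    print(summary)  # Testing complete. 2 tests run. 0 failed. 0 crashed. 2 skipped.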
tools/testing/kunit/kunit_tool_test.py
@@ -185,6 +185,28 @@ class KUnitParserTest(unittest.TestCase):
             kunit_parser.TestStatus.TEST_CRASHED,
             result.status)
 
+    def test_skipped_test(self):
+        skipped_log = test_data_path('test_skip_tests.log')
+        file = open(skipped_log)
+        result = kunit_parser.parse_run_tests(file.readlines())
+
+        # A skipped test does not fail the whole suite.
+        self.assertEqual(
+            kunit_parser.TestStatus.SUCCESS,
+            result.status)
+        file.close()
+
+    def test_skipped_all_tests(self):
+        skipped_log = test_data_path('test_skip_all_tests.log')
+        file = open(skipped_log)
+        result = kunit_parser.parse_run_tests(file.readlines())
+
+        self.assertEqual(
+            kunit_parser.TestStatus.SKIPPED,
+            result.status)
+        file.close()
+
+
     def test_ignores_prefix_printk_time(self):
         prefix_log = test_data_path('test_config_printk_time.log')
         with open(prefix_log) as file:
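The two new cases can be run on their own with the standard unittest machinery; a sketch, assuming tools/testing/kunit is the working directory or on sys.path:

    import unittest
    from kunit_tool_test import KUnitParserTest

    suite = unittest.TestSuite()
    suite.addTest(KUnitParserTest('test_skipped_test'))
    suite.addTest(KUnitParserTest('test_skipped_all_tests'))
    unittest.TextTestRunner(verbosity=2).run(suite)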
tools/testing/kunit/test_data/test_skip_all_tests.log (new file, 15 lines)
@@ -0,0 +1,15 @@
+TAP version 14
+1..2
+    # Subtest: string-stream-test
+    1..3
+    ok 1 - string_stream_test_empty_on_creation # SKIP all tests skipped
+    ok 2 - string_stream_test_not_empty_after_add # SKIP all tests skipped
+    ok 3 - string_stream_test_get_string # SKIP all tests skipped
+ok 1 - string-stream-test # SKIP
+    # Subtest: example
+    1..2
+    # example_simple_test: initializing
+    ok 1 - example_simple_test # SKIP all tests skipped
+    # example_skip_test: initializing
+    ok 2 - example_skip_test # SKIP this test should be skipped
+ok 2 - example # SKIP
tools/testing/kunit/test_data/test_skip_tests.log (new file, 15 lines)
@@ -0,0 +1,15 @@
+TAP version 14
+1..2
+    # Subtest: string-stream-test
+    1..3
+    ok 1 - string_stream_test_empty_on_creation
+    ok 2 - string_stream_test_not_empty_after_add
+    ok 3 - string_stream_test_get_string
+ok 1 - string-stream-test
+    # Subtest: example
+    1..2
+    # example_simple_test: initializing
+    ok 1 - example_simple_test
+    # example_skip_test: initializing
+    ok 2 - example_skip_test # SKIP this test should be skipped
+ok 2 - example