Commit 5acaf603 authored by David Gow, committed by Shuah Khan

kunit: tool: Support skipped tests in kunit_tool



Add support for the SKIP directive to kunit_tool's TAP parser.

Skipped tests now show up as such in the printed summary. The number of
skipped tests is counted, and if all tests in a suite are skipped, the
suite is also marked as skipped. Otherwise, skipped tests do not affect the
suite result.

Example output:
[00:22:34] ======== [SKIPPED] example_skip ========
[00:22:34] [SKIPPED] example_skip_test # SKIP this test should be skipped
[00:22:34] [SKIPPED] example_mark_skipped_test # SKIP this test should be skipped
[00:22:34] ============================================================
[00:22:34] Testing complete. 2 tests run. 0 failed. 0 crashed. 2 skipped.

Signed-off-by: David Gow <davidgow@google.com>
Reviewed-by: Daniel Latypov <dlatypov@google.com>
Reviewed-by: Brendan Higgins <brendanhiggins@google.com>
Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
parent 6d2426b2
tools/testing/kunit/kunit_parser.py (+53 −24)
@@ -43,6 +43,7 @@ class TestCase(object):
 class TestStatus(Enum):
 	SUCCESS = auto()
 	FAILURE = auto()
+	SKIPPED = auto()
 	TEST_CRASHED = auto()
 	NO_TESTS = auto()
 	FAILURE_TO_PARSE_TESTS = auto()
@@ -149,6 +150,8 @@ def save_non_diagnostic(lines: LineStream, test_case: TestCase) -> None:
 
 OkNotOkResult = namedtuple('OkNotOkResult', ['is_ok','description', 'text'])
 
+OK_NOT_OK_SKIP = re.compile(r'^[\s]*(ok|not ok) [0-9]+ - (.*) # SKIP(.*)$')
+
 OK_NOT_OK_SUBTEST = re.compile(r'^[\s]+(ok|not ok) [0-9]+ - (.*)$')
 
 OK_NOT_OK_MODULE = re.compile(r'^(ok|not ok) ([0-9]+) - (.*)$')
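
The new OK_NOT_OK_SKIP pattern matches a TAP result line that carries a SKIP directive, whether the line was reported as "ok" or "not ok". A minimal standalone sketch (plain Python, not using the kunit_parser module) of what the regex captures:

    import re

    # Same pattern the patch adds above.
    OK_NOT_OK_SKIP = re.compile(r'^[\s]*(ok|not ok) [0-9]+ - (.*) # SKIP(.*)$')

    line = 'ok 2 - example_skip_test # SKIP this test should be skipped'
    match = OK_NOT_OK_SKIP.match(line)
    if match:
        print(match.group(2))          # test name: example_skip_test
        print(match.group(3).strip())  # skip reason: this test should be skipped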
@@ -166,6 +169,10 @@ def parse_ok_not_ok_test_case(lines: LineStream, test_case: TestCase) -> bool:
 	if match:
 		test_case.log.append(lines.pop())
 		test_case.name = match.group(2)
+		skip_match = OK_NOT_OK_SKIP.match(line)
+		if skip_match:
+			test_case.status = TestStatus.SKIPPED
+			return True
 		if test_case.status == TestStatus.TEST_CRASHED:
 			return True
 		if match.group(1) == 'ok':
@@ -229,16 +236,16 @@ def parse_subtest_plan(lines: LineStream) -> Optional[int]:
 		return None
 
 def max_status(left: TestStatus, right: TestStatus) -> TestStatus:
-	if left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED:
+	if left == right:
+		return left
+	elif left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED:
 		return TestStatus.TEST_CRASHED
 	elif left == TestStatus.FAILURE or right == TestStatus.FAILURE:
 		return TestStatus.FAILURE
-	elif left != TestStatus.SUCCESS:
-		return left
-	elif right != TestStatus.SUCCESS:
+	elif left == TestStatus.SKIPPED:
 		return right
 	else:
-		return TestStatus.SUCCESS
+		return left
 
 def parse_ok_not_ok_test_suite(lines: LineStream,
 			       test_suite: TestSuite,
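
Taken on its own, the reworked max_status() yields a strict precedence of TEST_CRASHED > FAILURE > other statuses > SKIPPED, so a skipped result never overrides a real one. A minimal standalone sketch (with the enum trimmed to the statuses involved) to illustrate:

    from enum import Enum, auto

    class TestStatus(Enum):
        SUCCESS = auto()
        FAILURE = auto()
        SKIPPED = auto()
        TEST_CRASHED = auto()

    def max_status(left, right):
        # Mirrors the patched logic: equal statuses pass through, crashes
        # and failures dominate, and SKIPPED loses to everything else.
        if left == right:
            return left
        elif left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED:
            return TestStatus.TEST_CRASHED
        elif left == TestStatus.FAILURE or right == TestStatus.FAILURE:
            return TestStatus.FAILURE
        elif left == TestStatus.SKIPPED:
            return right
        else:
            return left

    print(max_status(TestStatus.SKIPPED, TestStatus.SUCCESS))  # TestStatus.SUCCESS
    print(max_status(TestStatus.SKIPPED, TestStatus.FAILURE))  # TestStatus.FAILURE
    print(max_status(TestStatus.SKIPPED, TestStatus.SKIPPED))  # TestStatus.SKIPPED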
@@ -255,6 +262,9 @@ def parse_ok_not_ok_test_suite(lines: LineStream,
 			test_suite.status = TestStatus.SUCCESS
 		else:
 			test_suite.status = TestStatus.FAILURE
+		skip_match = OK_NOT_OK_SKIP.match(line)
+		if skip_match:
+			test_suite.status = TestStatus.SKIPPED
 		suite_index = int(match.group(2))
 		if suite_index != expected_suite_index:
 			print_with_timestamp(
@@ -265,8 +275,8 @@ def parse_ok_not_ok_test_suite(lines: LineStream,
 	else:
 		return False
 
-def bubble_up_errors(statuses: Iterable[TestStatus]) -> TestStatus:
-	return reduce(max_status, statuses, TestStatus.SUCCESS)
+def bubble_up_errors(status_list: Iterable[TestStatus]) -> TestStatus:
+	return reduce(max_status, status_list, TestStatus.SKIPPED)
 
 def bubble_up_test_case_errors(test_suite: TestSuite) -> TestStatus:
 	max_test_case_status = bubble_up_errors(x.status for x in test_suite.cases)
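
Reseeding the reduce with TestStatus.SKIPPED instead of SUCCESS is what makes an all-skipped suite roll up as skipped: under max_status() the seed survives only if every child status is itself SKIPPED. Continuing the sketch above:

    from functools import reduce

    def bubble_up_errors(status_list):
        return reduce(max_status, status_list, TestStatus.SKIPPED)

    # One passing test lifts the suite out of SKIPPED...
    print(bubble_up_errors([TestStatus.SKIPPED, TestStatus.SUCCESS]))  # SUCCESS
    # ...but a fully skipped suite stays SKIPPED.
    print(bubble_up_errors([TestStatus.SKIPPED, TestStatus.SKIPPED]))  # SKIPPED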
@@ -352,37 +362,53 @@ def parse_test_result(lines: LineStream) -> TestResult:
 	else:
 		return TestResult(TestStatus.NO_TESTS, [], lines)
 
-def print_and_count_results(test_result: TestResult) -> Tuple[int, int, int]:
-	total_tests = 0
-	failed_tests = 0
-	crashed_tests = 0
+class TestCounts:
+	passed: int
+	failed: int
+	crashed: int
+	skipped: int
+
+	def __init__(self):
+		self.passed = 0
+		self.failed = 0
+		self.crashed = 0
+		self.skipped = 0
+
+	def total(self) -> int:
+		return self.passed + self.failed + self.crashed + self.skipped
+
+def print_and_count_results(test_result: TestResult) -> TestCounts:
+	counts = TestCounts()
 	for test_suite in test_result.suites:
 		if test_suite.status == TestStatus.SUCCESS:
 			print_suite_divider(green('[PASSED] ') + test_suite.name)
+		elif test_suite.status == TestStatus.SKIPPED:
+			print_suite_divider(yellow('[SKIPPED] ') + test_suite.name)
 		elif test_suite.status == TestStatus.TEST_CRASHED:
 			print_suite_divider(red('[CRASHED] ' + test_suite.name))
 		else:
 			print_suite_divider(red('[FAILED] ') + test_suite.name)
 		for test_case in test_suite.cases:
-			total_tests += 1
 			if test_case.status == TestStatus.SUCCESS:
+				counts.passed += 1
 				print_with_timestamp(green('[PASSED] ') + test_case.name)
+			elif test_case.status == TestStatus.SKIPPED:
+				counts.skipped += 1
+				print_with_timestamp(yellow('[SKIPPED] ') + test_case.name)
 			elif test_case.status == TestStatus.TEST_CRASHED:
-				crashed_tests += 1
+				counts.crashed += 1
 				print_with_timestamp(red('[CRASHED] ' + test_case.name))
 				print_log(map(yellow, test_case.log))
 				print_with_timestamp('')
 			else:
-				failed_tests += 1
+				counts.failed += 1
 				print_with_timestamp(red('[FAILED] ') + test_case.name)
 				print_log(map(yellow, test_case.log))
 				print_with_timestamp('')
-	return total_tests, failed_tests, crashed_tests
+	return counts
 
 def parse_run_tests(kernel_output: Iterable[str]) -> TestResult:
-	total_tests = 0
-	failed_tests = 0
-	crashed_tests = 0
+	counts = TestCounts()
 	lines = extract_tap_lines(kernel_output)
 	test_result = parse_test_result(lines)
 	if test_result.status == TestStatus.NO_TESTS:
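
TestCounts replaces the old (total, failed, crashed) tuple with a named bucket per status, with the total derived rather than tracked separately. A brief usage sketch, assuming the TestCounts definition from the hunk above is in scope:

    counts = TestCounts()
    counts.passed += 1
    counts.skipped += 2
    print('Testing complete. %d tests run. %d failed. %d crashed. %d skipped.' %
          (counts.total(), counts.failed, counts.crashed, counts.skipped))
    # -> Testing complete. 3 tests run. 0 failed. 0 crashed. 2 skipped.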
@@ -390,12 +416,15 @@ def parse_run_tests(kernel_output: Iterable[str]) -> TestResult:
 	elif test_result.status == TestStatus.FAILURE_TO_PARSE_TESTS:
 		print(red('[ERROR] ') + yellow('could not parse test results!'))
 	else:
-		(total_tests,
-		 failed_tests,
-		 crashed_tests) = print_and_count_results(test_result)
+		counts = print_and_count_results(test_result)
 	print_with_timestamp(DIVIDER)
-	fmt = green if test_result.status == TestStatus.SUCCESS else red
+	if test_result.status == TestStatus.SUCCESS:
+		fmt = green
+	elif test_result.status == TestStatus.SKIPPED:
+		fmt = yellow
+	else:
+		fmt = red
 	print_with_timestamp(
-		fmt('Testing complete. %d tests run. %d failed. %d crashed.' %
-		    (total_tests, failed_tests, crashed_tests)))
+		fmt('Testing complete. %d tests run. %d failed. %d crashed. %d skipped.' %
+		    (counts.total(), counts.failed, counts.crashed, counts.skipped)))
 	return test_result
tools/testing/kunit/kunit_tool_test.py (+22 −0)
@@ -185,6 +185,28 @@ class KUnitParserTest(unittest.TestCase):
 			kunit_parser.TestStatus.TEST_CRASHED,
 			result.status)
 
+	def test_skipped_test(self):
+		skipped_log = test_data_path('test_skip_tests.log')
+		file = open(skipped_log)
+		result = kunit_parser.parse_run_tests(file.readlines())
+
+		# A skipped test does not fail the whole suite.
+		self.assertEqual(
+			kunit_parser.TestStatus.SUCCESS,
+			result.status)
+		file.close()
+
+	def test_skipped_all_tests(self):
+		skipped_log = test_data_path('test_skip_all_tests.log')
+		file = open(skipped_log)
+		result = kunit_parser.parse_run_tests(file.readlines())
+
+		self.assertEqual(
+			kunit_parser.TestStatus.SKIPPED,
+			result.status)
+		file.close()
+
+
 	def test_ignores_prefix_printk_time(self):
 		prefix_log = test_data_path('test_config_printk_time.log')
 		with open(prefix_log) as file:
tools/testing/kunit/test_data/test_skip_all_tests.log (+15 −0, new file)
TAP version 14
1..2
    # Subtest: string-stream-test
    1..3
    ok 1 - string_stream_test_empty_on_creation # SKIP all tests skipped
    ok 2 - string_stream_test_not_empty_after_add # SKIP all tests skipped
    ok 3 - string_stream_test_get_string # SKIP all tests skipped
ok 1 - string-stream-test # SKIP
    # Subtest: example
    1..2
    # example_simple_test: initializing
    ok 1 - example_simple_test # SKIP all tests skipped
    # example_skip_test: initializing
    ok 2 - example_skip_test # SKIP this test should be skipped
ok 2 - example # SKIP
tools/testing/kunit/test_data/test_skip_tests.log (+15 −0, new file)
TAP version 14
1..2
    # Subtest: string-stream-test
    1..3
    ok 1 - string_stream_test_empty_on_creation
    ok 2 - string_stream_test_not_empty_after_add
    ok 3 - string_stream_test_get_string
ok 1 - string-stream-test
    # Subtest: example
    1..2
    # example_simple_test: initializing
    ok 1 - example_simple_test
    # example_skip_test: initializing
    ok 2 - example_skip_test # SKIP this test should be skipped
ok 2 - example