[PATCH] kunit: Print test statistics on failure
From: David Gow
Date: Fri Dec 11 2020 - 02:25:14 EST
When a number of tests fail, it can be useful to get higher-level
statistics: how many tests are failing (or how many parameters are
failing in parameterised tests), and in which cases or suites. Some
non-KUnit tests already produce such statistics, so add support for
generating them automatically for KUnit tests.
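For reference, a "test parameter" here is one value a parameterised
case runs against: the case is registered with KUNIT_CASE_PARAM() and
a generator such as KUNIT_ARRAY_PARAM(). A minimal sketch (hypothetical
names, not part of this patch):

#include <kunit/test.h>

struct times_two_case {
	int input;
	int expected;
};

/*
 * Each entry is one test parameter; a failing entry counts towards
 * the new per-parameter statistics.
 */
static const struct times_two_case times_two_cases[] = {
	{ .input = 1, .expected = 2 },
	{ .input = 3, .expected = 6 },
};

static void times_two_case_desc(const struct times_two_case *c, char *desc)
{
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "input=%d", c->input);
}

/* Generates times_two_gen_params() from the array above. */
KUNIT_ARRAY_PARAM(times_two, times_two_cases, times_two_case_desc);

static void times_two_test(struct kunit *test)
{
	const struct times_two_case *c = test->param_value;

	KUNIT_EXPECT_EQ(test, c->input * 2, c->expected);
}

static struct kunit_case example_test_cases[] = {
	KUNIT_CASE_PARAM(times_two_test, times_two_gen_params),
	{}
};

static struct kunit_suite example_test_suite = {
	.name = "example",
	.test_cases = example_test_cases,
};
kunit_test_suite(example_test_suite);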
This change adds a 'kunit_stats_enabled' switch which has three values:
- 0: No stats are printed (current behaviour)
- 1: Stats are printed only for tests/suites with more than one
subtest, and at least one failure (new default)
- 2: Always print test statistics
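Since kunit_stats_enabled is registered with core_param(), it can be
set on the kernel command line without a module prefix, e.g.:

    kunit_stats_enabled=2

and, given the 0644 permissions, it should also be readable and
writable at runtime via sysfs (built-in core params appear under
/sys/module/kernel/parameters/).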
For parameterised tests, the summary line looks as follows:
" # inode_test_xtimestamp_decoding: 0 / 16 test parameters failed"
For test suites, it looks like this:
"# ext4_inode_test: (0 / 1) tests failed (0 / 16 test parameters)"
kunit_tool is also updated to correctly ignore diagnostic lines, so
that these statistics do not break parsing of the results: with the
narrowed TAP_ENTRIES pattern, only '# Subtest:' diagnostics still
count as TAP entries, so the statistics lines are skipped by
consume_non_diagnositic() rather than misparsed as results.
Signed-off-by: David Gow <davidgow@xxxxxxxxxx>
---
This is largely a follow-up to the discussion here:
https://lore.kernel.org/linux-kselftest/CABVgOSmy4n_LGwDS7yWfoLftcQzxv6S+iXx9Y=OPcgG2gu0z1w@xxxxxxxxxxxxxx/T/#t
Does this seem like a sensible addition?
Cheers,
-- David
lib/kunit/test.c | 71 +++++++++++++++++++++++++++++
tools/testing/kunit/kunit_parser.py | 2 +-
2 files changed, 72 insertions(+), 1 deletion(-)
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index ec9494e914ef..711e269366a7 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -9,6 +9,7 @@
#include <kunit/test.h>
#include <linux/kernel.h>
#include <linux/kref.h>
+#include <linux/moduleparam.h>
#include <linux/sched/debug.h>
#include <linux/sched.h>
@@ -16,6 +17,40 @@
#include "string-stream.h"
#include "try-catch-impl.h"
+/*
+ * KUnit statistic mode:
+ * 0 - disabled
+ * 1 - only when there is at least one failure, and more than one subtest
+ * 2 - enabled
+ */
+static int kunit_stats_enabled = 1;
+core_param(kunit_stats_enabled, kunit_stats_enabled, int, 0644);
+
+static bool kunit_should_print_stats(size_t num_failures, size_t num_subtests)
+{
+ if (kunit_stats_enabled == 0)
+ return false;
+
+ if (kunit_stats_enabled == 2)
+ return true;
+
+ return (num_failures > 0 && num_subtests > 1);
+}
+
+static void kunit_print_test_stats(struct kunit *test,
+ size_t num_failures, size_t num_subtests)
+{
+ if (!kunit_should_print_stats(num_failures, num_subtests))
+ return;
+
+ kunit_log(KERN_INFO, test,
+ KUNIT_SUBTEST_INDENT
+ "# %s: %lu / %lu test parameters failed",
+ test->name,
+ num_failures,
+ num_subtests);
+}
+
/*
* Append formatted message to log, size of which is limited to
* KUNIT_LOG_SIZE bytes (including null terminating byte).
@@ -346,15 +381,37 @@ static void kunit_run_case_catch_errors(struct kunit_suite *suite,
test_case->success = test->success;
}
+static void kunit_print_suite_stats(struct kunit_suite *suite,
+ size_t num_failures,
+ size_t total_param_failures,
+ size_t total_params)
+{
+ size_t num_cases = kunit_suite_num_test_cases(suite);
+
+ if (!kunit_should_print_stats(num_failures, num_cases))
+ return;
+
+ kunit_log(KERN_INFO, suite,
+ "# %s: (%lu / %lu) tests failed (%lu / %lu test parameters)",
+ suite->name,
+ num_failures,
+ num_cases,
+ total_param_failures,
+ total_params);
+}
+
int kunit_run_tests(struct kunit_suite *suite)
{
char param_desc[KUNIT_PARAM_DESC_SIZE];
struct kunit_case *test_case;
+ size_t num_suite_failures = 0;
+ size_t total_param_failures = 0, total_params = 0;
kunit_print_subtest_start(suite);
kunit_suite_for_each_test_case(suite, test_case) {
struct kunit test = { .param_value = NULL, .param_index = 0 };
+ size_t num_params = 0, num_failures = 0;
bool test_success = true;
if (test_case->generate_params) {
@@ -385,13 +442,27 @@ int kunit_run_tests(struct kunit_suite *suite)
test.param_value = test_case->generate_params(test.param_value, param_desc);
test.param_index++;
}
+
+ if (!test.success)
+ num_failures++;
+ num_params++;
+
} while (test.param_value);
+ kunit_print_test_stats(&test, num_failures, num_params);
+
kunit_print_ok_not_ok(&test, true, test_success,
kunit_test_case_num(suite, test_case),
test_case->name);
+
+ if (!test_success)
+ num_suite_failures++;
+ total_params += num_params;
+ total_param_failures += num_failures;
}
+ kunit_print_suite_stats(suite, num_suite_failures,
+ total_param_failures, total_params);
kunit_print_subtest_end(suite);
return 0;
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index 6614ec4d0898..88ee2b2668ad 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -95,7 +95,7 @@ def print_log(log):
for m in log:
print_with_timestamp(m)
-TAP_ENTRIES = re.compile(r'^(TAP|[\s]*ok|[\s]*not ok|[\s]*[0-9]+\.\.[0-9]+|[\s]*#).*$')
+TAP_ENTRIES = re.compile(r'^(TAP|[\s]*ok|[\s]*not ok|[\s]*[0-9]+\.\.[0-9]+|[\s]*# Subtest:).*$')
def consume_non_diagnositic(lines: List[str]) -> None:
while lines and not TAP_ENTRIES.match(lines[0]):
base-commit: 5f6b99d0287de2c2d0b5e7abcb0092d553ad804a
--
2.29.2.576.ga3fc446d84-goog