Re: [PATCH 3/3] kcsan: Add atomic builtin test case
From: Paul E. McKenney
Date: Mon Jul 06 2020 - 19:45:13 EST
On Fri, Jul 03, 2020 at 03:40:31PM +0200, Marco Elver wrote:
> Adds a test case to the kcsan-test module, to test that atomic builtin
> instrumentation works.
>
> Signed-off-by: Marco Elver <elver@xxxxxxxxxx>
Applied all three, thank you!!!
Thanx, Paul
> ---
> kernel/kcsan/kcsan-test.c | 63 +++++++++++++++++++++++++++++++++++++++
> 1 file changed, 63 insertions(+)
>
> diff --git a/kernel/kcsan/kcsan-test.c b/kernel/kcsan/kcsan-test.c
> index fed6fcb5768c..721180cbbab1 100644
> --- a/kernel/kcsan/kcsan-test.c
> +++ b/kernel/kcsan/kcsan-test.c
> @@ -390,6 +390,15 @@ static noinline void test_kernel_seqlock_writer(void)
> write_sequnlock_irqrestore(&test_seqlock, flags);
> }
>
> +static noinline void test_kernel_atomic_builtins(void)
> +{
> + /*
> + * Generate concurrent accesses, expecting no reports, ensuring KCSAN
> + * treats builtin atomics as actually atomic.
> + */
> + __atomic_load_n(&test_var, __ATOMIC_RELAXED);
> +}
> +
> /* ===== Test cases ===== */
>
> /* Simple test with normal data race. */
> @@ -852,6 +861,59 @@ static void test_seqlock_noreport(struct kunit *test)
> KUNIT_EXPECT_FALSE(test, match_never);
> }
>
> +/*
> + * Test atomic builtins work and required instrumentation functions exist. We
> + * also test that KCSAN understands they're atomic by racing with them via
> + * test_kernel_atomic_builtins(), and expect no reports.
> + *
> + * The atomic builtins _SHOULD NOT_ be used in normal kernel code!
> + */
> +static void test_atomic_builtins(struct kunit *test)
> +{
> + bool match_never = false;
> +
> + begin_test_checks(test_kernel_atomic_builtins, test_kernel_atomic_builtins);
> + do {
> + long tmp;
> +
> + kcsan_enable_current();
> +
> + __atomic_store_n(&test_var, 42L, __ATOMIC_RELAXED);
> + KUNIT_EXPECT_EQ(test, 42L, __atomic_load_n(&test_var, __ATOMIC_RELAXED));
> +
> + KUNIT_EXPECT_EQ(test, 42L, __atomic_exchange_n(&test_var, 20, __ATOMIC_RELAXED));
> + KUNIT_EXPECT_EQ(test, 20L, test_var);
> +
> + tmp = 20L;
> + KUNIT_EXPECT_TRUE(test, __atomic_compare_exchange_n(&test_var, &tmp, 30L,
> + 0, __ATOMIC_RELAXED,
> + __ATOMIC_RELAXED));
> + KUNIT_EXPECT_EQ(test, tmp, 20L);
> + KUNIT_EXPECT_EQ(test, test_var, 30L);
> + KUNIT_EXPECT_FALSE(test, __atomic_compare_exchange_n(&test_var, &tmp, 40L,
> + 1, __ATOMIC_RELAXED,
> + __ATOMIC_RELAXED));
> + KUNIT_EXPECT_EQ(test, tmp, 30L);
> + KUNIT_EXPECT_EQ(test, test_var, 30L);
> +
> + KUNIT_EXPECT_EQ(test, 30L, __atomic_fetch_add(&test_var, 1, __ATOMIC_RELAXED));
> + KUNIT_EXPECT_EQ(test, 31L, __atomic_fetch_sub(&test_var, 1, __ATOMIC_RELAXED));
> + KUNIT_EXPECT_EQ(test, 30L, __atomic_fetch_and(&test_var, 0xf, __ATOMIC_RELAXED));
> + KUNIT_EXPECT_EQ(test, 14L, __atomic_fetch_xor(&test_var, 0xf, __ATOMIC_RELAXED));
> + KUNIT_EXPECT_EQ(test, 1L, __atomic_fetch_or(&test_var, 0xf0, __ATOMIC_RELAXED));
> + KUNIT_EXPECT_EQ(test, 241L, __atomic_fetch_nand(&test_var, 0xf, __ATOMIC_RELAXED));
> + KUNIT_EXPECT_EQ(test, -2L, test_var);
> +
> + __atomic_thread_fence(__ATOMIC_SEQ_CST);
> + __atomic_signal_fence(__ATOMIC_SEQ_CST);
> +
> + kcsan_disable_current();
> +
> + match_never = report_available();
> + } while (!end_test_checks(match_never));
> + KUNIT_EXPECT_FALSE(test, match_never);
> +}
> +
> /*
> * Each test case is run with different numbers of threads. Until KUnit supports
> * passing arguments for each test case, we encode #threads in the test case
> @@ -891,6 +953,7 @@ static struct kunit_case kcsan_test_cases[] = {
> KCSAN_KUNIT_CASE(test_assert_exclusive_access_scoped),
> KCSAN_KUNIT_CASE(test_jiffies_noreport),
> KCSAN_KUNIT_CASE(test_seqlock_noreport),
> + KCSAN_KUNIT_CASE(test_atomic_builtins),
> {},
> };
>
> --
> 2.27.0.212.ge8ba1cc988-goog
>