Re: [PATCH] kunit: memcpy: Split slow memcpy tests into MEMCPY_SLOW_KUNIT_TEST
From: Guenter Roeck
Date: Mon Jan 09 2023 - 19:11:37 EST
On Fri, Jan 06, 2023 at 08:02:07PM -0800, Kees Cook wrote:
> Since the long memcpy tests may stall a system for tens of seconds
> in virtualized architecture environments, split those tests off under
> CONFIG_MEMCPY_SLOW_KUNIT_TEST so they can be separately disabled.
>
> Reported-by: Guenter Roeck <linux@xxxxxxxxxxxx>
> Link: https://lore.kernel.org/lkml/20221226195206.GA2626419@xxxxxxxxxxxx
> Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
> Cc: Nathan Chancellor <nathan@xxxxxxxxxx>
> Cc: linux-hardening@xxxxxxxxxxxxxxx
> Signed-off-by: Kees Cook <keescook@xxxxxxxxxxxx>
> ---
> Guenter, does this give you the needed flexibility to turn on the memcpy
> kunit tests again in your slower environments?
Yes, it does, and it works.
Reviewed-and-tested-by: Guenter Roeck <linux@xxxxxxxxxxxx>
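FWIW, for others running boot tests in slow emulated environments: with
this patch applied, a config fragment roughly like the following
(untested as an exact snippet; the last two lines are the relevant part,
and it assumes KUNIT_ALL_TESTS is not enabled, since that default would
pull the option back in)

    CONFIG_KUNIT=y
    CONFIG_MEMCPY_KUNIT_TEST=y
    # CONFIG_MEMCPY_SLOW_KUNIT_TEST is not set

keeps the fast memcpy/memmove cases while the exhaustive ones are
reported as skipped instead of stalling the boot for tens of seconds.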
> ---
> lib/Kconfig.debug | 9 +++++++++
> lib/memcpy_kunit.c | 17 +++++++++++++----
> 2 files changed, 22 insertions(+), 4 deletions(-)
>
> diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
> index c2c78d0e761c..b5e94807f41c 100644
> --- a/lib/Kconfig.debug
> +++ b/lib/Kconfig.debug
> @@ -2621,6 +2621,15 @@ config MEMCPY_KUNIT_TEST
>
> If unsure, say N.
>
> +config MEMCPY_SLOW_KUNIT_TEST
> + tristate "Include exhaustive memcpy tests" if !KUNIT_ALL_TESTS
As Geert noticed, bool should be sufficient here since this is not a
separate test.
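I.e., something along these lines, just to illustrate the suggestion;
the rest of the entry can stay exactly as posted:

config MEMCPY_SLOW_KUNIT_TEST
        bool "Include exhaustive memcpy tests" if !KUNIT_ALL_TESTS
        depends on MEMCPY_KUNIT_TEST
        default KUNIT_ALL_TESTS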
Thanks,
Guenter
> + depends on MEMCPY_KUNIT_TEST
> + default KUNIT_ALL_TESTS
> + help
> + Some memcpy tests are quite exhaustive in checking for overlaps
> + and bit ranges. These can be very slow, so they are split out
> + as a separate config.
> +
> config IS_SIGNED_TYPE_KUNIT_TEST
> tristate "Test is_signed_type() macro" if !KUNIT_ALL_TESTS
> depends on KUNIT
> diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c
> index 89128551448d..cc1f36335a9b 100644
> --- a/lib/memcpy_kunit.c
> +++ b/lib/memcpy_kunit.c
> @@ -307,8 +307,12 @@ static void set_random_nonzero(struct kunit *test, u8 *byte)
> }
> }
>
> -static void init_large(struct kunit *test)
> +static int init_large(struct kunit *test)
> {
> + if (!IS_ENABLED(CONFIG_MEMCPY_SLOW_KUNIT_TEST)) {
> + kunit_skip(test, "Slow test skipped. Enable with CONFIG_MEMCPY_SLOW_KUNIT_TEST=y");
> + return -EBUSY;
> + }
>
> /* Get many bit patterns. */
> get_random_bytes(large_src, ARRAY_SIZE(large_src));
> @@ -319,6 +323,8 @@ static void init_large(struct kunit *test)
>
> /* Explicitly zero the entire destination. */
> memset(large_dst, 0, ARRAY_SIZE(large_dst));
> +
> + return 0;
> }
>
> /*
> @@ -327,7 +333,9 @@ static void init_large(struct kunit *test)
> */
> static void copy_large_test(struct kunit *test, bool use_memmove)
> {
> - init_large(test);
> +
> + if (init_large(test))
> + return;
>
> /* Copy a growing number of non-overlapping bytes ... */
> for (int bytes = 1; bytes <= ARRAY_SIZE(large_src); bytes++) {
> @@ -472,7 +480,8 @@ static void memmove_overlap_test(struct kunit *test)
> static const int bytes_start = 1;
> static const int bytes_end = ARRAY_SIZE(large_src) + 1;
>
> - init_large(test);
> + if (init_large(test))
> + return;
>
> /* Copy a growing number of overlapping bytes ... */
> for (int bytes = bytes_start; bytes < bytes_end;
> @@ -549,8 +558,8 @@ static void strtomem_test(struct kunit *test)
> static struct kunit_case memcpy_test_cases[] = {
> KUNIT_CASE(memset_test),
> KUNIT_CASE(memcpy_test),
> - KUNIT_CASE(memcpy_large_test),
> KUNIT_CASE(memmove_test),
> + KUNIT_CASE(memcpy_large_test),
> KUNIT_CASE(memmove_large_test),
> KUNIT_CASE(memmove_overlap_test),
> KUNIT_CASE(strtomem_test),
> --
> 2.34.1
>
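As an aside, when the slow variants do need to be exercised on a fast
machine, something like the following should work with the kunit
tooling (option and suite names from memory, so please double-check
before relying on them):

    ./tools/testing/kunit/kunit.py run \
        --kconfig_add CONFIG_MEMCPY_KUNIT_TEST=y \
        --kconfig_add CONFIG_MEMCPY_SLOW_KUNIT_TEST=y \
        memcpy

with "memcpy" being the suite name registered in lib/memcpy_kunit.c.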