Re: [PATCH v5 1/6] kexec: move locking into do_kexec_load

From: Eric W. Biederman
Date: Wed Jul 28 2021 - 12:11:16 EST


Arnd Bergmann <arnd@xxxxxxxxxx> writes:

> From: Arnd Bergmann <arnd@xxxxxxxx>
>
> The locking is the same between the native and compat version of
> sys_kexec_load(), so it can be done in the common implementation
> to reduce duplication.

Acked-by: "Eric W. Biederman" <ebiederm@xxxxxxxxxxxx>

>
> Co-developed-by: Eric Biederman <ebiederm@xxxxxxxxxxxx>
> Co-developed-by: Christoph Hellwig <hch@xxxxxxxxxxxxx>
> Signed-off-by: Arnd Bergmann <arnd@xxxxxxxx>
> ---
> kernel/kexec.c | 44 ++++++++++++++++----------------------------
> 1 file changed, 16 insertions(+), 28 deletions(-)
>
> diff --git a/kernel/kexec.c b/kernel/kexec.c
> index c82c6c06f051..9c7aef8f4bb6 100644
> --- a/kernel/kexec.c
> +++ b/kernel/kexec.c
> @@ -110,6 +110,17 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
> unsigned long i;
> int ret;
>
> + /*
> + * Because we write directly to the reserved memory region when loading
> + * crash kernels we need a mutex here to prevent multiple crash kernels
> + * from attempting to load simultaneously, and to prevent a crash kernel
> + * from loading over the top of an in-use crash kernel.
> + *
> + * KISS: always take the mutex.
> + */
> + if (!mutex_trylock(&kexec_mutex))
> + return -EBUSY;
> +
> if (flags & KEXEC_ON_CRASH) {
> dest_image = &kexec_crash_image;
> if (kexec_crash_image)
> @@ -121,7 +132,8 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
> if (nr_segments == 0) {
> /* Uninstall image */
> kimage_free(xchg(dest_image, NULL));
> - return 0;
> + ret = 0;
> + goto out_unlock;
> }
> if (flags & KEXEC_ON_CRASH) {
> /*
> @@ -134,7 +146,7 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
>
> ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);
> if (ret)
> - return ret;
> + goto out_unlock;
>
> if (flags & KEXEC_PRESERVE_CONTEXT)
> image->preserve_context = 1;
> @@ -171,6 +183,8 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
> arch_kexec_protect_crashkres();
>
> kimage_free(image);
> +out_unlock:
> + mutex_unlock(&kexec_mutex);
> return ret;
> }
>
> @@ -247,21 +261,8 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
> ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
> return -EINVAL;
>
> - /* Because we write directly to the reserved memory
> - * region when loading crash kernels we need a mutex here to
> - * prevent multiple crash kernels from attempting to load
> - * simultaneously, and to prevent a crash kernel from loading
> - * over the top of a in use crash kernel.
> - *
> - * KISS: always take the mutex.
> - */
> - if (!mutex_trylock(&kexec_mutex))
> - return -EBUSY;
> -
> result = do_kexec_load(entry, nr_segments, segments, flags);
>
> - mutex_unlock(&kexec_mutex);
> -
> return result;
> }
>
> @@ -301,21 +302,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
> return -EFAULT;
> }
>
> - /* Because we write directly to the reserved memory
> - * region when loading crash kernels we need a mutex here to
> - * prevent multiple crash kernels from attempting to load
> - * simultaneously, and to prevent a crash kernel from loading
> - * over the top of a in use crash kernel.
> - *
> - * KISS: always take the mutex.
> - */
> - if (!mutex_trylock(&kexec_mutex))
> - return -EBUSY;
> -
> result = do_kexec_load(entry, nr_segments, ksegments, flags);
>
> - mutex_unlock(&kexec_mutex);
> -
> return result;
> }
> #endif
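
For anyone skimming the thread: the net effect of the hunks above is that both the
native and compat entry points now call do_kexec_load() directly, and the
trylock/unlock pair exists only in the common helper. Below is a condensed sketch of
the resulting shape, assembled from the hunks; the crash-kernel branches, segment
loading and image freeing are elided and replaced by comments, so treat it as an
illustration rather than the literal post-patch source.

	static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
				 struct kexec_segment __user *segments,
				 unsigned long flags)
	{
		struct kimage *image;
		int ret;

		/* Both syscall entry paths now rely on this single lock. */
		if (!mutex_trylock(&kexec_mutex))
			return -EBUSY;

		ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);
		if (ret)
			goto out_unlock;	/* was: return ret; */

		/* ... load segments, swap in the new image, free the old one ... */
		ret = 0;

	out_unlock:
		mutex_unlock(&kexec_mutex);
		return ret;
	}

Every early "return x" in the original body becomes "ret = x; goto out_unlock", which
is exactly what the second and third hunks do, and the two syscall wrappers shrink to
argument checking plus a call into this function.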