Re: [PATCH] mm/kasan: Add shadow memory validation in ksize()
From: Andrey Konovalov
Date: Mon Jun 24 2019 - 07:46:27 EST
On Mon, Jun 24, 2019 at 1:05 PM Marco Elver <elver@xxxxxxxxxx> wrote:
>
> ksize() has been unconditionally unpoisoning the whole shadow memory region
> associated with an allocation. This can lead to various undetected bugs,
> for example, double-kzfree().
>
> kzfree() uses ksize() to determine the actual allocation size, and
> subsequently zeroes the memory. Since ksize() unconditionally unpoisoned
> the whole shadow memory region, a second kzfree() of the same object
> re-validated the shadow, and the invalid free went undetected.
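>
> Recall that kzfree() is essentially (abridged from mm/slab_common.c; the
> ZERO_OR_NULL_PTR() check is omitted here):
>
>	void kzfree(const void *p)
>	{
>		size_t ks = ksize(p);
>
>		memset((void *)p, 0, ks);
>		kfree(p);
>	}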
>
> This patch addresses this as follows:
>
> 1. For both the SLAB and SLUB allocators: add a check in ksize() that the
> pointed-to object's shadow memory is valid, and only then unpoison
> the memory region.
>
> 2. Update kasan_unpoison_slab() to explicitly unpoison the shadow memory
> region using the size obtained from ksize(). A double-unpoison can
> occur if the shadow was already valid; however, this should not be
> the common case.
>
> Tested:
> 1. With SLAB allocator: a) normal boot without warnings; b) verified the
> added double-kzfree() is detected.
> 2. With SLUB allocator: a) normal boot without warnings; b) verified the
> added double-kzfree() is detected.
>
> Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=199359
> Signed-off-by: Marco Elver <elver@xxxxxxxxxx>
> Cc: Andrey Ryabinin <aryabinin@xxxxxxxxxxxxx>
> Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
> Cc: Alexander Potapenko <glider@xxxxxxxxxx>
> Cc: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
> Cc: Christoph Lameter <cl@xxxxxxxxx>
> Cc: Pekka Enberg <penberg@xxxxxxxxxx>
> Cc: David Rientjes <rientjes@xxxxxxxxxx>
> Cc: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
> Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
> Cc: kasan-dev@xxxxxxxxxxxxxxxx
> Cc: linux-kernel@xxxxxxxxxxxxxxx
> Cc: linux-mm@xxxxxxxxx
> ---
> include/linux/kasan.h | 20 +++++++++++++++++++-
> lib/test_kasan.c | 17 +++++++++++++++++
> mm/kasan/common.c | 15 ++++++++++++---
> mm/slab.c | 12 ++++++++----
> mm/slub.c | 11 +++++++----
> 5 files changed, 63 insertions(+), 12 deletions(-)
>
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index b40ea104dd36..9778a68fb5cf 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -63,6 +63,14 @@ void * __must_check kasan_krealloc(const void *object, size_t new_size,
>
> void * __must_check kasan_slab_alloc(struct kmem_cache *s, void *object,
> gfp_t flags);
> +
> +/**
> + * kasan_shadow_invalid - Check if shadow memory of object is invalid.
> + * @object: The pointed-to object; the object pointer may be tagged.
> + * Return: true if shadow is invalid, false if valid.
> + */
> +bool kasan_shadow_invalid(const void *object);
> +
> bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
>
> struct kasan_cache {
> @@ -77,7 +85,11 @@ int kasan_add_zero_shadow(void *start, unsigned long size);
> void kasan_remove_zero_shadow(void *start, unsigned long size);
>
> size_t ksize(const void *);
> -static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
> +static inline void kasan_unpoison_slab(const void *ptr)
> +{
> + /* Force unpoison: ksize() only unpoisons if shadow of ptr is valid. */
> + kasan_unpoison_shadow(ptr, ksize(ptr));
> +}
> size_t kasan_metadata_size(struct kmem_cache *cache);
>
> bool kasan_save_enable_multi_shot(void);
> @@ -133,6 +145,12 @@ static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
> {
> return object;
> }
> +
> +static inline bool kasan_shadow_invalid(const void *object)
> +{
> + return false;
> +}
> +
> static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
> unsigned long ip)
> {
> diff --git a/lib/test_kasan.c b/lib/test_kasan.c
> index 7de2702621dc..9b710bfa84da 100644
> --- a/lib/test_kasan.c
> +++ b/lib/test_kasan.c
> @@ -623,6 +623,22 @@ static noinline void __init kasan_strings(void)
> strnlen(ptr, 1);
> }
>
> +static noinline void __init kmalloc_double_kzfree(void)
> +{
> + char *ptr;
> + size_t size = 16;
> +
> + pr_info("double-free (kzfree)\n");
> + ptr = kmalloc(size, GFP_KERNEL);
> + if (!ptr) {
> + pr_err("Allocation failed\n");
> + return;
> + }
> +
> + kzfree(ptr);
> + kzfree(ptr);
> +}
> +
> static int __init kmalloc_tests_init(void)
> {
> /*
> @@ -664,6 +680,7 @@ static int __init kmalloc_tests_init(void)
> kasan_memchr();
> kasan_memcmp();
> kasan_strings();
> + kmalloc_double_kzfree();
>
> kasan_restore_multi_shot(multishot);
>
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index 242fdc01aaa9..357e02e73163 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -413,10 +413,20 @@ static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
> return tag != (u8)shadow_byte;
> }
>
> +bool kasan_shadow_invalid(const void *object)
> +{
> + u8 tag = get_tag(object);
> + s8 shadow_byte;
> +
> + object = reset_tag(object);
> +
> + shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
> + return shadow_invalid(tag, shadow_byte);
> +}
> +
> static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
> unsigned long ip, bool quarantine)
> {
> - s8 shadow_byte;
> u8 tag;
The tag variable is not used any more in this function, right? If so,
it can be removed.
> void *tagged_object;
> unsigned long rounded_up_size;
> @@ -435,8 +445,7 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
> if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
> return false;
>
> - shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
> - if (shadow_invalid(tag, shadow_byte)) {
> + if (kasan_shadow_invalid(tagged_object)) {
> kasan_report_invalid_free(tagged_object, ip);
> return true;
> }
> diff --git a/mm/slab.c b/mm/slab.c
> index f7117ad9b3a3..3595348c401b 100644
> --- a/mm/slab.c
> +++ b/mm/slab.c
> @@ -4226,10 +4226,14 @@ size_t ksize(const void *objp)
> return 0;
>
> size = virt_to_cache(objp)->object_size;
> - /* We assume that ksize callers could use the whole allocated area,
> - * so we need to unpoison this area.
> - */
> - kasan_unpoison_shadow(objp, size);
> +
> + if (!kasan_shadow_invalid(objp)) {
> + /*
> + * We assume that ksize callers could use the whole allocated
> + * area, so we need to unpoison this area.
> + */
> + kasan_unpoison_shadow(objp, size);
> + }
>
> return size;
> }
> diff --git a/mm/slub.c b/mm/slub.c
> index cd04dbd2b5d0..28231d30358e 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -3921,10 +3921,13 @@ static size_t __ksize(const void *object)
> size_t ksize(const void *object)
> {
> size_t size = __ksize(object);
> - /* We assume that ksize callers could use whole allocated area,
> - * so we need to unpoison this area.
> - */
> - kasan_unpoison_shadow(object, size);
> + if (!kasan_shadow_invalid(object)) {
> + /*
> + * We assume that ksize callers could use whole allocated area,
> + * so we need to unpoison this area.
> + */
> + kasan_unpoison_shadow(object, size);
> + }
I think it's better to add a kasan_ksize() hook that implements this
logic. This way we don't have to duplicate it for SLAB and SLUB.
In this case we also don't need an exported kasan_shadow_invalid()
hook, and its logic can be moved into shadow_invalid(). Roughly
something like this (an untested sketch; the name and placement are
only suggestions):
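
/* Hypothetical hook in mm/kasan/common.c: */
void kasan_ksize(const void *object, size_t size)
{
	u8 tag = get_tag(object);
	s8 shadow_byte;

	object = reset_tag(object);
	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));

	/*
	 * ksize() callers could use the whole allocated area, so it
	 * needs to be unpoisoned, but only if its shadow is currently
	 * valid.
	 */
	if (!shadow_invalid(tag, shadow_byte))
		kasan_unpoison_shadow(object, size);
}

Then both mm/slab.c and mm/slub.c would just call kasan_ksize(objp, size)
after computing the size (with a no-op stub for !CONFIG_KASAN).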
> return size;
> }
> EXPORT_SYMBOL(ksize);
> --
> 2.22.0.410.gd8fdbe21b5-goog
>