Re: [PATCH v5 4/4] mm/slub: extend redzone check to extra allocated kmalloc space than requested

From: Hyeonggon Yoo
Date: Fri Sep 09 2022 - 02:26:47 EST


On Wed, Sep 07, 2022 at 03:10:23PM +0800, Feng Tang wrote:
> kmalloc rounds up the request size to a fixed bucket size (mostly a
> power of 2), so there can be extra space beyond what was requested,
> whose size is the actual buffer size minus the original request size.
>
> To better detect out-of-bounds accesses into, or abuse of, this
> space, add a redzone sanity check for it.
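>
> For illustration (a sketch, not code from this patch), on a typical
> config a 12-byte request is served from the kmalloc-16 cache:
>
> 	buf = kmalloc(12, GFP_KERNEL);	/* object_size is 16 */
> 	buf[14] = 0;	/* out of bounds, but still inside the object */
>
> The write at offset 14 stays within the slab object, so the existing
> right redzone cannot catch it. With this patch, bytes [12, 16) are
> redzoned as well and the overwrite is reported the next time the
> object is checked (e.g. on free).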
>
> In the current kernel, some kmalloc users already know about this
> extra space and utilize it after calling 'ksize()' to learn the real
> size of the allocated buffer. So skip the sanity check for objects
> that have had ksize() called on them, treating them as legitimate
> users.
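>
> Such a legitimate user looks roughly like this (illustrative sketch,
> not code from this patch):
>
> 	buf = kmalloc(len, GFP_KERNEL);
> 	cap = ksize(buf);	/* real usable size, >= len */
> 	/* buf[len..cap-1] may now be used as extra capacity */
>
> After the ksize() call, __ksize() invokes skip_orig_size_check(),
> which resets the recorded orig_size to s->object_size, so the extra
> space is no longer treated as a redzone for this object.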
>
> Suggested-by: Vlastimil Babka <vbabka@xxxxxxx>
> Signed-off-by: Feng Tang <feng.tang@xxxxxxxxx>
> ---
> mm/slab.h | 4 ++++
> mm/slab_common.c | 4 ++++
> mm/slub.c | 57 +++++++++++++++++++++++++++++++++++++++++++++---
> 3 files changed, 62 insertions(+), 3 deletions(-)
>
> diff --git a/mm/slab.h b/mm/slab.h
> index 20f9e2a9814f..0bc91b30b031 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -885,4 +885,8 @@ void __check_heap_object(const void *ptr, unsigned long n,
> }
> #endif
>
> +#ifdef CONFIG_SLUB_DEBUG
> +void skip_orig_size_check(struct kmem_cache *s, const void *object);
> +#endif
> +
> #endif /* MM_SLAB_H */
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 8e13e3aac53f..5106667d6adb 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -1001,6 +1001,10 @@ size_t __ksize(const void *object)
> return folio_size(folio);
> }
>
> +#ifdef CONFIG_SLUB_DEBUG
> + skip_orig_size_check(folio_slab(folio)->slab_cache, object);
> +#endif
> +
> return slab_ksize(folio_slab(folio)->slab_cache);
> }
>
> diff --git a/mm/slub.c b/mm/slub.c
> index f523601d3fcf..2f0302136604 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -812,12 +812,27 @@ static inline void set_orig_size(struct kmem_cache *s,
> if (!slub_debug_orig_size(s))
> return;
>
> +#ifdef CONFIG_KASAN_GENERIC
> + /*
> + * KASAN can store its free meta data at the start of the object
> + * area, so skip the redzone check if KASAN's meta data size is
> + * big enough to possibly overlap with the kmalloc redzone.
> + */
> + if (s->kasan_info.free_meta_size_in_object * 2 >= s->object_size)
> + orig_size = s->object_size;
> +#endif
> +
> p += get_info_end(s);
> p += sizeof(struct track) * 2;
>
> *(unsigned int *)p = orig_size;
> }
>
> +void skip_orig_size_check(struct kmem_cache *s, const void *object)
> +{
> + set_orig_size(s, (void *)object, s->object_size);
> +}
> +
> static unsigned int get_orig_size(struct kmem_cache *s, void *object)
> {
> void *p = kasan_reset_tag(object);
> @@ -949,13 +964,34 @@ static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
> static void init_object(struct kmem_cache *s, void *object, u8 val)
> {
> u8 *p = kasan_reset_tag(object);
> + unsigned int orig_size = s->object_size;
>
> - if (s->flags & SLAB_RED_ZONE)
> + if (s->flags & SLAB_RED_ZONE) {
> memset(p - s->red_left_pad, val, s->red_left_pad);
>
> + if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
> + unsigned int zone_start;
> +
> + orig_size = get_orig_size(s, object);
> + zone_start = orig_size;
> +
> + if (!freeptr_outside_object(s))
> + zone_start = max_t(unsigned int, orig_size,
> + s->offset + sizeof(void *));
> +
> + /*
> + * Redzone the space kmalloc allocated beyond
> + * the requested size.
> + */
> + if (zone_start < s->object_size)
> + memset(p + zone_start, val,
> + s->object_size - zone_start);
> + }
> + }
> +
> if (s->flags & __OBJECT_POISON) {
> - memset(p, POISON_FREE, s->object_size - 1);
> - p[s->object_size - 1] = POISON_END;
> + memset(p, POISON_FREE, orig_size - 1);
> + p[orig_size - 1] = POISON_END;
> }
>
> if (s->flags & SLAB_RED_ZONE)
> @@ -1103,6 +1139,7 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
> {
> u8 *p = object;
> u8 *endobject = object + s->object_size;
> + unsigned int orig_size;
>
> if (s->flags & SLAB_RED_ZONE) {
> if (!check_bytes_and_report(s, slab, object, "Left Redzone",
> @@ -1112,6 +1149,20 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
> if (!check_bytes_and_report(s, slab, object, "Right Redzone",
> endobject, val, s->inuse - s->object_size))
> return 0;
> +
> + if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
> + orig_size = get_orig_size(s, object);
> +
> + if (!freeptr_outside_object(s))
> + orig_size = max_t(unsigned int, orig_size,
> + s->offset + sizeof(void *));
> + if (s->object_size > orig_size &&
> + !check_bytes_and_report(s, slab, object,
> + "kmalloc Redzone", p + orig_size,
> + val, s->object_size - orig_size)) {
> + return 0;
> + }
> + }
> } else {
> if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
> check_bytes_and_report(s, slab, p, "Alignment padding",
> --
> 2.34.1
>

Looks good, but what about putting the free pointer outside the
object when slub_debug_orig_size(s) is set?

diff --git a/mm/slub.c b/mm/slub.c
index 9d1a985c9ede..7e57d9f718d1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -970,22 +970,15 @@ static void init_object(struct kmem_cache *s, void *object, u8 val)
memset(p - s->red_left_pad, val, s->red_left_pad);

if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
- unsigned int zone_start;
-
orig_size = get_orig_size(s, object);
- zone_start = orig_size;
-
- if (!freeptr_outside_object(s))
- zone_start = max_t(unsigned int, orig_size,
- s->offset + sizeof(void *));

/*
* Redzone the space kmalloc allocated beyond
* the requested size.
*/
- if (zone_start < s->object_size)
- memset(p + zone_start, val,
- s->object_size - zone_start);
+ if (orig_size < s->object_size)
+ memset(p + orig_size, val,
+ s->object_size - orig_size);
}
}

@@ -1153,9 +1146,6 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
orig_size = get_orig_size(s, object);

- if (!freeptr_outside_object(s))
- orig_size = max_t(unsigned int, orig_size,
- s->offset + sizeof(void *));
if (s->object_size > orig_size &&
!check_bytes_and_report(s, slab, object,
"kmalloc Redzone", p + orig_size,
@@ -4234,7 +4224,8 @@ static int calculate_sizes(struct kmem_cache *s)
*/
s->inuse = size;

- if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
+ if (slub_debug_orig_size(s) ||
+ (flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) ||
s->ctor) {
/*
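
That way the free pointer always lives past s->inuse, so the unused
tail [orig_size, object_size) can never overlap it and the max_t()
clamps can simply be dropped, as above. A rough object layout under
this scheme (my sketch, assuming both redzoning and orig_size
tracking are enabled):

	[left redzone][orig_size bytes used][kmalloc redzone][right redzone]
	[free pointer][alloc track][free track][orig_size]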

--
Thanks,
Hyeonggon