Re: [PATCH 3/5] mm/slub: Improve redzone check and zeroing for krealloc()
From: Danilo Krummrich
Date: Tue Sep 10 2024 - 06:10:59 EST
On Mon, Sep 09, 2024 at 09:29:56AM +0800, Feng Tang wrote:
> For current krealloc(), one problem is that it doesn't know the actual
> request size: say the object is a 64-byte kmalloc one, but the original
> caller may have requested only 48 bytes. And when krealloc() shrinks or
> grows within the same object, or allocates a new bigger object, it lacks
> this 'original size' information to do accurate data preserving or
> zeroing (when __GFP_ZERO is set).
>
> And when certain slub debug options are enabled, kmalloc caches do have
> this 'orig_size' feature. So utilize it to do more accurate data handling,
> as well as to enforce the kmalloc-redzone sanity check.
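To make the problem concrete for other readers (made-up sizes, not from the
patch): with slub_debug redzoning enabled, the bytes past 'orig_size' carry
the redzone pattern, so a growing krealloc() within the same bucket can't
honor __GFP_ZERO without knowing 'orig_size', e.g.:

	char *buf = kzalloc(48, GFP_KERNEL);	/* object comes from kmalloc-64 */

	/* ... use buf ... */

	buf = krealloc(buf, 60, GFP_KERNEL | __GFP_ZERO);
	/*
	 * Reused in place since ksize() is 64 >= 60; without 'orig_size'
	 * nobody knows that only 48 bytes were valid, so bytes 48..59 may
	 * still hold the redzone pattern instead of zeroes.
	 */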
>
> The krealloc()-related code is moved from slab_common.c to slub.c to
> allow more efficient function calls.
I think it would be good to do this in a separate commit, for a better diff and
history.
>
> Suggested-by: Vlastimil Babka <vbabka@xxxxxxx>
> Signed-off-by: Feng Tang <feng.tang@xxxxxxxxx>
> ---
> mm/slab_common.c | 84 -------------------------------------
> mm/slub.c | 106 +++++++++++++++++++++++++++++++++++++++++++++++
> 2 files changed, 106 insertions(+), 84 deletions(-)
>
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index ad438ba62485..e59942fb7970 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -1297,90 +1297,6 @@ module_init(slab_proc_init);
>
> #endif /* CONFIG_SLUB_DEBUG */
>
> -static __always_inline __realloc_size(2) void *
> -__do_krealloc(const void *p, size_t new_size, gfp_t flags)
> -{
> - void *ret;
> - size_t ks;
> -
> - /* Check for double-free before calling ksize. */
> - if (likely(!ZERO_OR_NULL_PTR(p))) {
> - if (!kasan_check_byte(p))
> - return NULL;
> - ks = ksize(p);
> - } else
> - ks = 0;
> -
> - /* If the object still fits, repoison it precisely. */
> - if (ks >= new_size) {
> - /* Zero out spare memory. */
> - if (want_init_on_alloc(flags)) {
> - kasan_disable_current();
> - memset((void *)p + new_size, 0, ks - new_size);
> - kasan_enable_current();
> - }
> -
> - p = kasan_krealloc((void *)p, new_size, flags);
> - return (void *)p;
> - }
> -
> - ret = kmalloc_node_track_caller_noprof(new_size, flags, NUMA_NO_NODE, _RET_IP_);
> - if (ret && p) {
> - /* Disable KASAN checks as the object's redzone is accessed. */
> - kasan_disable_current();
> - memcpy(ret, kasan_reset_tag(p), ks);
> - kasan_enable_current();
> - }
> -
> - return ret;
> -}
> -
> -/**
> - * krealloc - reallocate memory. The contents will remain unchanged.
> - * @p: object to reallocate memory for.
> - * @new_size: how many bytes of memory are required.
> - * @flags: the type of memory to allocate.
> - *
> - * If @p is %NULL, krealloc() behaves exactly like kmalloc(). If @new_size
> - * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
> - *
> - * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
> - * initial memory allocation, every subsequent call to this API for the same
> - * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
> - * __GFP_ZERO is not fully honored by this API.
> - *
> - * This is the case, since krealloc() only knows about the bucket size of an
> - * allocation (but not the exact size it was allocated with) and hence
> - * implements the following semantics for shrinking and growing buffers with
> - * __GFP_ZERO.
> - *
> - * new bucket
> - * 0 size size
> - * |--------|----------------|
> - * | keep | zero |
> - *
> - * In any case, the contents of the object pointed to are preserved up to the
> - * lesser of the new and old sizes.
> - *
> - * Return: pointer to the allocated memory or %NULL in case of error
> - */
> -void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags)
> -{
> - void *ret;
> -
> - if (unlikely(!new_size)) {
> - kfree(p);
> - return ZERO_SIZE_PTR;
> - }
> -
> - ret = __do_krealloc(p, new_size, flags);
> - if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
> - kfree(p);
> -
> - return ret;
> -}
> -EXPORT_SYMBOL(krealloc_noprof);
> -
> /**
> * kfree_sensitive - Clear sensitive information in memory before freeing
> * @p: object to free memory of
> diff --git a/mm/slub.c b/mm/slub.c
> index 4cb3822dba08..d4c938dfb89e 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -4709,6 +4709,112 @@ void kfree(const void *object)
> }
> EXPORT_SYMBOL(kfree);
>
> +static __always_inline __realloc_size(2) void *
> +__do_krealloc(const void *p, size_t new_size, gfp_t flags)
> +{
> + void *ret;
> + size_t ks;
> + int orig_size = 0;
> + struct kmem_cache *s;
> +
> + /* Check for double-free before calling ksize. */
> + if (likely(!ZERO_OR_NULL_PTR(p))) {
> + if (!kasan_check_byte(p))
> + return NULL;
> +
> + s = virt_to_cache(p);
> + orig_size = get_orig_size(s, (void *)p);
> + ks = s->object_size;
> + } else
> + ks = 0;
> +
> + /* If the object doesn't fit, allocate a bigger one */
> + if (new_size > ks)
> + goto alloc_new;
> +
> + /* Zero out spare memory. */
> + if (want_init_on_alloc(flags)) {
> + kasan_disable_current();
> + if (orig_size < new_size)
> + memset((void *)p + orig_size, 0, new_size - orig_size);
> + else
> + memset((void *)p + new_size, 0, ks - new_size);
> + kasan_enable_current();
> + }
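To double-check the two branches with made-up numbers: for a kmalloc-64
object with orig_size == 48, growing to new_size == 60 clears bytes 48..59
(the formerly redzoned spare, which the old code left untouched), while
shrinking to new_size == 32 clears bytes 32..63. Both look right to me.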
> +
> + if (slub_debug_orig_size(s) && !is_kfence_address(p)) {
> + set_orig_size(s, (void *)p, new_size);
> + if (s->flags & SLAB_RED_ZONE && new_size < ks)
> + memset_no_sanitize_memory((void *)p + new_size,
> + SLUB_RED_ACTIVE, ks - new_size);
> + }
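IIUC this also means that after shrinking, the tail beyond new_size is
repoisoned with SLUB_RED_ACTIVE, so out-of-bounds writes past the new size
get caught again, and the updated 'orig_size' makes the next krealloc() of
this object preserve exactly new_size bytes. Nice.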
> +
> + p = kasan_krealloc((void *)p, new_size, flags);
> + return (void *)p;
> +
> +alloc_new:
> + ret = kmalloc_node_track_caller_noprof(new_size, flags, NUMA_NO_NODE, _RET_IP_);
> + if (ret && p) {
> + /* Disable KASAN checks as the object's redzone is accessed. */
> + kasan_disable_current();
> + if (orig_size)
> + memcpy(ret, kasan_reset_tag(p), orig_size);
> + kasan_enable_current();
> + }
> +
> + return ret;
> +}
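And the memcpy() here now copies only orig_size bytes instead of ks, so the
(potentially redzoned) spare of the old object is never copied into the new
one, IIUC.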
> +
> +/**
> + * krealloc - reallocate memory. The contents will remain unchanged.
> + * @p: object to reallocate memory for.
> + * @new_size: how many bytes of memory are required.
> + * @flags: the type of memory to allocate.
> + *
> + * If @p is %NULL, krealloc() behaves exactly like kmalloc(). If @new_size
> + * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
> + *
> + * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
> + * initial memory allocation, every subsequent call to this API for the same
> + * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
> + * __GFP_ZERO is not fully honored by this API.
> + *
> + * When slub_debug_orig_size() is off, since krealloc() only knows about the
I think you want to remove ' since ' here.
> + * bucket size of an allocation (but not the exact size it was allocated with)
> + * and hence implements the following semantics for shrinking and growing
> + * buffers with __GFP_ZERO.
> + *
> + * new bucket
> + * 0 size size
> + * |--------|----------------|
> + * | keep | zero |
> + *
> + * Otherwize, the original allocation size 'orig_size' could be used to
Typo in 'otherwise'.
> + * precisely clear the requested size, and the new size will also be stored as
> + * the new 'orig_size'.
> + *
> + * In any case, the contents of the object pointed to are preserved up to the
> + * lesser of the new and old sizes.
> + *
> + * Return: pointer to the allocated memory or %NULL in case of error
> + */
> +void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags)
> +{
> + void *ret;
> +
> + if (unlikely(!new_size)) {
> + kfree(p);
> + return ZERO_SIZE_PTR;
> + }
> +
> + ret = __do_krealloc(p, new_size, flags);
> + if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
> + kfree(p);
> +
> + return ret;
> +}
> +EXPORT_SYMBOL(krealloc_noprof);
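For completeness, a hypothetical caller following the documented __GFP_ZERO
contract, i.e. every allocation in the chain asks for zeroing (sizes made
up, not from the patch):

	char *buf, *new;

	buf = kzalloc(48, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	new = krealloc(buf, 128, GFP_KERNEL | __GFP_ZERO);
	if (!new) {
		kfree(buf);	/* krealloc() keeps the old object on failure */
		return -ENOMEM;
	}
	buf = new;
	/*
	 * Per the kernel-doc above, bytes 48..127 are zero here, whether
	 * or not slub_debug's orig_size tracking is enabled.
	 */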
> +
> struct detached_freelist {
> struct slab *slab;
> void *tail;
> --
> 2.34.1
>