[PATCH mm 10/21] kasan: clean up and rename ____kasan_kmalloc

From: andrey.konovalov
Date: Tue Dec 19 2023 - 17:31:43 EST


From: Andrey Konovalov <andreyknvl@xxxxxxxxxx>

Introduce a new poison_kmalloc_redzone helper function that poisons
the redzone for a kmalloc object.

Drop the confusingly named ____kasan_kmalloc function and instead
open-code the rest of its logic, together with a call to
poison_kmalloc_redzone, directly in the callers.
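
For illustration, __kasan_kmalloc() ends up with roughly the following
shape (a condensed sketch of the hunks below; __kasan_krealloc() uses
the helper in the same way for slab-backed objects):

	void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
						size_t size, gfp_t flags)
	{
		/* Quarantine, NULL, and KFENCE handling stays in the caller. */
		if (gfpflags_allow_blocking(flags))
			kasan_quarantine_reduce();
		if (unlikely(object == NULL))
			return NULL;
		if (is_kfence_address(kasan_reset_tag(object)))
			return (void *)object;

		/* Only the redzone poisoning moves into the new helper. */
		poison_kmalloc_redzone(cache, object, size, flags);
		return (void *)object;
	}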

This is a preparatory change for the following patches in this series.

Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
---
mm/kasan/common.c | 42 ++++++++++++++++++++++--------------------
1 file changed, 22 insertions(+), 20 deletions(-)

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 59146886e57d..1217b260abc3 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -312,26 +312,12 @@ void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
 	return tagged_object;
 }
 
-static inline void *____kasan_kmalloc(struct kmem_cache *cache,
+static inline void poison_kmalloc_redzone(struct kmem_cache *cache,
 				const void *object, size_t size, gfp_t flags)
 {
 	unsigned long redzone_start;
 	unsigned long redzone_end;
 
-	if (gfpflags_allow_blocking(flags))
-		kasan_quarantine_reduce();
-
-	if (unlikely(object == NULL))
-		return NULL;
-
-	if (is_kfence_address(kasan_reset_tag(object)))
-		return (void *)object;
-
-	/*
-	 * The object has already been unpoisoned by kasan_slab_alloc() for
-	 * kmalloc() or by kasan_krealloc() for krealloc().
-	 */
-
 	/*
 	 * The redzone has byte-level precision for the generic mode.
 	 * Partially poison the last object granule to cover the unaligned
@@ -355,14 +341,25 @@ static inline void *____kasan_kmalloc(struct kmem_cache *cache,
 	if (kasan_stack_collection_enabled() && is_kmalloc_cache(cache))
 		kasan_save_alloc_info(cache, (void *)object, flags);
 
-	/* Keep the tag that was set by kasan_slab_alloc(). */
-	return (void *)object;
 }
 
 void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
 					size_t size, gfp_t flags)
 {
-	return ____kasan_kmalloc(cache, object, size, flags);
+	if (gfpflags_allow_blocking(flags))
+		kasan_quarantine_reduce();
+
+	if (unlikely(object == NULL))
+		return NULL;
+
+	if (is_kfence_address(kasan_reset_tag(object)))
+		return (void *)object;
+
+	/* The object has already been unpoisoned by kasan_slab_alloc(). */
+	poison_kmalloc_redzone(cache, object, size, flags);
+
+	/* Keep the tag that was set by kasan_slab_alloc(). */
+	return (void *)object;
 }
 EXPORT_SYMBOL(__kasan_kmalloc);
 
@@ -408,6 +405,9 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
 	if (unlikely(object == ZERO_SIZE_PTR))
 		return (void *)object;
 
+	if (is_kfence_address(kasan_reset_tag(object)))
+		return (void *)object;
+
 	/*
 	 * Unpoison the object's data.
 	 * Part of it might already have been unpoisoned, but it's unknown
@@ -420,8 +420,10 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
 	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
 	if (unlikely(!slab))
 		return __kasan_kmalloc_large(object, size, flags);
-	else
-		return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
+	else {
+		poison_kmalloc_redzone(slab->slab_cache, object, size, flags);
+		return (void *)object;
+	}
 }
 
 bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
--
2.25.1