[PATCH v4] mempool: Do not use ksize() for poisoning

From: Kees Cook
Date: Fri Oct 28 2022 - 11:53:11 EST


Nothing appears to be using ksize() within the kmalloc-backed mempools
except the mempool poisoning logic. Use the actual pool size instead
of ksize() so the memory does not need the special handling otherwise
required by KASAN, UBSAN_BOUNDS, and FORTIFY_SOURCE.
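
For the kmalloc-backed case, pool_data already carries the requested
object size. As a reference sketch (matching the existing helpers in
mm/mempool.c), the allocation path is effectively:

	void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
	{
		size_t size = (size_t)pool_data;

		return kmalloc(size, gfp_mask);
	}

	void mempool_kfree(void *element, void *pool_data)
	{
		kfree(element);
	}

so casting pool->pool_data back to size_t recovers exactly the size
that was requested at allocation time, rather than the (possibly
larger) bucket size that ksize() reports.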

Suggested-by: Vlastimil Babka <vbabka@xxxxxxx>
Link: https://lore.kernel.org/lkml/f4fc52c4-7c18-1d76-0c7a-4058ea2486b9@xxxxxxx/
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Cc: Marco Elver <elver@xxxxxxxxxx>
Cc: Vincenzo Frascino <vincenzo.frascino@xxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: linux-mm@xxxxxxxxx
Acked-by: Vlastimil Babka <vbabka@xxxxxxx>
Reviewed-by: Andrey Konovalov <andreyknvl@xxxxxxxxx>
Signed-off-by: Kees Cook <keescook@xxxxxxxxxxxx>
---
This replaces mempool-use-kmalloc_size_roundup-to-match-ksize-usage.patch
v4: add review/ack tags, explicitly call out subject change
v3: https://lore.kernel.org/lkml/20221025233421.you.825-kees@xxxxxxxxxx/
v2: https://lore.kernel.org/lkml/20221018090323.never.897-kees@xxxxxxxxxx/
v1: https://lore.kernel.org/lkml/20220923202822.2667581-14-keescook@xxxxxxxxxxxx/
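
Usage sketch for reviewers (hypothetical min_nr/size values, using the
existing helper from include/linux/mempool.h): a pool created as

	mempool_t *pool = mempool_create_kmalloc_pool(16, 128);

stores 128 in pool->pool_data, so the hunks below check, poison, and
unpoison exactly 128 bytes per element instead of ksize(element).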
---
mm/mempool.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/mm/mempool.c b/mm/mempool.c
index 96488b13a1ef..54204065037d 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -58,7 +58,7 @@ static void check_element(mempool_t *pool, void *element)
 {
 	/* Mempools backed by slab allocator */
 	if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
-		__check_element(pool, element, ksize(element));
+		__check_element(pool, element, (size_t)pool->pool_data);
 	} else if (pool->free == mempool_free_pages) {
 		/* Mempools backed by page allocator */
 		int order = (int)(long)pool->pool_data;
@@ -81,7 +81,7 @@ static void poison_element(mempool_t *pool, void *element)
 {
 	/* Mempools backed by slab allocator */
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) {
-		__poison_element(element, ksize(element));
+		__poison_element(element, (size_t)pool->pool_data);
 	} else if (pool->alloc == mempool_alloc_pages) {
 		/* Mempools backed by page allocator */
 		int order = (int)(long)pool->pool_data;
@@ -112,7 +112,7 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
 static void kasan_unpoison_element(mempool_t *pool, void *element)
 {
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
-		kasan_unpoison_range(element, __ksize(element));
+		kasan_unpoison_range(element, (size_t)pool->pool_data);
 	else if (pool->alloc == mempool_alloc_pages)
 		kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
 				     false);
--
2.34.1