Re: [PATCH v3 1/2] kasan: catch invalid free before SLUB reinitializes the object
From: Andrey Konovalov
Date: Thu Jul 25 2024 - 20:44:06 EST
On Thu, Jul 25, 2024 at 5:32 PM Jann Horn <jannh@xxxxxxxxxx> wrote:
>
> Currently, when KASAN is combined with init-on-free behavior, the
> initialization happens before KASAN's "invalid free" checks.
>
> More importantly, a subsequent commit will want to use the object metadata
> region to store an rcu_head, and we should let KASAN check that the object
> pointer is valid before that. (Otherwise that change will make the existing
> testcase kmem_cache_invalid_free fail.)
This is no longer the case since v3, right? Do we still need this patch?
If it is still needed, see the comment below.
Thank you!
> So add a new KASAN hook that allows KASAN to pre-validate a
> kmem_cache_free() operation before SLUB actually starts modifying the
> object or its metadata.
>
> Acked-by: Vlastimil Babka <vbabka@xxxxxxx> #slub
> Signed-off-by: Jann Horn <jannh@xxxxxxxxxx>
> ---
> include/linux/kasan.h | 16 ++++++++++++++++
> mm/kasan/common.c | 51 +++++++++++++++++++++++++++++++++++++++------------
> mm/slub.c | 7 +++++++
> 3 files changed, 62 insertions(+), 12 deletions(-)
>
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index 70d6a8f6e25d..ebd93c843e78 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -175,6 +175,16 @@ static __always_inline void * __must_check kasan_init_slab_obj(
> return (void *)object;
> }
>
> +bool __kasan_slab_pre_free(struct kmem_cache *s, void *object,
> + unsigned long ip);
> +static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s,
> + void *object)
> +{
> + if (kasan_enabled())
> + return __kasan_slab_pre_free(s, object, _RET_IP_);
> + return false;
> +}
Please add a documentation comment for this new hook; something like
what we have for kasan_mempool_poison_pages() and some of the others.
(I've been meaning to add them for all of the hooks but haven't gotten
around to it yet.)
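As a rough sketch (the wording is just a suggestion, please adjust as
needed):

/**
 * kasan_slab_pre_free - Check whether it is safe to free a slab object.
 * @s: Slab cache the object belongs to.
 * @object: Pointer to the object to be freed.
 *
 * This function checks whether it is safe to free the given object
 * (for example, it checks for invalid-free and double-free bugs and
 * reports them if detected).
 *
 * This hook is intended to be called by the slab allocator before the
 * allocator starts modifying the object or its metadata.
 *
 * Return: true if freeing the object is unsafe and the caller should
 * skip the free operation; false otherwise.
 */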
> +
> bool __kasan_slab_free(struct kmem_cache *s, void *object,
> unsigned long ip, bool init);
> static __always_inline bool kasan_slab_free(struct kmem_cache *s,
> @@ -371,6 +381,12 @@ static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
> {
> return (void *)object;
> }
> +
> +static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
> +{
> + return false;
> +}
> +
> static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
> {
> return false;
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index 85e7c6b4575c..7c7fc6ce7eb7 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -208,31 +208,52 @@ void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
> return (void *)object;
> }
>
> -static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
> - unsigned long ip, bool init)
> +enum free_validation_result {
> + KASAN_FREE_IS_IGNORED,
> + KASAN_FREE_IS_VALID,
> + KASAN_FREE_IS_INVALID
> +};
> +
> +static enum free_validation_result check_slab_free(struct kmem_cache *cache,
> + void *object, unsigned long ip)
> {
> - void *tagged_object;
> + void *tagged_object = object;
>
> - if (!kasan_arch_is_ready())
> - return false;
> + if (is_kfence_address(object) || !kasan_arch_is_ready())
> + return KASAN_FREE_IS_IGNORED;
>
> - tagged_object = object;
> object = kasan_reset_tag(object);
>
> if (unlikely(nearest_obj(cache, virt_to_slab(object), object) != object)) {
> kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE);
> - return true;
> + return KASAN_FREE_IS_INVALID;
> }
>
> - /* RCU slabs could be legally used after free within the RCU period. */
> - if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
> - return false;
> -
> if (!kasan_byte_accessible(tagged_object)) {
> kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_DOUBLE_FREE);
> - return true;
> + return KASAN_FREE_IS_INVALID;
> }
>
> + return KASAN_FREE_IS_VALID;
> +}
> +
> +static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
> + unsigned long ip, bool init)
> +{
> + void *tagged_object = object;
> + enum free_validation_result valid = check_slab_free(cache, object, ip);
I believe we don't need check_slab_free() here, as it has already been
done in kasan_slab_pre_free(). Checking just kasan_arch_is_ready() and
is_kfence_address() should reduce the performance impact a bit.
Though if we remove check_slab_free() from here, we do need to add it
to __kasan_mempool_poison_object().
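I.e., roughly something like this (an untested sketch, just to
illustrate; note that with check_slab_free() gone, ip becomes unused
here and could be dropped from the signature as well):

static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
				      unsigned long ip, bool init)
{
	if (is_kfence_address(object) || !kasan_arch_is_ready())
		return false;

	/* Free validity was already checked in kasan_slab_pre_free(). */
	object = kasan_reset_tag(object);

	/* RCU slabs could be legally used after free within the RCU period. */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
		     KASAN_SLAB_FREE, init);

	/* ... the rest of the function stays the same ... */
}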
> +
> + if (valid == KASAN_FREE_IS_IGNORED)
> + return false;
> + if (valid == KASAN_FREE_IS_INVALID)
> + return true;
> +
> + object = kasan_reset_tag(object);
> +
> + /* RCU slabs could be legally used after free within the RCU period. */
> + if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
> + return false;
I vaguely recall there was some reason why this check was done before
the kasan_byte_accessible() check, but I might be wrong. Could you try
booting the kernel with only this patch applied to see if anything
breaks?
> +
> kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
> KASAN_SLAB_FREE, init);
>
> @@ -242,6 +263,12 @@ static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
> return false;
> }
>
> +bool __kasan_slab_pre_free(struct kmem_cache *cache, void *object,
> + unsigned long ip)
> +{
> + return check_slab_free(cache, object, ip) == KASAN_FREE_IS_INVALID;
> +}
> +
> bool __kasan_slab_free(struct kmem_cache *cache, void *object,
> unsigned long ip, bool init)
> {
> diff --git a/mm/slub.c b/mm/slub.c
> index 4927edec6a8c..34724704c52d 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -2170,6 +2170,13 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
> if (kfence_free(x))
> return false;
>
> + /*
> + * Give KASAN a chance to notice an invalid free operation before we
> + * modify the object.
> + */
> + if (kasan_slab_pre_free(s, x))
> + return false;
> +
> /*
> * As memory initialization might be integrated into KASAN,
> * kasan_slab_free and initialization memset's must be
>
> --
> 2.45.2.1089.g2a221341d9-goog
>