Re: [PATCH RFC v3 22/35] arm64: mte: Enable tag storage if CMA areas have been activated

From: Evgenii Stepanov
Date: Fri Feb 02 2024 - 17:30:22 EST


On Thu, Jan 25, 2024 at 8:44 AM Alexandru Elisei
<alexandru.elisei@xxxxxxx> wrote:
>
> Before enabling MTE tag storage management, make sure that the CMA areas
> have been successfully activated. If a CMA area fails activation, the pages
> are kept as reserved. Reserved pages are never used by the page allocator.
>
> If this happens, the kernel would have to manage tag storage only for some
> of the memory, but not for all memory, and that would make the code
> unreasonably complicated.
>
> Choose to disable tag storage management altogether if a CMA area fails to
> be activated.
>
> Signed-off-by: Alexandru Elisei <alexandru.elisei@xxxxxxx>
> ---
>
> Changes since v2:
>
> * New patch.
>
> arch/arm64/include/asm/mte_tag_storage.h | 12 ++++++
> arch/arm64/kernel/mte_tag_storage.c      | 50 ++++++++++++++++++++++++
> 2 files changed, 62 insertions(+)
>
> diff --git a/arch/arm64/include/asm/mte_tag_storage.h b/arch/arm64/include/asm/mte_tag_storage.h
> index 3c2cd29e053e..7b3f6bff8e6f 100644
> --- a/arch/arm64/include/asm/mte_tag_storage.h
> +++ b/arch/arm64/include/asm/mte_tag_storage.h
> @@ -6,8 +6,20 @@
> #define __ASM_MTE_TAG_STORAGE_H
>
> #ifdef CONFIG_ARM64_MTE_TAG_STORAGE
> +
> +DECLARE_STATIC_KEY_FALSE(tag_storage_enabled_key);
> +
> +static inline bool tag_storage_enabled(void)
> +{
> +	return static_branch_likely(&tag_storage_enabled_key);
> +}
> +
> void mte_init_tag_storage(void);
> #else
> +static inline bool tag_storage_enabled(void)
> +{
> +	return false;
> +}
> static inline void mte_init_tag_storage(void)
> {
> }
> diff --git a/arch/arm64/kernel/mte_tag_storage.c b/arch/arm64/kernel/mte_tag_storage.c
> index 9a1a8a45171e..d58c68b4a849 100644
> --- a/arch/arm64/kernel/mte_tag_storage.c
> +++ b/arch/arm64/kernel/mte_tag_storage.c
> @@ -19,6 +19,8 @@
>
> #include <asm/mte_tag_storage.h>
>
> +__ro_after_init DEFINE_STATIC_KEY_FALSE(tag_storage_enabled_key);
> +
> struct tag_region {
> 	struct range mem_range; /* Memory associated with the tag storage, in PFNs. */
> 	struct range tag_range; /* Tag storage memory, in PFNs. */
> @@ -314,3 +316,51 @@ void __init mte_init_tag_storage(void)
> 	num_tag_regions = 0;
> 	pr_info("MTE tag storage region management disabled");
> }
> +
> +static int __init mte_enable_tag_storage(void)
> +{
> +	struct range *tag_range;
> +	struct cma *cma;
> +	int i, ret;
> +
> +	if (num_tag_regions == 0)
> +		return 0;
> +
> +	for (i = 0; i < num_tag_regions; i++) {
> +		tag_range = &tag_regions[i].tag_range;
> +		cma = tag_regions[i].cma;
> +		/*
> +		 * CMA will keep the pages as reserved when the region fails
> +		 * activation.
> +		 */
> +		if (PageReserved(pfn_to_page(tag_range->start)))
> +			goto out_disabled;
> +	}
> +
> +	static_branch_enable(&tag_storage_enabled_key);
> +	pr_info("MTE tag storage region management enabled");
> +
> +	return 0;
> +
> +out_disabled:
> +	for (i = 0; i < num_tag_regions; i++) {
> +		tag_range = &tag_regions[i].tag_range;
> +		cma = tag_regions[i].cma;
> +
> +		if (PageReserved(pfn_to_page(tag_range->start)))
> +			continue;
> +
> +		/* Try really hard to reserve the tag storage. */
> +		ret = cma_alloc(cma, range_len(tag_range), 8, true);
> +		/*
> +		 * Tag storage is still in use for data, memory and/or tag
> +		 * corruption will ensue.
> +		 */
> +		WARN_ON_ONCE(ret);

cma_alloc() returns a struct page *, so this condition needs to be inverted
and the type of `ret` changed to match.
Not sure how it slipped through; the implicit pointer-to-int conversion is a
compile error with clang.
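
Something along these lines should do it (untested sketch; `pages` is just a
placeholder name, keeping `ret` and changing its type to struct page * works
just as well):

	for (i = 0; i < num_tag_regions; i++) {
		struct page *pages;

		tag_range = &tag_regions[i].tag_range;
		cma = tag_regions[i].cma;

		if (PageReserved(pfn_to_page(tag_range->start)))
			continue;

		/* Try really hard to reserve the tag storage. */
		pages = cma_alloc(cma, range_len(tag_range), 8, true);
		/*
		 * Tag storage is still in use for data, memory and/or tag
		 * corruption will ensue.
		 */
		WARN_ON_ONCE(!pages);
	}

cma_alloc() returns NULL on failure, so with the check inverted the warning
only fires when the tag storage could not be reclaimed.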

> +	}
> +	num_tag_regions = 0;
> +	pr_info("MTE tag storage region management disabled");
> +
> +	return -EINVAL;
> +}
> +arch_initcall(mte_enable_tag_storage);
> --
> 2.43.0
>