[PATCH] mm/vmalloc:arm64 support cont-pte huge vmalloc mapping

From: Haiwang Fu
Date: Fri Sep 06 2024 - 09:09:42 EST


From: fuhaiwang <fuhaiwang@xxxxxxxxxxxxx>

Arm64 supports the contiguous bit, which is used to increase the mapping
size at the PMD and PTE levels.

Now huge vmalloc supports PMD- and PTE-level mappings, and supports
multiple sizes at the PTE level.

arm64: implement the following interfaces on arm64 to support
cont-pte huge vmalloc mappings:
arch_vmap_pte_supported_shift(*)
arch_vmap_pte_range_map_size(*)

Signed-off-by: fuhaiwang <fuhaiwang@xxxxxxxxxxxxx>
---
arch/arm64/include/asm/pgtable.h | 28 ++++++++++++++++++++++++++++
1 file changed, 28 insertions(+)

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index c329ea061dc9..3f32e3150680 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -1814,6 +1814,34 @@ static inline void clear_young_dirty_ptes(struct vm_area_struct *vma,

#endif /* CONFIG_ARM64_CONTPTE */

+/*
+ * Return the mapping size vmalloc should use for the PTE range starting at
+ * addr: CONT_PTE_SIZE when a contiguous-PTE (cont-pte) mapping is possible,
+ * PAGE_SIZE otherwise.
+ *
+ * A cont-pte mapping is only used when all of the following hold:
+ *  - the remaining range is at least CONT_PTE_SIZE long,
+ *  - the caller's max_page_shift permits a mapping of CONT_PTE_SIZE,
+ *  - the virtual address is CONT_PTE_SIZE aligned,
+ *  - the physical address (PFN_PHYS(pfn)) is CONT_PTE_SIZE aligned.
+ */
+static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
+ u64 pfn, unsigned int max_page_shift)
+{
+	/* Not enough of the range left for a full cont-pte block. */
+ if (end - addr < CONT_PTE_SIZE)
+ return PAGE_SIZE;
+
+	/* Caller capped the mapping size below CONT_PTE_SIZE. */
+ if ((1UL << max_page_shift) < CONT_PTE_SIZE)
+ return PAGE_SIZE;
+
+	/* Virtual address must be aligned to the cont-pte block size. */
+ if (!IS_ALIGNED(addr, CONT_PTE_SIZE))
+ return PAGE_SIZE;
+
+	/* Physical address must be aligned as well for the contiguous bit. */
+ if (!IS_ALIGNED(PFN_PHYS(pfn), CONT_PTE_SIZE))
+ return PAGE_SIZE;
+
+ return CONT_PTE_SIZE;
+}
+#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
+
+/*
+ * Report the largest PTE-level mapping shift usable for an allocation of
+ * the given size: CONT_PTE_SHIFT when the size can cover a full cont-pte
+ * block, otherwise the base PAGE_SHIFT.
+ */
+static inline int arch_vmap_pte_supported_shift(unsigned long size)
+{
+	return (size >= CONT_PTE_SIZE) ? CONT_PTE_SHIFT : PAGE_SHIFT;
+}
+#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
+
#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */
--
2.25.1