[PATCH 2/2] arm64/mm: drop HAVE_ARCH_PFN_VALID

From: Mike Rapoport
Date: Wed Sep 29 2021 - 21:30:57 EST


From: Anshuman Khandual <anshuman.khandual@xxxxxxx>

CONFIG_SPARSEMEM_VMEMMAP is now the only available memory model on arm64
platforms, and free_unused_memmap() just returns without creating any
holes in the memmap mapping. There is no need for any special handling in
pfn_valid(), so HAVE_ARCH_PFN_VALID can simply be dropped. This also moves
the pfn upper-bits sanity check into the generic pfn_valid().
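
For reference, the generic CONFIG_SPARSEMEM pfn_valid() in
include/linux/mmzone.h that arm64 falls back to after this change looks
roughly like the sketch below (paraphrased from the v5.15-era generic
helper rather than quoted verbatim); the PHYS_PFN(PFN_PHYS(pfn)) != pfn
comparison is the upper-bits sanity check mentioned above:

static inline int pfn_valid(unsigned long pfn)
{
	struct mem_section *ms;

	/*
	 * Reject pfns whose upper PAGE_SHIFT bits are set but whose
	 * lower bits happen to alias a valid pfn.
	 */
	if (PHYS_PFN(PFN_PHYS(pfn)) != pfn)
		return 0;

	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;

	ms = __pfn_to_section(pfn);
	if (!valid_section(ms))
		return 0;

	/* Early (boot) sections are valid across their whole span. */
	return early_section(ms) || pfn_section_valid(ms, pfn);
}

Compared with the arm64 implementation removed below, the only
behavioural difference is that early (boot) memory sections no longer go
through memblock_is_memory(); that is safe once free_unused_memmap() can
no longer punch holes into the memmap.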

[rppt: rebased on v5.15-rc3]

Link: https://lkml.kernel.org/r/1621947349-25421-1-git-send-email-anshuman.khandual@xxxxxxx
Signed-off-by: Anshuman Khandual <anshuman.khandual@xxxxxxx>
Acked-by: David Hildenbrand <david@xxxxxxxxxx>
Acked-by: Mike Rapoport <rppt@xxxxxxxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Cc: David Hildenbrand <david@xxxxxxxxxx>
Cc: Mike Rapoport <rppt@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Mike Rapoport <rppt@xxxxxxxxxxxxx>
---
arch/arm64/Kconfig | 1 -
arch/arm64/include/asm/page.h | 1 -
arch/arm64/mm/init.c | 37 -----------------------------------
3 files changed, 39 deletions(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 5c7ae4c3954b..53dd23f305be 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -154,7 +154,6 @@ config ARM64
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_MMAP_RND_BITS
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
-	select HAVE_ARCH_PFN_VALID
 	select HAVE_ARCH_PREL32_RELOCATIONS
 	select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
 	select HAVE_ARCH_SECCOMP_FILTER
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index f98c91bbd7c1..993a27ea6f54 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -41,7 +41,6 @@ void tag_clear_highpage(struct page *to);

 typedef struct page *pgtable_t;
 
-int pfn_valid(unsigned long pfn);
 int pfn_is_map_memory(unsigned long pfn);
 
 #include <asm/memory.h>
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 37a81754d9b6..e60c6eb813b7 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -160,43 +160,6 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 	free_area_init(max_zone_pfns);
 }
 
-int pfn_valid(unsigned long pfn)
-{
-	phys_addr_t addr = PFN_PHYS(pfn);
-	struct mem_section *ms;
-
-	/*
-	 * Ensure the upper PAGE_SHIFT bits are clear in the
-	 * pfn. Else it might lead to false positives when
-	 * some of the upper bits are set, but the lower bits
-	 * match a valid pfn.
-	 */
-	if (PHYS_PFN(addr) != pfn)
-		return 0;
-
-	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
-		return 0;
-
-	ms = __pfn_to_section(pfn);
-	if (!valid_section(ms))
-		return 0;
-
-	/*
-	 * ZONE_DEVICE memory does not have the memblock entries.
-	 * memblock_is_map_memory() check for ZONE_DEVICE based
-	 * addresses will always fail. Even the normal hotplugged
-	 * memory will never have MEMBLOCK_NOMAP flag set in their
-	 * memblock entries. Skip memblock search for all non early
-	 * memory sections covering all of hotplug memory including
-	 * both normal and ZONE_DEVICE based.
-	 */
-	if (!early_section(ms))
-		return pfn_section_valid(ms, pfn);
-
-	return memblock_is_memory(addr);
-}
-EXPORT_SYMBOL(pfn_valid);
-
 int pfn_is_map_memory(unsigned long pfn)
 {
 	phys_addr_t addr = PFN_PHYS(pfn);
--
2.28.0