[PATCH v2] LoongArch: Code cleanup with zero page

From: Bibo Mao
Date: Wed Sep 06 2023 - 21:22:16 EST


Like other popular architectures, define the zero page in the kernel
BSS segment rather than allocating it dynamically; this is simpler.
Also remove the macro __HAVE_COLOR_ZERO_PAGE and the variable
zero_page_mask, since there is now only a single zero page.

Signed-off-by: Bibo Mao <maobibo@xxxxxxxxxxx>
---
Changes in v2:
1. Combine two simple patches into one
2. Refine implementation about macro ZERO_PAGE(vaddr)
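
For context, dropping __HAVE_COLOR_ZERO_PAGE also switches generic mm
over to its simpler single-pfn helpers. Paraphrasing the two variants
of is_zero_pfn() in include/linux/pgtable.h (reproduced from memory,
so please check the tree; not part of this patch):

  /* With __HAVE_COLOR_ZERO_PAGE: a range check against the mask. */
  static inline int is_zero_pfn(unsigned long pfn)
  {
          extern unsigned long zero_pfn;
          unsigned long offset_from_zero_pfn = pfn - zero_pfn;
          return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
  }

  /* Without it: a plain equality test, no zero_page_mask needed. */
  static inline int is_zero_pfn(unsigned long pfn)
  {
          extern unsigned long zero_pfn;
          return pfn == zero_pfn;
  }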

---
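For reference, the __page_aligned_bss attribute used below is what puts
the array, page-aligned, into BSS. Its definition in
include/linux/linkage.h is (reproduced from memory, verify against the
tree):

  #define __page_aligned_bss \
          __section(".bss..page_aligned") __aligned(PAGE_SIZE)
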
 arch/loongarch/include/asm/mmzone.h  |  2 --
 arch/loongarch/include/asm/pgtable.h |  9 ++-------
 arch/loongarch/kernel/numa.c         |  1 -
 arch/loongarch/mm/init.c             | 28 +---------------------------
 4 files changed, 3 insertions(+), 37 deletions(-)

diff --git a/arch/loongarch/include/asm/mmzone.h b/arch/loongarch/include/asm/mmzone.h
index fe67d0b4b33d..2b9a90727e19 100644
--- a/arch/loongarch/include/asm/mmzone.h
+++ b/arch/loongarch/include/asm/mmzone.h
@@ -13,6 +13,4 @@ extern struct pglist_data *node_data[];
 
 #define NODE_DATA(nid) (node_data[(nid)])
 
-extern void setup_zero_pages(void);
-
 #endif /* _ASM_MMZONE_H_ */
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index 06963a172319..189156d8435c 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -69,13 +69,8 @@ struct vm_area_struct;
  * ZERO_PAGE is a global shared page that is always zero; used
  * for zero-mapped memory areas etc..
  */
-
-extern unsigned long empty_zero_page;
-extern unsigned long zero_page_mask;
-
-#define ZERO_PAGE(vaddr) \
-	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
-#define __HAVE_COLOR_ZERO_PAGE
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 
 /*
  * TLB refill handlers may also map the vmalloc area into xkvrange.
diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c
index 708665895b47..6f464d49f0c2 100644
--- a/arch/loongarch/kernel/numa.c
+++ b/arch/loongarch/kernel/numa.c
@@ -470,7 +470,6 @@ void __init mem_init(void)
 {
	high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
	memblock_free_all();
-	setup_zero_pages();	/* This comes from node 0 */
 }
 
 int pcibus_to_node(struct pci_bus *bus)
diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
index 3b7d8129570b..628ebe42b519 100644
--- a/arch/loongarch/mm/init.c
+++ b/arch/loongarch/mm/init.c
@@ -35,33 +35,8 @@
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
 
-/*
- * We have up to 8 empty zeroed pages so we can map one of the right colour
- * when needed. Since page is never written to after the initialization we
- * don't have to care about aliases on other CPUs.
- */
-unsigned long empty_zero_page, zero_page_mask;
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
 EXPORT_SYMBOL(empty_zero_page);
-EXPORT_SYMBOL(zero_page_mask);
-
-void setup_zero_pages(void)
-{
-	unsigned int order, i;
-	struct page *page;
-
-	order = 0;
-
-	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
-	if (!empty_zero_page)
-		panic("Oh boy, that early out of memory?");
-
-	page = virt_to_page((void *)empty_zero_page);
-	split_page(page, order);
-	for (i = 0; i < (1 << order); i++, page++)
-		mark_page_reserved(page);
-
-	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
-}
 
 void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
@@ -106,7 +81,6 @@ void __init mem_init(void)
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
 
	memblock_free_all();
-	setup_zero_pages();	/* Setup zeroed pages. */
 }
 #endif /* !CONFIG_NUMA */
 
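
To see the shared zero page at work from user space, here is a minimal,
self-contained sketch (plain POSIX C, illustration only, not part of
the patch): every untouched page of a private anonymous mapping reads
as zero through the one global zero page, and only the first write
forces the kernel to hand out a private copy.

  #include <stdio.h>
  #include <sys/mman.h>

  int main(void)
  {
          size_t len = 4 * 4096;
          unsigned char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

          if (p == MAP_FAILED)
                  return 1;

          /* A read fault maps the kernel's shared zero page read-only. */
          printf("before write: %u\n", p[0]);

          /* The first write breaks COW and allocates a private page. */
          p[0] = 42;
          printf("after write:  %u\n", p[0]);

          munmap(p, len);
          return 0;
  }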

--
2.27.0