[PATCH 1/2] LoongArch: Remove zero_page_mask symbol

From: Bibo Mao
Date: Tue Sep 05 2023 - 12:19:12 EST


On LoongArch systems there is only a single zero page, and colored
zero pages are not needed, so the variable zero_page_mask is useless
and the macro __HAVE_COLOR_ZERO_PAGE is unnecessary. This patch
removes zero_page_mask and the macro __HAVE_COLOR_ZERO_PAGE.

Signed-off-by: Bibo Mao <maobibo@xxxxxxxxxxx>
---
arch/loongarch/include/asm/pgtable.h | 4 +---
arch/loongarch/mm/init.c | 9 +--------
2 files changed, 2 insertions(+), 11 deletions(-)

diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index 06963a172319..342c5f9c25d2 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -71,11 +71,9 @@ struct vm_area_struct;
*/

extern unsigned long empty_zero_page;
-extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
- (virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
-#define __HAVE_COLOR_ZERO_PAGE
+ (virt_to_page((void *)(empty_zero_page)))

/*
* TLB refill handlers may also map the vmalloc area into xkvrange.
diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
index 3b7d8129570b..8ec668f97b00 100644
--- a/arch/loongarch/mm/init.c
+++ b/arch/loongarch/mm/init.c
@@ -35,14 +35,8 @@
#include <asm/pgalloc.h>
#include <asm/tlb.h>

-/*
- * We have up to 8 empty zeroed pages so we can map one of the right colour
- * when needed. Since page is never written to after the initialization we
- * don't have to care about aliases on other CPUs.
- */
-unsigned long empty_zero_page, zero_page_mask;
+unsigned long empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
-EXPORT_SYMBOL(zero_page_mask);

void setup_zero_pages(void)
{
@@ -60,7 +54,6 @@ void setup_zero_pages(void)
for (i = 0; i < (1 << order); i++, page++)
mark_page_reserved(page);

- zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

void copy_user_highpage(struct page *to, struct page *from,
--
2.27.0