[PATCH v2 06/25] mm: Allow hpages to be arbitrary order

From: Matthew Wilcox
Date: Tue Feb 11 2020 - 23:20:04 EST


From: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx>

Remove the assumption in hpage_nr_pages() that compound pages are
necessarily PMD-sized. The return type needs to be signed as we need
to negate the value, e.g. when calling update_lru_size().
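As an illustration (not part of this patch), removing a page from the
LRU subtracts the page count, which is roughly what
del_page_from_lru_list() in include/linux/mm_inline.h does today:

	static __always_inline void del_page_from_lru_list(struct page *page,
					struct lruvec *lruvec, enum lru_list lru)
	{
		list_del(&page->lru);
		/* negated count must stay signed for arbitrary-order pages */
		update_lru_size(lruvec, lru, page_zonenum(page),
				-hpage_nr_pages(page));
	}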

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
include/linux/huge_mm.h | 10 +++-------
1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 5aca3d1bdb32..16367e2f771e 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -230,12 +230,8 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
 	else
 		return NULL;
 }
-static inline int hpage_nr_pages(struct page *page)
-{
-	if (unlikely(PageTransHuge(page)))
-		return HPAGE_PMD_NR;
-	return 1;
-}
+
+#define hpage_nr_pages(page) (long)compound_nr(page)
 
 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
 		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
@@ -289,7 +285,7 @@ static inline struct list_head *page_deferred_list(struct page *page)
 #define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
 #define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })
 
-#define hpage_nr_pages(x) 1
+#define hpage_nr_pages(x) 1L
 
 static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
 {
--
2.25.0