[RFC PATCH] mm/cma: don't use one more bit on page flags for ZONE_CMA
From: Joonsoo Kim
Date: Wed May 10 2017 - 02:12:26 EST
This is just to show my idea for solving the page flags problem
caused by ZONE_CMA. There is some consensus that ZONE_CMA is a
nicer solution than the MIGRATETYPE approach, but there is also a
worry about using one more bit in page flags. This patch tries to
address that worry. It is a temporary implementation and I will
optimize it further if everyone agrees with this approach.
Adding a new zone (ZONE_CMA) needs one more bit in page flags in
some configurations. This resource is precious, so it's better not
to consume it if at all possible. Therefore, this patch implements
the following tricky magic to avoid using one more bit.
1. If the number of zones becomes five due to ZONE_CMA, start the
encoding magic.
2. ZONE_MOVABLE is written into the page flags of pages belonging
to ZONE_CMA. Since we then never store ZONE_CMA (enum value 4),
which would need one more bit to encode, we save one bit in page
flags.
3. Check for ZONE_MOVABLE when retrieving a page's zone index.
If the stored zone index is ZONE_MOVABLE, use a special handler
function to get the actual zone index, as sketched right after
this list.
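Roughly, the decode path looks like this (a condensed sketch of
the hunks below; config guards and the CMA-area scan are omitted):

/* Condensed sketch of the decode path added by this patch. */
static inline enum zone_type page_zonenum(const struct page *page)
{
	enum zone_type zone_type =
		(page->flags >> ZONES_PGSHIFT) & ZONES_MASK;

	/* ZONE_CMA has its own encoding in page flags: nothing to do. */
	if (ZONE_CMA_IN_PAGE_FLAGS)
		return zone_type;

	/*
	 * ZONE_CMA was written as ZONE_MOVABLE. page_zonenum_special()
	 * scans the registered CMA areas by pfn and reports ZONE_CMA
	 * for pages that belong to one of them.
	 */
	return page_zonenum_special(page, zone_type);
}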
I found no regression in a kernel build test with this magic
applied.
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
---
include/linux/cma.h | 24 ++++++++++++++++++++++++
include/linux/gfp.h | 24 ++++++++++++++++++++++--
include/linux/mm.h | 8 +++++++-
include/linux/page-flags-layout.h | 15 ++++++++++++++-
mm/cma.c | 19 +++++++++++++++++++
mm/page_alloc.c | 10 +++++++++-
6 files changed, 95 insertions(+), 5 deletions(-)
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 2433d5e..d36695c 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -3,6 +3,7 @@
#include <linux/init.h>
#include <linux/types.h>
+#include <linux/mmzone.h>
/*
* There is always at least global CMA area and a few optional
@@ -34,9 +35,32 @@ extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
#ifdef CONFIG_CMA
+extern unsigned cma_area_count;
extern unsigned long cma_get_free(void);
+extern bool is_zone_cma_page(const struct page *page);
+
+static inline enum zone_type page_zonenum_special(const struct page *page,
+ enum zone_type zone_type)
+{
+ if (!cma_area_count)
+ return zone_type;
+
+ if (zone_type != ZONE_MOVABLE)
+ return zone_type;
+
+ if (is_zone_cma_page(page))
+ return ZONE_CMA;
+
+ return ZONE_MOVABLE;
+}
+
#else
static inline unsigned long cma_get_free(void) { return 0; }
+static inline enum zone_type page_zonenum_special(const struct page *page,
+ enum zone_type zone_type)
+{
+ return zone_type;
+}
#endif
#endif
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 15987cc..bc5e443 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -348,13 +348,33 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
* 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
*/
-#if defined(CONFIG_ZONE_DEVICE) && (MAX_NR_ZONES-1) <= 4
+#if MAX_NR_ZONES <= 4
+#define GFP_ZONES_SHIFT ZONES_SHIFT
+#elif (MAX_NR_ZONES-1) == 4
+
+/*
+ * ZONE_CMA is encoded as ZONE_MOVABLE and ZONES_SHIFT would be one less
+ * than what GFP_ZONES_SHIFT usually needs.
+ */
+#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_CMA)
+#define GFP_ZONES_SHIFT ZONES_SHIFT
+
/* ZONE_DEVICE is not a valid GFP zone specifier */
-#define GFP_ZONES_SHIFT 2
+#elif defined(CONFIG_ZONE_DEVICE)
+#define GFP_ZONES_SHIFT 2
+
+#elif defined(CONFIG_CMA)
+#define GFP_ZONES_SHIFT (ZONES_SHIFT + 1)
+
+#else
+#define GFP_ZONES_SHIFT ZONES_SHIFT
+#endif
+
#else
#define GFP_ZONES_SHIFT ZONES_SHIFT
#endif
+
#if !defined(CONFIG_64BITS) && GFP_ZONES_SHIFT > 2
typedef unsigned long long GFP_ZONE_TABLE_TYPE;
#else
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 693dc6e..50228ef 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -23,6 +23,7 @@
#include <linux/page_ext.h>
#include <linux/err.h>
#include <linux/page_ref.h>
+#include <linux/cma.h>
struct mempolicy;
struct anon_vma;
@@ -780,7 +781,12 @@ int finish_mkwrite_fault(struct vm_fault *vmf);
static inline enum zone_type page_zonenum(const struct page *page)
{
- return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
+ enum zone_type zone_type = (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
+
+ if (ZONE_CMA_IN_PAGE_FLAGS)
+ return zone_type;
+
+ return page_zonenum_special(page, zone_type);
}
#ifdef CONFIG_ZONE_DEVICE
diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h
index 77b078c..8a4ae38 100644
--- a/include/linux/page-flags-layout.h
+++ b/include/linux/page-flags-layout.h
@@ -17,12 +17,25 @@
#define ZONES_SHIFT 1
#elif MAX_NR_ZONES <= 4
#define ZONES_SHIFT 2
+
#elif MAX_NR_ZONES <= 8
-#define ZONES_SHIFT 3
+
+#if defined(CONFIG_CMA) && (MAX_NR_ZONES-1) <= 4
+#define ZONES_SHIFT 2
+#define ZONE_CMA_IN_PAGE_FLAGS 0
#else
+#define ZONES_SHIFT 3
+#endif
+
+#else /* MAX_NR_ZONES <= 8 */
+
#error ZONES_SHIFT -- too many zones configured adjust calculation
#endif
+#ifndef ZONE_CMA_IN_PAGE_FLAGS
+#define ZONE_CMA_IN_PAGE_FLAGS 1
+#endif
+
#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
diff --git a/mm/cma.c b/mm/cma.c
index adfda1c..1833973 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -110,6 +110,25 @@ static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
mutex_unlock(&cma->lock);
}
+bool is_zone_cma_page(const struct page *page)
+{
+ struct cma *cma;
+ int i;
+ unsigned long pfn = page_to_pfn(page);
+
+ for (i = 0; i < cma_area_count; i++) {
+ cma = &cma_areas[i];
+
+ if (pfn < cma->base_pfn)
+ continue;
+
+ if (pfn < cma->base_pfn + cma->count)
+ return true;
+ }
+
+ return false;
+}
+
static int __init cma_activate_area(struct cma *cma)
{
int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cf29227..9399cc4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1596,6 +1596,14 @@ void __init init_cma_reserved_pageblock(struct page *page)
unsigned long pfn = page_to_pfn(page);
struct page *p = page;
int nid = page_to_nid(page);
+ enum zone_type zone_type;
+
+ /*
+ * In some cases, we don't have enough room to encode ZONE_CMA
+ * in page flags. Use ZONE_MOVABLE in that case and do
+ * post-processing when getting the page's zone.
+ */
+ zone_type = ZONE_CMA_IN_PAGE_FLAGS ? ZONE_CMA : ZONE_MOVABLE;
/*
* ZONE_CMA will steal present pages from other zones by changing
@@ -1609,7 +1617,7 @@ void __init init_cma_reserved_pageblock(struct page *page)
set_page_count(p, 0);
/* Steal pages from other zones */
- set_page_links(p, ZONE_CMA, nid, pfn);
+ set_page_links(p, zone_type, nid, pfn);
} while (++p, ++pfn, --i);
adjust_present_page_count(page, pageblock_nr_pages);
--
2.7.4