[RFC v2 PATCH 3/4] mm: let user decide page reporting options
From: Liang Li
Date: Mon Dec 21 2020 - 13:21:54 EST
Some key parameters for page reporting are currently hard coded. Different
users of the framework may have their own requirements, so make these
parameters configurable and let the user decide them.
Cc: Alexander Duyck <alexander.h.duyck@xxxxxxxxxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
Cc: Andrea Arcangeli <aarcange@xxxxxxxxxx>
Cc: Dan Williams <dan.j.williams@xxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxx>
Cc: David Hildenbrand <david@xxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Alex Williamson <alex.williamson@xxxxxxxxxx>
Cc: Michael S. Tsirkin <mst@xxxxxxxxxx>
Signed-off-by: Liang Li <liliang324@xxxxxxxxx>
---
drivers/virtio/virtio_balloon.c | 3 +++
include/linux/page_reporting.h | 3 +++
mm/page_reporting.c | 18 ++++++++++--------
mm/page_reporting.h | 6 +++---
4 files changed, 19 insertions(+), 11 deletions(-)
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 8985fc2cea86..a298517079bb 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -993,6 +993,9 @@ static int virtballoon_probe(struct virtio_device *vdev)
goto out_unregister_oom;
}
+ vb->pr_dev_info.mini_order = 6;
+ vb->pr_dev_info.batch_size = 32 * 1024 * 1024; /* 32M */
+ vb->pr_dev_info.delay_jiffies = 2 * HZ; /* 2 seconds */
err = page_reporting_register(&vb->pr_dev_info);
if (err)
goto out_unregister_oom;
diff --git a/include/linux/page_reporting.h b/include/linux/page_reporting.h
index 3b99e0ec24f2..63e1e9fbcaa2 100644
--- a/include/linux/page_reporting.h
+++ b/include/linux/page_reporting.h
@@ -13,6 +13,9 @@ struct page_reporting_dev_info {
int (*report)(struct page_reporting_dev_info *prdev,
struct scatterlist *sg, unsigned int nents);
+ unsigned long batch_size;
+ unsigned long delay_jiffies;
+ int mini_order;
/* work struct for processing reports */
struct delayed_work work;
diff --git a/mm/page_reporting.c b/mm/page_reporting.c
index 2f8e3d032fab..20ec3fb1afc4 100644
--- a/mm/page_reporting.c
+++ b/mm/page_reporting.c
@@ -11,12 +11,10 @@
#include "page_reporting.h"
#include "internal.h"
-#define PAGE_REPORTING_DELAY (2 * HZ)
#define MAX_SCAN_NUM 1024
-
-unsigned long page_report_batch_size __read_mostly = 4 * 1024 * 1024UL;
-
static struct page_reporting_dev_info __rcu *pr_dev_info __read_mostly;
+int page_report_mini_order = pageblock_order;
+unsigned long page_report_batch_size = 32 * 1024 * 1024;
enum {
PAGE_REPORTING_IDLE = 0,
@@ -48,7 +46,7 @@ __page_reporting_request(struct page_reporting_dev_info *prdev)
* now we are limiting this to running no more than once every
* couple of seconds.
*/
- schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY);
+ schedule_delayed_work(&prdev->work, prdev->delay_jiffies);
}
/* notify prdev of free page reporting request */
@@ -260,7 +258,7 @@ page_reporting_process_zone(struct page_reporting_dev_info *prdev,
/* Generate minimum watermark to be able to guarantee progress */
watermark = low_wmark_pages(zone) +
- (PAGE_REPORTING_CAPACITY << PAGE_REPORTING_MIN_ORDER);
+ (PAGE_REPORTING_CAPACITY << prdev->mini_order);
/*
* Cancel request if insufficient free memory or if we failed
@@ -270,7 +268,7 @@ page_reporting_process_zone(struct page_reporting_dev_info *prdev,
return err;
/* Process each free list starting from lowest order/mt */
- for (order = PAGE_REPORTING_MIN_ORDER; order < MAX_ORDER; order++) {
+ for (order = prdev->mini_order; order < MAX_ORDER; order++) {
for (mt = 0; mt < MIGRATE_TYPES; mt++) {
/* We do not pull pages from the isolate free list */
if (is_migrate_isolate(mt))
@@ -337,7 +335,7 @@ static void page_reporting_process(struct work_struct *work)
*/
state = atomic_cmpxchg(&prdev->state, state, PAGE_REPORTING_IDLE);
if (state == PAGE_REPORTING_REQUESTED)
- schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY);
+ schedule_delayed_work(&prdev->work, prdev->delay_jiffies);
}
static DEFINE_MUTEX(page_reporting_mutex);
@@ -365,6 +363,8 @@ int page_reporting_register(struct page_reporting_dev_info *prdev)
/* Assign device to allow notifications */
rcu_assign_pointer(pr_dev_info, prdev);
+ page_report_mini_order = prdev->mini_order;
+ page_report_batch_size = prdev->batch_size;
/* enable page reporting notification */
if (!static_key_enabled(&page_reporting_enabled)) {
static_branch_enable(&page_reporting_enabled);
@@ -382,6 +382,8 @@ void page_reporting_unregister(struct page_reporting_dev_info *prdev)
mutex_lock(&page_reporting_mutex);
if (rcu_access_pointer(pr_dev_info) == prdev) {
+ if (static_key_enabled(&page_reporting_enabled))
+ static_branch_disable(&page_reporting_enabled);
/* Disable page reporting notification */
RCU_INIT_POINTER(pr_dev_info, NULL);
synchronize_rcu();
diff --git a/mm/page_reporting.h b/mm/page_reporting.h
index b8fb3bbb345f..86ac6ffad970 100644
--- a/mm/page_reporting.h
+++ b/mm/page_reporting.h
@@ -9,9 +9,9 @@
#include <linux/slab.h>
#include <linux/pgtable.h>
#include <linux/scatterlist.h>
+#include <linux/page_reporting.h>
-#define PAGE_REPORTING_MIN_ORDER pageblock_order
-
+extern int page_report_mini_order;
extern unsigned long page_report_batch_size;
#ifdef CONFIG_PAGE_REPORTING
@@ -42,7 +42,7 @@ static inline void page_reporting_notify_free(unsigned int order)
return;
/* Determine if we have crossed reporting threshold */
- if (order < PAGE_REPORTING_MIN_ORDER)
+ if (order < page_report_mini_order)
return;
batch_size += (1 << order) << PAGE_SHIFT;
--
2.18.2