[RFC PATCH 1/4] mm: reduce the impact of the page reporting worker

From: liliangleo
Date: Sun Apr 12 2020 - 05:13:33 EST


When scanning the free list, 'page_reporting_cycle' may hold the
zone->lock for a long time when there are no reported pages in the
free list. Setting PAGE_REPORTING_MIN_ORDER to a lower order will
make this issue worse.

Two approaches were used to reduce the impact (the general pattern is
sketched below):
1. Release the zone lock periodically.
2. Yield the CPU voluntarily if needed.
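
For reference, here is a minimal userspace sketch of that pattern (not
the patch itself, which is in the diff below): a pthread mutex stands in
for zone->lock, sched_yield() stands in for cond_resched(), and the
names BATCH_LIMIT, scan_list, struct node and list_lock are illustrative
only. Unlike the real code, it counts every entry rather than only the
skipped reported pages, and it has no reporting budget.

/*
 * Sketch: process a list in bounded batches instead of one long
 * critical section. The lock is held for at most BATCH_LIMIT entries,
 * then released, and the CPU is yielded before the next batch so other
 * lock waiters can make progress. Compile with -pthread.
 */
#include <pthread.h>
#include <sched.h>
#include <stddef.h>

#define BATCH_LIMIT 1024

struct node {
	struct node *next;
	int reported;		/* rough analogue of PageReported() */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void scan_list(struct node *head)
{
	struct node *pos = head;

	while (pos) {
		int scanned = 0;

		pthread_mutex_lock(&list_lock);
		while (pos && scanned < BATCH_LIMIT) {
			if (!pos->reported) {
				/* ... report/handle the entry here ... */
			}
			scanned++;
			pos = pos->next;
		}
		pthread_mutex_unlock(&list_lock);

		/* Voluntarily give up the CPU between batches. */
		sched_yield();
	}
}

int main(void)
{
	struct node nodes[3] = {
		{ &nodes[1], 1 }, { &nodes[2], 0 }, { NULL, 0 },
	};

	scan_list(&nodes[0]);
	return 0;
}

The sketch also assumes the list is not modified while the lock is
dropped; the kernel patch instead re-walks the zone's free list on each
cycle, which is why it tracks progress with scan_cnt and nr_free rather
than a saved list position.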

Signed-off-by: liliangleo <liliangleo@xxxxxxxxxxxxxx>
---
mm/page_reporting.c | 35 ++++++++++++++++++++++++++++++++---
1 file changed, 32 insertions(+), 3 deletions(-)

diff --git a/mm/page_reporting.c b/mm/page_reporting.c
index 3bbd471cfc81..3a7084e508e1 100644
--- a/mm/page_reporting.c
+++ b/mm/page_reporting.c
@@ -6,11 +6,14 @@
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
+#include <linux/sched.h>

#include "page_reporting.h"
#include "internal.h"

#define PAGE_REPORTING_DELAY (2 * HZ)
+#define MAX_SCAN_NUM 1024
+
static struct page_reporting_dev_info __rcu *pr_dev_info __read_mostly;

enum {
@@ -115,7 +118,7 @@ page_reporting_cycle(struct page_reporting_dev_info *prdev, struct zone *zone,
unsigned int page_len = PAGE_SIZE << order;
struct page *page, *next;
long budget;
- int err = 0;
+ int err = 0, scan_cnt = 0;

/*
* Perform early check, if free area is empty there is
@@ -145,8 +148,14 @@ page_reporting_cycle(struct page_reporting_dev_info *prdev, struct zone *zone,
/* loop through free list adding unreported pages to sg list */
list_for_each_entry_safe(page, next, list, lru) {
/* We are going to skip over the reported pages. */
- if (PageReported(page))
+ if (PageReported(page)) {
+ if (++scan_cnt >= MAX_SCAN_NUM) {
+ err = scan_cnt;
+ break;
+ }
continue;
+ }
+

/*
* If we fully consumed our budget then update our
@@ -219,6 +228,26 @@ page_reporting_cycle(struct page_reporting_dev_info *prdev, struct zone *zone,
return err;
}

+static int
+reporting_order_type(struct page_reporting_dev_info *prdev, struct zone *zone,
+ unsigned int order, unsigned int mt,
+ struct scatterlist *sgl, unsigned int *offset)
+{
+ int ret = 0;
+ unsigned long total = 0;
+
+ might_sleep();
+ do {
+ cond_resched();
+ ret = page_reporting_cycle(prdev, zone, order, mt,
+ sgl, offset);
+ if (ret > 0)
+ total += ret;
+ } while (ret > 0 && total < zone->free_area[order].nr_free);
+
+ return ret;
+}
+
static int
page_reporting_process_zone(struct page_reporting_dev_info *prdev,
struct scatterlist *sgl, struct zone *zone)
@@ -245,7 +274,7 @@ page_reporting_process_zone(struct page_reporting_dev_info *prdev,
if (is_migrate_isolate(mt))
continue;

- err = page_reporting_cycle(prdev, zone, order, mt,
+ err = reporting_order_type(prdev, zone, order, mt,
sgl, &offset);
if (err)
return err;
--
2.14.1