[PATCH 2/7] mm: Group the processing of anonymous pages to be swapped in shrink_page_list

From: Tim Chen
Date: Tue May 03 2016 - 17:01:46 EST


This is a cleanup patch that reorganizes the processing of anonymous
pages in shrink_page_list.

We delay the swap-out processing of anonymous pages in shrink_page_list
and instead gather them on separate lists (one for pages whose
references are clean, one for the rest).  This prepares for batching of
pages to be swapped.  The processing of these lists is consolidated in
the new function shrink_anon_page_list.

Functionally, there is no change in the logic of how pages are processed,
only in the order in which the anonymous and file-backed pages are
handled within shrink_page_list.

Signed-off-by: Tim Chen <tim.c.chen@xxxxxxxxxxxxxxx>
---
 mm/vmscan.c | 82 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 77 insertions(+), 5 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5542005..132ba02 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1083,6 +1083,58 @@ static void pg_finish(struct page *page,
 	}
 }
 
+static unsigned long shrink_anon_page_list(struct list_head *page_list,
+				struct zone *zone,
+				struct scan_control *sc,
+				struct list_head *swap_pages,
+				struct list_head *ret_pages,
+				struct list_head *free_pages,
+				enum ttu_flags ttu_flags,
+				int *pgactivate,
+				int n,
+				bool clean)
+{
+	unsigned long nr_reclaimed = 0;
+	enum pg_result pg_dispose;
+
+	while (n > 0) {
+		struct page *page;
+		int swap_ret = SWAP_SUCCESS;
+
+		--n;
+		if (list_empty(swap_pages))
+			return nr_reclaimed;
+
+		page = lru_to_page(swap_pages);
+
+		list_del(&page->lru);
+
+		/*
+		 * Anonymous process memory has backing store?
+		 * Try to allocate it some swap space here.
+		 */
+
+		if (!add_to_swap(page, page_list)) {
+			pg_finish(page, PG_ACTIVATE_LOCKED, swap_ret, &nr_reclaimed,
+				pgactivate, ret_pages, free_pages);
+			continue;
+		}
+
+		if (clean)
+			pg_dispose = handle_pgout(page_list, zone, sc, ttu_flags,
+				PAGEREF_RECLAIM_CLEAN, true, true, &swap_ret, page);
+		else
+			pg_dispose = handle_pgout(page_list, zone, sc, ttu_flags,
+				PAGEREF_RECLAIM, true, true, &swap_ret, page);
+
+		pg_finish(page, pg_dispose, swap_ret, &nr_reclaimed,
+			pgactivate, ret_pages, free_pages);
+	}
+	return nr_reclaimed;
+}
+
+
+
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
@@ -1099,6 +1151,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 {
 	LIST_HEAD(ret_pages);
 	LIST_HEAD(free_pages);
+	LIST_HEAD(swap_pages);
+	LIST_HEAD(swap_pages_clean);
 	int pgactivate = 0;
 	unsigned long nr_unqueued_dirty = 0;
 	unsigned long nr_dirty = 0;
@@ -1106,6 +1160,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	unsigned long nr_reclaimed = 0;
 	unsigned long nr_writeback = 0;
 	unsigned long nr_immediate = 0;
+	unsigned long nr_swap = 0;
+	unsigned long nr_swap_clean = 0;
 
 	cond_resched();
 
@@ -1271,12 +1327,17 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 				pg_dispose = PG_KEEP_LOCKED;
 				goto finish;
 			}
-			if (!add_to_swap(page, page_list)) {
-				pg_dispose = PG_ACTIVATE_LOCKED;
-				goto finish;
+			if (references == PAGEREF_RECLAIM_CLEAN) {
+				list_add(&page->lru, &swap_pages_clean);
+				++nr_swap_clean;
+			} else {
+				list_add(&page->lru, &swap_pages);
+				++nr_swap;
 			}
-			lazyfree = true;
-			may_enter_fs = 1;
+
+			pg_dispose = PG_NEXT;
+			goto finish;
+
 		}
 
 		pg_dispose = handle_pgout(page_list, zone, sc, ttu_flags,
@@ -1288,6 +1349,17 @@ finish:
 
 	}
 
+	nr_reclaimed += shrink_anon_page_list(page_list, zone, sc,
+				&swap_pages_clean, &ret_pages,
+				&free_pages, ttu_flags,
+				&pgactivate, nr_swap_clean,
+				true);
+	nr_reclaimed += shrink_anon_page_list(page_list, zone, sc,
+				&swap_pages, &ret_pages,
+				&free_pages, ttu_flags,
+				&pgactivate, nr_swap,
+				false);
+
 	mem_cgroup_uncharge_list(&free_pages);
 	try_to_unmap_flush();
 	free_hot_cold_page_list(&free_pages, true);
-- 
2.5.5