[PATCH 3.14 72/77] mm: move zone->pages_scanned into a vmstat counter

From: Greg Kroah-Hartman
Date: Tue Jan 27 2015 - 20:40:41 EST


3.14-stable review patch. If anyone has any objections, please let me know.

------------------

From: Mel Gorman <mgorman@xxxxxxx>

commit 0d5d823ab4e608ec7b52ac4410de4cb74bbe0edd upstream.

zone->pages_scanned sits on a write-intensive cache line during page
reclaim, and it is also written in the page free paths. Move the counter
into vmstat so that updates are batched per-cpu, and avoid touching it in
the free paths unless there is something to clear.
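
For background, vmstat counters accumulate changes in a small per-cpu
delta and only fold them into the shared zone counter once the delta
crosses a threshold, so most updates never dirty the shared cache line.
Below is a simplified sketch of that pattern, modelled on
__mod_zone_page_state() in mm/vmstat.c but with the __this_cpu_*
accessors elided; treat it as an illustration rather than the literal
kernel code:

	/* Sketch only: batch counter updates in a per-cpu delta. */
	static void mod_zone_state(struct zone *zone, enum zone_stat_item item,
				   long delta)
	{
		struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
		s8 *p = pcp->vm_stat_diff + item;
		long x = delta + *p;

		if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
			/* Delta overflowed: fold it into the shared counter. */
			zone_page_state_add(x, zone, item);
			x = 0;
		}
		*p = x;	/* common case: a purely CPU-local write */
	}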

On a small UMA machine running tiobench the difference is marginal. On
a 4-node machine the overhead is more noticeable. Note that automatic
NUMA balancing was disabled for this test as otherwise the system CPU
overhead is unpredictable.

                   3.16.0-rc3    3.16.0-rc3    3.16.0-rc3
                      vanilla  rearrange-v5     vmstat-v5
User                   746.94        759.78        774.56
System               65336.22      58350.98      32847.27
Elapsed              27553.52      27282.02      27415.04

Note that the overhead reduction will vary depending on where exactly
pages are allocated and freed.
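
As a side effect the counter becomes user-visible: the mm/vmstat.c hunks
below export it as "nr_pages_scanned" in /proc/vmstat and in the per-zone
output of /proc/zoneinfo.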

Signed-off-by: Mel Gorman <mgorman@xxxxxxx>
Acked-by: Johannes Weiner <hannes@xxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Mel Gorman <mgorman@xxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
 include/linux/mmzone.h |    2 +-
 mm/page_alloc.c        |   12 +++++++++---
 mm/vmscan.c            |    7 ++++---
 mm/vmstat.c            |    3 ++-
 4 files changed, 16 insertions(+), 8 deletions(-)

--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -143,6 +143,7 @@ enum zone_stat_item {
 	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
 	NR_DIRTIED,		/* page dirtyings since bootup */
 	NR_WRITTEN,		/* page writings since bootup */
+	NR_PAGES_SCANNED,	/* pages scanned since last reclaim */
 #ifdef CONFIG_NUMA
 	NUMA_HIT,		/* allocated in intended node */
 	NUMA_MISS,		/* allocated in non intended node */
@@ -478,7 +479,6 @@ struct zone {
 
 	/* Fields commonly accessed by the page reclaim scanner */
 	spinlock_t		lru_lock;
-	unsigned long		pages_scanned;	/* since last reclaim */
 	struct lruvec		lruvec;
 
 	/*
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -678,9 +678,12 @@ static void free_pcppages_bulk(struct zo
 	int migratetype = 0;
 	int batch_free = 0;
 	int to_free = count;
+	unsigned long nr_scanned;
 
 	spin_lock(&zone->lock);
-	zone->pages_scanned = 0;
+	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
+	if (nr_scanned)
+		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
 
 	while (to_free) {
 		struct page *page;
@@ -729,8 +732,11 @@ static void free_one_page(struct zone *z
 				unsigned int order,
 				int migratetype)
 {
+	unsigned long nr_scanned;
 	spin_lock(&zone->lock);
-	zone->pages_scanned = 0;
+	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
+	if (nr_scanned)
+		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
 
 	__free_one_page(page, pfn, zone, order, migratetype);
 	if (unlikely(!is_migrate_isolate(migratetype)))
@@ -3251,7 +3257,7 @@ void show_free_areas(unsigned int filter
 		K(zone_page_state(zone, NR_BOUNCE)),
 		K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
 		K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
-		zone->pages_scanned,
+		K(zone_page_state(zone, NR_PAGES_SCANNED)),
 		(!zone_reclaimable(zone) ? "yes" : "no")
 		);
 	printk("lowmem_reserve[]:");
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -163,7 +163,8 @@ static unsigned long zone_reclaimable_pa
 
 bool zone_reclaimable(struct zone *zone)
 {
-	return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
+	return zone_page_state(zone, NR_PAGES_SCANNED) <
+		zone_reclaimable_pages(zone) * 6;
 }
 
 static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
@@ -1470,7 +1471,7 @@ shrink_inactive_list(unsigned long nr_to
 	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
 
 	if (global_reclaim(sc)) {
-		zone->pages_scanned += nr_scanned;
+		__mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned);
 		if (current_is_kswapd())
 			__count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
 		else
@@ -1659,7 +1660,7 @@ static void shrink_active_list(unsigned
 	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
 				     &nr_scanned, sc, isolate_mode, lru);
 	if (global_reclaim(sc))
-		zone->pages_scanned += nr_scanned;
+		__mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned);
 
 	reclaim_stat->recent_scanned[file] += nr_taken;
 
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -761,6 +761,7 @@ const char * const vmstat_text[] = {
 	"nr_shmem",
 	"nr_dirtied",
 	"nr_written",
+	"nr_pages_scanned",
 
 #ifdef CONFIG_NUMA
 	"numa_hit",
@@ -1055,7 +1056,7 @@ static void zoneinfo_show_print(struct s
 		   min_wmark_pages(zone),
 		   low_wmark_pages(zone),
 		   high_wmark_pages(zone),
-		   zone->pages_scanned,
+		   zone_page_state(zone, NR_PAGES_SCANNED),
 		   zone->spanned_pages,
 		   zone->present_pages,
 		   zone->managed_pages);

