[PATCH 1/1] mmzone: code cleanup for LRU stats
From: Maninder Singh
Date: Mon Jan 18 2016 - 02:18:47 EST
Replace the hardcoded array indices 0 and 1 with a new enum lru_stats, so that the anon and file LRU reclaim statistics are indexed by name rather than by magic number. No functional change.
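For example, in get_scan_count() the anon pressure computation goes from

	ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
	ap /= reclaim_stat->recent_rotated[0] + 1;

to the self-describing

	ap = anon_prio * (reclaim_stat->recent_scanned[LRU_ANON_STAT] + 1);
	ap /= reclaim_stat->recent_rotated[LRU_ANON_STAT] + 1;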
Signed-off-by: Maninder Singh <maninder1.s@xxxxxxxxxxx>
Signed-off-by: Vaneet Narang <v.narang@xxxxxxxxxxx>
---
include/linux/mmzone.h | 12 ++++++++----
mm/memcontrol.c | 20 ++++++++++----------
mm/vmscan.c | 20 ++++++++++----------
3 files changed, 28 insertions(+), 24 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 68cc063..fd993e4 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -198,17 +198,21 @@ static inline int is_active_lru(enum lru_list lru)
return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
+enum lru_stats {
+ LRU_ANON_STAT, /* anon LRU stats */
+ LRU_FILE_STAT, /* file LRU stats */
+ LRU_MAX_STAT /* number of LRU stat types */
+};
+
struct zone_reclaim_stat {
/*
* The pageout code in vmscan.c keeps track of how many of the
* mem/swap backed and file backed pages are referenced.
* The higher the rotated/scanned ratio, the more valuable
* that cache is.
- *
- * The anon LRU stats live in [0], file LRU stats in [1]
*/
- unsigned long recent_rotated[2];
- unsigned long recent_scanned[2];
+ unsigned long recent_rotated[LRU_MAX_STAT];
+ unsigned long recent_scanned[LRU_MAX_STAT];
};
struct lruvec {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 11e97e0..49c8e4d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3373,23 +3373,23 @@ static int memcg_stat_show(struct seq_file *m, void *v)
int nid, zid;
struct mem_cgroup_per_zone *mz;
struct zone_reclaim_stat *rstat;
- unsigned long recent_rotated[2] = {0, 0};
- unsigned long recent_scanned[2] = {0, 0};
+ unsigned long recent_rotated[LRU_MAX_STAT] = { 0 };
+ unsigned long recent_scanned[LRU_MAX_STAT] = { 0 };
for_each_online_node(nid)
for (zid = 0; zid < MAX_NR_ZONES; zid++) {
mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
rstat = &mz->lruvec.reclaim_stat;
- recent_rotated[0] += rstat->recent_rotated[0];
- recent_rotated[1] += rstat->recent_rotated[1];
- recent_scanned[0] += rstat->recent_scanned[0];
- recent_scanned[1] += rstat->recent_scanned[1];
+ recent_rotated[LRU_ANON_STAT] += rstat->recent_rotated[LRU_ANON_STAT];
+ recent_rotated[LRU_FILE_STAT] += rstat->recent_rotated[LRU_FILE_STAT];
+ recent_scanned[LRU_ANON_STAT] += rstat->recent_scanned[LRU_ANON_STAT];
+ recent_scanned[LRU_FILE_STAT] += rstat->recent_scanned[LRU_FILE_STAT];
}
- seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
- seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
- seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
- seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
+ seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[LRU_ANON_STAT]);
+ seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[LRU_FILE_STAT]);
+ seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[LRU_ANON_STAT]);
+ seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[LRU_FILE_STAT]);
}
#endif
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ee3bbd5..7a66554 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2176,14 +2176,14 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
get_lru_size(lruvec, LRU_INACTIVE_FILE);
spin_lock_irq(&zone->lru_lock);
- if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
- reclaim_stat->recent_scanned[0] /= 2;
- reclaim_stat->recent_rotated[0] /= 2;
+ if (unlikely(reclaim_stat->recent_scanned[LRU_ANON_STAT] > anon / 4)) {
+ reclaim_stat->recent_scanned[LRU_ANON_STAT] /= 2;
+ reclaim_stat->recent_rotated[LRU_ANON_STAT] /= 2;
}
- if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
- reclaim_stat->recent_scanned[1] /= 2;
- reclaim_stat->recent_rotated[1] /= 2;
+ if (unlikely(reclaim_stat->recent_scanned[LRU_FILE_STAT] > file / 4)) {
+ reclaim_stat->recent_scanned[LRU_FILE_STAT] /= 2;
+ reclaim_stat->recent_rotated[LRU_FILE_STAT] /= 2;
}
/*
@@ -2191,11 +2191,11 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
* proportional to the fraction of recently scanned pages on
* each list that were recently referenced and in active use.
*/
- ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
- ap /= reclaim_stat->recent_rotated[0] + 1;
+ ap = anon_prio * (reclaim_stat->recent_scanned[LRU_ANON_STAT] + 1);
+ ap /= reclaim_stat->recent_rotated[LRU_ANON_STAT] + 1;
- fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
- fp /= reclaim_stat->recent_rotated[1] + 1;
+ fp = file_prio * (reclaim_stat->recent_scanned[LRU_FILE_STAT] + 1);
+ fp /= reclaim_stat->recent_rotated[LRU_FILE_STAT] + 1;
spin_unlock_irq(&zone->lru_lock);
fraction[0] = ap;
--
1.7.9.5