Currently we don't shrink/scan the anonymous LRUs when swap is off.
This is problematic for volatile range purging on swapless systems.

This patch naively changes the vmscan code to continue scanning and
shrinking the anonymous LRUs even when there is no swap. It obviously
has performance issues: the reclaim path will now scan and rotate
anonymous pages that, volatile ranges aside, it has no way to free.

Thoughts on how best to implement this would be appreciated.

Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Android Kernel Team <kernel-team@xxxxxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Robert Love <rlove@xxxxxxxxxx>
Cc: Mel Gorman <mel@xxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Dave Hansen <dave@xxxxxxxx>
Cc: Rik van Riel <riel@xxxxxxxxxx>
Cc: Dmitry Adamushko <dmitry.adamushko@xxxxxxxxx>
Cc: Neil Brown <neilb@xxxxxxx>
Cc: Andrea Arcangeli <aarcange@xxxxxxxxxx>
Cc: Mike Hommey <mh@xxxxxxxxxxxx>
Cc: Taras Glek <tglek@xxxxxxxxxxx>
Cc: Jan Kara <jack@xxxxxxx>
Cc: KOSAKI Motohiro <kosaki.motohiro@xxxxxxxxx>
Cc: Michel Lespinasse <walken@xxxxxxxxxx>
Cc: Minchan Kim <minchan@xxxxxxxxxx>
Cc: linux-mm@xxxxxxxxx <linux-mm@xxxxxxxxx>
Signed-off-by: John Stultz <john.stultz@xxxxxxxxxx>
---
mm/vmscan.c | 26 ++++----------------------
1 file changed, 4 insertions(+), 22 deletions(-)
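
For reference: every hunk below removes a variant of the same guard,
which skipped anon LRU work whenever no swap was available. Condensed
from the get_scan_count() hunk (the inline annotation is added here,
it is not part of the patch), the pattern being deleted looks like:

	/* If we have no swap space, do not bother scanning anon pages. */
	if (!sc->may_swap || (get_nr_swap_pages() <= 0)) {
		scan_balance = SCAN_FILE;	/* scan only the file LRUs */
		goto out;
	}
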
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 34f159a..07b0a8c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -155,9 +155,8 @@ static unsigned long zone_reclaimable_pages(struct zone *zone)
 	nr = zone_page_state(zone, NR_ACTIVE_FILE) +
 	     zone_page_state(zone, NR_INACTIVE_FILE);
 
-	if (get_nr_swap_pages() > 0)
-		nr += zone_page_state(zone, NR_ACTIVE_ANON) +
-			zone_page_state(zone, NR_INACTIVE_ANON);
+	nr += zone_page_state(zone, NR_ACTIVE_ANON) +
+		zone_page_state(zone, NR_INACTIVE_ANON);
 
 	return nr;
 }
@@ -1764,13 +1763,6 @@ static int inactive_anon_is_low_global(struct zone *zone)
  */
 static int inactive_anon_is_low(struct lruvec *lruvec)
 {
-	/*
-	 * If we don't have swap space, anonymous page deactivation
-	 * is pointless.
-	 */
-	if (!total_swap_pages)
-		return 0;
-
 	if (!mem_cgroup_disabled())
 		return mem_cgroup_inactive_anon_is_low(lruvec);
 
@@ -1880,12 +1872,6 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 	if (!global_reclaim(sc))
 		force_scan = true;
 
-	/* If we have no swap space, do not bother scanning anon pages. */
-	if (!sc->may_swap || (get_nr_swap_pages() <= 0)) {
-		scan_balance = SCAN_FILE;
-		goto out;
-	}
-
 	/*
 	 * Global reclaim will swap to prevent OOM even with no
 	 * swappiness, but memcg users want to use this knob to
@@ -2181,8 +2166,8 @@ static inline bool should_continue_reclaim(struct zone *zone,
  */
 	pages_for_compaction = (2UL << sc->order);
 	inactive_lru_pages = zone_page_state(zone, NR_INACTIVE_FILE);
-	if (get_nr_swap_pages() > 0)
-		inactive_lru_pages += zone_page_state(zone, NR_INACTIVE_ANON);
+	inactive_lru_pages += zone_page_state(zone, NR_INACTIVE_ANON);
+
 	if (sc->nr_reclaimed < pages_for_compaction &&
 			inactive_lru_pages > pages_for_compaction)
 		return true;
@@ -2726,9 +2711,6 @@ static void age_active_anon(struct zone *zone, struct scan_control *sc)
 {
 	struct mem_cgroup *memcg;
 
-	if (!total_swap_pages)
-		return;
-