From: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Subject: mm-vmscan-fix-the-page-state-calculation-in-too_many_isolated-fix

Move the zone_page_state_snapshot() fallback logic into
too_many_isolated(), so shrink_inactive_list() doesn't incorrectly call
congestion_wait().
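
For context, __too_many_isolated()'s `safe' argument chooses how the
vmstat counters are read: safe=0 uses the cheap global counters, safe=1
uses the drift-corrected snapshot.  A rough sketch (the helper name is
hypothetical; zone_page_state() and zone_page_state_snapshot() are the
real vmstat accessors):

	/* Illustrative only -- not part of this patch. */
	static unsigned long read_isolated_count(struct zone *zone, int file,
						 int safe)
	{
		enum zone_stat_item item = file ? NR_ISOLATED_FILE :
						  NR_ISOLATED_ANON;

		if (safe)
			/* Folds in per-cpu vm_stat_diff[] deltas: slower. */
			return zone_page_state_snapshot(zone, item);

		/* Global counter only: fast, but may lag reality. */
		return zone_page_state(zone, item);
	}

With the fallback moved into too_many_isolated(), the cheap safe=0 check
runs first and the snapshot is only taken when that check claims too many
pages are isolated, so the common path stays cheap and stale counters no
longer send shrink_inactive_list() into congestion_wait().
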
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxx>
Cc: Minchan Kim <minchan@xxxxxxxxxx>
Cc: Vinayak Menon <vinmenon@xxxxxxxxxxxxxx>
Cc: Vladimir Davydov <vdavydov@xxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---
 mm/vmscan.c |   23 +++++++++++------------
 1 file changed, 11 insertions(+), 12 deletions(-)

diff -puN mm/vmscan.c~mm-vmscan-fix-the-page-state-calculation-in-too_many_isolated-fix mm/vmscan.c
--- a/mm/vmscan.c~mm-vmscan-fix-the-page-state-calculation-in-too_many_isolated-fix
+++ a/mm/vmscan.c
@@ -1402,7 +1402,7 @@ int isolate_lru_page(struct page *page)
 }
 
 static int __too_many_isolated(struct zone *zone, int file,
-				struct scan_control *sc, int safe)
+			       struct scan_control *sc, int safe)
 {
 	unsigned long inactive, isolated;
 
@@ -1435,7 +1435,7 @@ static int __too_many_isolated(struct zo
  * unnecessary swapping, thrashing and OOM.
  */
 static int too_many_isolated(struct zone *zone, int file,
-				struct scan_control *sc, int safe)
+				struct scan_control *sc)
 {
 	if (current_is_kswapd())
 		return 0;
@@ -1443,12 +1443,14 @@ static int too_many_isolated(struct zone
 	if (!global_reclaim(sc))
 		return 0;
 
-	if (unlikely(__too_many_isolated(zone, file, sc, 0))) {
-		if (safe)
-			return __too_many_isolated(zone, file, sc, safe);
-		else
-			return 1;
-	}
+	/*
+	 * __too_many_isolated(safe=0) is fast but inaccurate, because it
+	 * doesn't account for the vm_stat_diff[] counters.  So if it looks
+	 * like too_many_isolated() is about to return true, fall back to the
+	 * slower, more accurate zone_page_state_snapshot().
+	 */
+	if (unlikely(__too_many_isolated(zone, file, sc, 0)))
+		return __too_many_isolated(zone, file, sc, 1);