fixup! mm, page_alloc: disallow migratetype fallback in fastpath

From: Vlastimil Babka
Date: Wed Oct 12 2016 - 10:36:35 EST


We want to force compaction to run even when a page of the requested order is
potentially available, but of the wrong migratetype. This won't work unless
compaction itself is modified not to immediately declare success when it sees
such a page.
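Roughly, the change gates both shortcuts on a new prevent_fallback flag, so
that (simplified from the hunks below):

	/* compact_zone(): only trust the watermark check if fallback is allowed */
	if (ret == COMPACT_SUCCESS && !cc->prevent_fallback)
		return ret;

	/* __compact_finished(): only declare success via a fallback if allowed */
	if (!cc->prevent_fallback &&
	    find_suitable_fallback(area, order, migratetype,
				   true, &can_steal) != -1)
		return COMPACT_SUCCESS;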
---
mm/compaction.c | 22 +++++++++++++++++-----
mm/internal.h | 3 ++-
2 files changed, 19 insertions(+), 6 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index eb4ccd403543..eeb9200f7b7e 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1354,9 +1354,11 @@ static enum compact_result __compact_finished(struct zone *zone,
#endif
/*
* Job done if allocation would steal freepages from
- * other migratetype buddy lists.
+ * other migratetype buddy lists. This is not allowed
+ * for async direct compaction.
*/
- if (find_suitable_fallback(area, order, migratetype,
+ if (!cc->prevent_fallback &&
+ find_suitable_fallback(area, order, migratetype,
true, &can_steal) != -1)
return COMPACT_SUCCESS;
}
@@ -1509,8 +1511,17 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
cc->classzone_idx);
- /* Compaction is likely to fail */
- if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED)
+ /*
+ * Compaction should not be needed. If we don't allow stealing from
+ * pageblocks of different migratetype, the watermark checks cannot
+ * distinguish that, so assume we would need to steal, and leave the
+ * thorough check to compact_finished().
+ */
+ if (ret == COMPACT_SUCCESS && !cc->prevent_fallback)
+ return ret;
+
+ /* Compaction is likely to fail due to insufficient free pages */
+ if (ret == COMPACT_SKIPPED)
return ret;

/* huh, compaction_suitable is returning something unexpected */
@@ -1678,7 +1689,8 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
.direct_compaction = true,
.whole_zone = (prio == MIN_COMPACT_PRIORITY),
.ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
- .ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY)
+ .ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY),
+ .prevent_fallback = (prio == COMPACT_PRIO_ASYNC)
};
INIT_LIST_HEAD(&cc.freepages);
INIT_LIST_HEAD(&cc.migratepages);
diff --git a/mm/internal.h b/mm/internal.h
index a46eab383e8d..bb01d9bd60a8 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -181,13 +181,14 @@ struct compact_control {
bool ignore_block_suitable; /* Scan blocks considered unsuitable */
bool direct_compaction; /* False from kcompactd or /proc/... */
bool whole_zone; /* Whole zone should/has been scanned */
+ bool contended; /* Signal lock or sched contention */
+ bool prevent_fallback; /* Stealing migratetypes not allowed */
int order; /* order a direct compactor needs */
int migratetype; /* migratetype of direct compactor */
const gfp_t gfp_mask; /* gfp mask of a direct compactor */
const unsigned int alloc_flags; /* alloc flags of a direct compactor */
const int classzone_idx; /* zone index of a direct compactor */
struct zone *zone;
- bool contended; /* Signal lock or sched contention */
};

unsigned long
--
2.10.0