[PATCH 1/2] Avoid lock contention on page draining
From: Andi Kleen
Date: Tue Mar 27 2012 - 20:41:03 EST
From: Andi Kleen <ak@xxxxxxxxxxxxxxx>
drain_all_pages asks all CPUs to drain their PCP lists. This causes a lot
of lock contention because they try to free into the same zones in lock
step.
Make half of the CPUs go through the zones forwards and the other half
backwards. This should roughly halve the lock contention.
I opencoded the backwards walk: there were no macros for it, and it seemed
too obscure to add new ones just for this.
Signed-off-by: Andi Kleen <ak@xxxxxxxxxxxxxxx>
---
mm/page_alloc.c | 56 +++++++++++++++++++++++++++++++++++++++++-------------
1 files changed, 42 insertions(+), 14 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a13ded1..8cd4f6a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1124,6 +1124,23 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
}
#endif
+static void do_drain_zone(struct zone *zone, int cpu)
+{
+ unsigned long flags;
+ struct per_cpu_pageset *pset;
+ struct per_cpu_pages *pcp;
+
+ local_irq_save(flags);
+ pset = per_cpu_ptr(zone->pageset, cpu);
+
+ pcp = &pset->pcp;
+ if (pcp->count) {
+ free_pcppages_bulk(zone, pcp->count, pcp);
+ pcp->count = 0;
+ }
+ local_irq_restore(flags);
+}
+
/*
* Drain pages of the indicated processor.
*
@@ -1133,22 +1150,33 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
*/
static void drain_pages(unsigned int cpu)
{
- unsigned long flags;
struct zone *zone;
- for_each_populated_zone(zone) {
- struct per_cpu_pageset *pset;
- struct per_cpu_pages *pcp;
-
- local_irq_save(flags);
- pset = per_cpu_ptr(zone->pageset, cpu);
-
- pcp = &pset->pcp;
- if (pcp->count) {
- free_pcppages_bulk(zone, pcp->count, pcp);
- pcp->count = 0;
- }
- local_irq_restore(flags);
+ /*
+ * Let half of the CPUs go through the zones forwards
+ * and the other half backwards. This reduces lock contention.
+ */
+ if ((cpu % 2) == 0) {
+ for_each_populated_zone(zone)
+ do_drain_zone(zone, cpu);
+ } else {
+ int i, j, k = 0;
+
+ /*
+ * Backwards zone walk. Opencoded because it's quite obscure.
+ */
+ for (i = MAX_NUMNODES - 1; i >= 0; i--) {
+ if (!node_states[N_ONLINE].bits[i / BITS_PER_LONG]) {
+ i -= i % BITS_PER_LONG;
+ continue;
+ }
+ if (!node_isset(i, node_states[N_ONLINE]))
+ continue;
+ k++;
+ for (j = MAX_NR_ZONES - 1; j >= 0; j--)
+ do_drain_zone(&NODE_DATA(i)->node_zones[j], cpu);
+ }
+ WARN_ON(k != num_online_nodes());
}
}
--
1.7.7.6
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/