[PATCH 28/35] Batch free pages from migratetype per-cpu lists

From: Mel Gorman
Date: Mon Mar 16 2009 - 05:53:45 EST


When the PCP lists are too large, a number of pages are freed in bulk.
Currently the free lists are examined in a round-robin fashion, but this
touches more cache lines than necessary. This patch instead frees a batch
of pages from one list at a time, starting with the list for the
migratetype of the page whose free triggered the drain, and only then
rotating to the other migratetype lists.

Signed-off-by: Mel Gorman <mel@xxxxxxxxx>
---
mm/page_alloc.c | 36 +++++++++++++++++++++++-------------
1 files changed, 23 insertions(+), 13 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3516b87..edadab1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -547,32 +547,42 @@ static inline void bulk_add_pcp_page(struct per_cpu_pages *pcp,
* pinned" detection logic.
*/
static void free_pcppages_bulk(struct zone *zone, int count,
- struct per_cpu_pages *pcp)
+ struct per_cpu_pages *pcp,
+ int migratetype)
{
- int migratetype = 0;
unsigned int freed = 0;
+ unsigned int bulkcount;
+ struct list_head *list;

spin_lock(&zone->lock);
zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
zone->pages_scanned = 0;

+ list = &pcp->lists[migratetype];
+ bulkcount = 1 + (count / (MIGRATE_PCPTYPES * 2));
while (freed < count) {
struct page *page;
- struct list_head *list;
+ int thisfreed;

- /* Remove pages from lists in a round-robin fashion */
- do {
- if (migratetype == MIGRATE_PCPTYPES)
+ /*
+ * Move to another migratetype if this list is depleted or
+ * we've freed enough in this batch
+ */
+ while (list_empty(list) || bulkcount < 0) {
+ bulkcount = 1 + (count / (MIGRATE_PCPTYPES * 2));
+ if (++migratetype == MIGRATE_PCPTYPES)
migratetype = 0;
list = &pcp->lists[migratetype];
- migratetype++;
- } while (list_empty(list));
+ }

+ /* Remove from list and update counters */
page = list_entry(list->prev, struct page, lru);
rmv_pcp_page(pcp, page);
+ thisfreed = 1 << page->index;
+ freed += thisfreed;
+ bulkcount -= thisfreed;

- freed += 1 << page->index;
- __free_one_page(page, zone, page->index, page_private(page));
+ __free_one_page(page, zone, page->index, migratetype);
}
spin_unlock(&zone->lock);

@@ -969,7 +979,7 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
to_drain = pcp->batch;
else
to_drain = pcp->count;
- free_pcppages_bulk(zone, to_drain, pcp);
+ free_pcppages_bulk(zone, to_drain, pcp, 0);
local_irq_restore(flags);
}
#endif
@@ -997,7 +1007,7 @@ static void drain_pages(unsigned int cpu)

pcp = &pset->pcp;
local_irq_save(flags);
- free_pcppages_bulk(zone, pcp->count, pcp);
+ free_pcppages_bulk(zone, pcp->count, pcp, 0);
BUG_ON(pcp->count);
local_irq_restore(flags);
}
@@ -1110,7 +1120,7 @@ static void free_hot_cold_page(struct page *page, int order, int cold)
page->index = order;
add_pcp_page(pcp, page, cold);
if (pcp->count >= pcp->high)
- free_pcppages_bulk(zone, pcp->batch, pcp);
+ free_pcppages_bulk(zone, pcp->batch, pcp, migratetype);

out:
local_irq_restore(flags);
--
1.5.6.5

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/