[PATCH 1/5] mm: add tlb_flush_mmu_free_batches
From: Aaron Lu
Date: Fri Feb 24 2017 - 06:40:54 EST
There are currently two places that free pages: tlb_flush_mmu_free frees the
pages pointed to by the mmu_gather_batches, and tlb_finish_mmu frees the
batch pages themselves. The following patch adds yet another place, the
parallel free worker thread, which needs to free both the pages pointed to
by the mmu_gather_batches and the batch pages themselves. To avoid code
duplication, add a new function for this purpose.
Another reason to add this function is that after the following patch,
cond_resched() will need to be called wherever more than 10K pages can be
freed, i.e. in tlb_flush_mmu_free and in the worker function. With a single
shared function, cond_resched() only has to be added in one place instead
of several.
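For illustration only (the exact placement in the later patch may differ):
cond_resched() could sit in the page freeing loop of the shared helper, so
that both callers pick it up automatically:

	for (batch = batch_start; batch; batch = next) {
		next = batch->next;
		if (batch->nr) {
			free_pages_and_swap_cache(batch->pages, batch->nr);
			/* give other tasks a chance when freeing many pages */
			cond_resched();
			batch->nr = 0;
		}
		if (free_batch_page)
			free_pages((unsigned long)batch, 0);
	}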
No functionality change.
Signed-off-by: Aaron Lu <aaron.lu@xxxxxxxxx>
---
mm/memory.c | 28 +++++++++++++++++-----------
1 file changed, 17 insertions(+), 11 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 6bf2b471e30c..2b88196841b9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -251,14 +251,25 @@ static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
__tlb_reset_range(tlb);
}
-static void tlb_flush_mmu_free(struct mmu_gather *tlb)
+static void tlb_flush_mmu_free_batches(struct mmu_gather_batch *batch_start,
+ int free_batch_page)
{
- struct mmu_gather_batch *batch;
+ struct mmu_gather_batch *batch, *next;
- for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
- free_pages_and_swap_cache(batch->pages, batch->nr);
- batch->nr = 0;
+ for (batch = batch_start; batch; batch = next) {
+ next = batch->next;
+ if (batch->nr) {
+ free_pages_and_swap_cache(batch->pages, batch->nr);
+ batch->nr = 0;
+ }
+ if (free_batch_page)
+ free_pages((unsigned long)batch, 0);
}
+}
+
+static void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+ tlb_flush_mmu_free_batches(&tlb->local, 0);
tlb->active = &tlb->local;
}
@@ -274,17 +285,12 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
*/
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
- struct mmu_gather_batch *batch, *next;
-
tlb_flush_mmu(tlb);
/* keep the page table cache within bounds */
check_pgt_cache();
- for (batch = tlb->local.next; batch; batch = next) {
- next = batch->next;
- free_pages((unsigned long)batch, 0);
- }
+ tlb_flush_mmu_free_batches(tlb->local.next, 1);
tlb->local.next = NULL;
}
--
2.9.3