[PATCH 7/7] mm: add preempt points into __purge_vmap_area_lazy()

From: Christoph Hellwig
Date: Sat Oct 22 2016 - 11:17:55 EST


From: Joel Fernandes <joelaf@xxxxxxxxxx>

Use cond_resched_lock() to avoid holding vmap_area_lock for a
potentially long time while a large batch of lazily-freed vmap areas
is purged, which creates bad latencies for various workloads.  The
vmap_lazy_nr accounting is moved into the freeing loop so the counter
is decremented as each area is freed, rather than in one step before
the lock is taken.
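
For readers unfamiliar with the primitive, here is a conceptual sketch
of the pattern cond_resched_lock() implements, simplified from the
scheduler's actual code (cond_resched_lock_sketch is a made-up name
for illustration only):

	/*
	 * If a reschedule is due or the spinlock is contended, drop
	 * the lock, let other tasks run, then reacquire the lock.
	 * Returns nonzero if we gave the CPU away.
	 */
	static int cond_resched_lock_sketch(spinlock_t *lock)
	{
		if (spin_needbreak(lock) || need_resched()) {
			spin_unlock(lock);
			cond_resched();	/* may call schedule() */
			spin_lock(lock);
			return 1;
		}
		return 0;
	}

Calling this once per freed vmap_area bounds the lock hold time to a
single iteration of the loop instead of the whole purge batch.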

Signed-off-by: Joel Fernandes <joelaf@xxxxxxxxxx>
[hch: split from a larger patch by Joel, wrote the crappy changelog]
Signed-off-by: Christoph Hellwig <hch@xxxxxx>
Tested-by: Jisheng Zhang <jszhang@xxxxxxxxxxx>
---
mm/vmalloc.c | 14 +++++++++-----
1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 23d6797..6c8b921 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -628,7 +628,7 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
struct llist_node *valist;
struct vmap_area *va;
struct vmap_area *n_va;
- int nr = 0;
+ bool do_free = false;

lockdep_assert_held(&vmap_purge_lock);

@@ -638,18 +638,22 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
start = va->va_start;
if (va->va_end > end)
end = va->va_end;
- nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
+ do_free = true;
}

- if (!nr)
+ if (!do_free)
return false;

- atomic_sub(nr, &vmap_lazy_nr);
flush_tlb_kernel_range(start, end);

spin_lock(&vmap_area_lock);
- llist_for_each_entry_safe(va, n_va, valist, purge_list)
+ llist_for_each_entry_safe(va, n_va, valist, purge_list) {
+ int nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
+
__free_vmap_area(va);
+ atomic_sub(nr, &vmap_lazy_nr);
+ cond_resched_lock(&vmap_area_lock);
+ }
spin_unlock(&vmap_area_lock);
return true;
}
--
2.1.4