[PATCH 1/1] kasan: fix livelock in qlist_move_cache

From: zhouzhouyi
Date: Mon Nov 27 2017 - 23:04:31 EST


From: Zhouyi Zhou <zhouzhouyi@xxxxxxxxx>

This patch fixes a livelock in qlist_move_cache() by conditionally
releasing the CPU so that other tasks get a chance to run.
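
The fix follows the common pattern of dropping the spinlock, calling
cond_resched(), and retaking the lock while walking a long list. A minimal
sketch of that pattern for reference only (it is not part of this patch;
the example_* names below are made up for illustration):

#include <linux/spinlock.h>
#include <linux/sched.h>

static DEFINE_SPINLOCK(example_lock);

struct example_node {
	struct example_node *next;
};

/* Hypothetical per-node work; stands in for qlist_put() and friends. */
static void example_process(struct example_node *node)
{
	(void)node;
}

static void example_drain(struct example_node **list_head)
{
	struct example_node *curr;
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* Detach the whole list so the nodes being walked stay private. */
	curr = *list_head;
	*list_head = NULL;

	while (curr) {
		struct example_node *next = curr->next;

		example_process(curr);
		curr = next;

		if (need_resched()) {
			/* Give other tasks a chance to run, then retake the lock. */
			spin_unlock_irqrestore(&example_lock, flags);
			cond_resched();
			spin_lock_irqsave(&example_lock, flags);
		}
	}
	spin_unlock_irqrestore(&example_lock, flags);
}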

Tested on x86_64.
Signed-off-by: Zhouyi Zhou <zhouzhouyi@xxxxxxxxx>
---
mm/kasan/quarantine.c | 12 +++++++++++-
1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
index 3a8ddf8..33eeff4 100644
--- a/mm/kasan/quarantine.c
+++ b/mm/kasan/quarantine.c
@@ -265,10 +265,13 @@ static void qlist_move_cache(struct qlist_head *from,
 				   struct kmem_cache *cache)
 {
 	struct qlist_node *curr;
+	struct qlist_head tmp_head;
+	unsigned long flags;
 
 	if (unlikely(qlist_empty(from)))
 		return;
 
+	qlist_init(&tmp_head);
 	curr = from->head;
 	qlist_init(from);
 	while (curr) {
@@ -278,10 +281,17 @@ static void qlist_move_cache(struct qlist_head *from,
 		if (obj_cache == cache)
 			qlist_put(to, curr, obj_cache->size);
 		else
-			qlist_put(from, curr, obj_cache->size);
+			qlist_put(&tmp_head, curr, obj_cache->size);
 
 		curr = next;
+
+		if (need_resched()) {
+			spin_unlock_irqrestore(&quarantine_lock, flags);
+			cond_resched();
+			spin_lock_irqsave(&quarantine_lock, flags);
+		}
 	}
+	qlist_move_all(&tmp_head, from);
 }
 
 static void per_cpu_remove_cache(void *arg)
--
2.1.4