[PATCH] zram: fix possible race when checking idle_strm

From: Joonsoo Kim
Date: Fri Aug 07 2015 - 04:04:01 EST


Currently, when we enter the wait state due to the lack of an idle stream,
we check the idle_strm list without holding the lock, inside the expansion
of the wait_event macro. In this case, someone can see a stale value and
the process could fall into the wait state without any upcoming wakeup.
Although I didn't see any error related to this race, it should be fixed.

To fix it, we should check idle_strm while holding the lock;
this patch implements that.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
---
drivers/block/zram/zcomp.c | 15 +++++++++++++--
1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 80a62e7..837e9c3 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -76,6 +76,17 @@ static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
return zstrm;
}

+static bool avail_idle_strm(struct zcomp_strm_multi *zs)
+{
+ int avail;
+
+ spin_lock(&zs->strm_lock);
+ avail = !list_empty(&zs->idle_strm);
+ spin_unlock(&zs->strm_lock);
+
+ return avail;
+}
+
/*
* get idle zcomp_strm or wait until other process release
* (zcomp_strm_release()) one for us
@@ -97,7 +108,7 @@ static struct zcomp_strm *zcomp_strm_multi_find(struct zcomp *comp)
/* zstrm streams limit reached, wait for idle stream */
if (zs->avail_strm >= zs->max_strm) {
spin_unlock(&zs->strm_lock);
- wait_event(zs->strm_wait, !list_empty(&zs->idle_strm));
+ wait_event(zs->strm_wait, avail_idle_strm(zs));
continue;
}
/* allocate new zstrm stream */
@@ -109,7 +120,7 @@ static struct zcomp_strm *zcomp_strm_multi_find(struct zcomp *comp)
spin_lock(&zs->strm_lock);
zs->avail_strm--;
spin_unlock(&zs->strm_lock);
- wait_event(zs->strm_wait, !list_empty(&zs->idle_strm));
+ wait_event(zs->strm_wait, avail_idle_strm(zs));
continue;
}
break;
--
1.9.1

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/