[PATCH] zram: add single lock
From: Minchan Kim
Date: Mon Feb 17 2014 - 21:41:11 EST
Signed-off-by: Minchan Kim <minchan@xxxxxxxxxx>
---
drivers/block/zram/zcomp.c | 24 +++++-------------------
drivers/block/zram/zcomp.h | 2 +-
2 files changed, 6 insertions(+), 20 deletions(-)
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index a032f49a52e2..03f4241083ac 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -56,32 +56,18 @@ struct zcomp_strm *zcomp_strm_get(struct zcomp *comp)
 {
 	struct zcomp_strm *zstrm;
 
-	while (1) {
-		spin_lock(&comp->strm_lock);
-		if (list_empty(&comp->idle_strm)) {
-			spin_unlock(&comp->strm_lock);
-			wait_event(comp->strm_wait,
-				!list_empty(&comp->idle_strm));
-			continue;
-		}
-
-		zstrm = list_entry(comp->idle_strm.next,
+	mutex_lock(&comp->strm_lock);
+	zstrm = list_entry(comp->idle_strm.next,
 			struct zcomp_strm, list);
-		list_del(&zstrm->list);
-		spin_unlock(&comp->strm_lock);
-		break;
-	}
+	list_del(&zstrm->list);
 	return zstrm;
 }
 
 /* add zcomp_strm back to idle list and wake up waiter (if any) */
 void zcomp_strm_put(struct zcomp *comp, struct zcomp_strm *zstrm)
 {
-	spin_lock(&comp->strm_lock);
 	list_add(&zstrm->list, &comp->idle_strm);
-	spin_unlock(&comp->strm_lock);
-
-	wake_up(&comp->strm_wait);
+	mutex_unlock(&comp->strm_lock);
 }
 
 int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
@@ -138,7 +124,7 @@ struct zcomp *zcomp_create(const char *compress, int num_strm)
 		return NULL;
 
 	comp->backend = backend;
-	spin_lock_init(&comp->strm_lock);
+	mutex_init(&comp->strm_lock);
 	INIT_LIST_HEAD(&comp->idle_strm);
 	init_waitqueue_head(&comp->strm_wait);
 
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
index 52899de40ca5..bfcec1cf7d2e 100644
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -35,7 +35,7 @@ struct zcomp_backend {
 /* dynamic per-device compression frontend */
 struct zcomp {
 	/* protect strm list */
-	spinlock_t strm_lock;
+	struct mutex strm_lock;
 	/* list of available strms */
 	struct list_head idle_strm;
 	wait_queue_head_t strm_wait;
--
1.8.5.3
As you can see, your patch regressed zram badly when it used a single
stream buffer. My guess is that the overhead comes from the scheduling
(sleep/wakeup) that happens whenever a task cannot find an idle stream,
so I experimented with the simple patch above to restore the old
behavior, and it works well again. The reason the old behavior was so
good is that it uses a mutex with spin_on_owner, so it avoids frequent
sleep/wakeup.
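With a single stream, the patch above effectively turns every compression
into one short mutex critical section, which is the old zram behavior. A
hypothetical caller-side wrapper to illustrate (zram_compress_page() is a
made-up name, and the zcomp_compress() parameters after zstrm are guessed,
since the hunk above truncates its argument list):

/*
 * Sketch only: with one stream, get/put degenerate to a plain
 * mutex_lock()/mutex_unlock() pair around the compression call.
 */
static int zram_compress_page(struct zcomp *comp, const unsigned char *src,
				size_t *dst_len)
{
	struct zcomp_strm *zstrm;
	int ret;

	zstrm = zcomp_strm_get(comp);	/* mutex_lock() + take the only idle stream */
	ret = zcomp_compress(comp, zstrm, src, dst_len);
	zcomp_strm_put(comp, zstrm);	/* put the stream back + mutex_unlock() */
	return ret;
}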
One solution I can think of is to grow the number of buffers dynamically
up to max_buffers (e.g. 2 * number of CPUs) so that each CPU can easily
grab an idle stream, while a shrinker releases buffers when memory
pressure is severe. Of course, we should always keep at least one buffer
so zram can make progress. A rough sketch of the idea follows.
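Untested sketch; comp->avail_strm, comp->max_strm and zcomp_strm_alloc()
are names I am making up here to illustrate the idea, they are not in the
code above:

struct zcomp_strm *zcomp_strm_get(struct zcomp *comp)
{
	struct zcomp_strm *zstrm;

	while (1) {
		spin_lock(&comp->strm_lock);
		if (!list_empty(&comp->idle_strm)) {
			zstrm = list_entry(comp->idle_strm.next,
					struct zcomp_strm, list);
			list_del(&zstrm->list);
			spin_unlock(&comp->strm_lock);
			return zstrm;
		}

		/* no idle stream: grow the pool, up to max_strm (e.g. 2 * nr CPUs) */
		if (comp->avail_strm < comp->max_strm) {
			comp->avail_strm++;
			spin_unlock(&comp->strm_lock);
			zstrm = zcomp_strm_alloc(comp);	/* hypothetical helper */
			if (zstrm)
				return zstrm;
			/* allocation failed: undo and fall back to waiting */
			spin_lock(&comp->strm_lock);
			comp->avail_strm--;
		}
		spin_unlock(&comp->strm_lock);

		wait_event(comp->strm_wait, !list_empty(&comp->idle_strm));
	}
}

A registered shrinker could then walk idle_strm, free streams down to the
minimum of one, and decrement avail_strm when memory pressure is severe.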
Another solution I can imagine is to define CONFIG_ZRAM_MULTI_STREAM and
CONFIG_ZRAM_SINGLE_STREAM (the default), so we don't break the good
performance of the existing single-stream case, while new users who want
parallel writes can select ZRAM_MULTI_STREAM, which would default to
several streams rather than one. We could then enhance it further with
the dynamic control I mentioned above; a sketch of the build-time switch
is below.
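Illustration only: the actual knob would be a Kconfig entry, and
CONFIG_ZRAM_MULTI_STREAM / ZRAM_DEFAULT_MAX_STREAMS are made-up names for
this sketch, not part of the patch above:

#ifdef CONFIG_ZRAM_MULTI_STREAM
/* parallel writers: start with several streams, e.g. one per online CPU */
#define ZRAM_DEFAULT_MAX_STREAMS	num_online_cpus()
#else
/* keep today's fast single-stream behavior: one stream, one mutex */
#define ZRAM_DEFAULT_MAX_STREAMS	1
#endif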
Thoughts?
--
Kind regards,
Minchan Kim