[PATCH v3 2/3] aio: make kioctx->dead boolean
From: Oleg Nesterov
Date: Thu Jun 18 2015 - 13:53:51 EST
"atomic_t dead" makes no sense. atomic_read() is the plain LOAD,
it doesn't have some "additional" synchronization with xchg().
And now that kill_ioctx() sets "dead" under mm->ioctx_lock we do
not even need xchg().
Signed-off-by: Oleg Nesterov <oleg@xxxxxxxxxx>
Reviewed-by: Jeff Moyer <jmoyer@xxxxxxxxxx>
---
fs/aio.c | 9 +++++----
1 files changed, 5 insertions(+), 4 deletions(-)
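
Not part of the patch, just an illustrative sketch: a minimal userspace
analog of the pattern kill_ioctx() uses below (a pthread mutex standing
in for mm->ioctx_lock; all names here are invented for the example). It
shows why a plain bool under a lock gives the same "exactly one caller
wins" guarantee that atomic_xchg() provided:

/* build with: cc -pthread kill_once.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t ioctx_lock = PTHREAD_MUTEX_INITIALIZER;
static bool dead;

/* Returns true for exactly one caller, like !atomic_xchg(&dead, 1). */
static bool kill_once(void)
{
        bool won;

        pthread_mutex_lock(&ioctx_lock);
        won = !dead;    /* plain load, serialized by the lock */
        dead = true;    /* plain store, ditto */
        pthread_mutex_unlock(&ioctx_lock);

        return won;
}

static void *killer(void *arg)
{
        if (kill_once())
                printf("thread %ld did the teardown\n", (long)arg);
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        pthread_create(&t1, NULL, killer, (void *)1L);
        pthread_create(&t2, NULL, killer, (void *)2L);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
}
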
diff --git a/fs/aio.c b/fs/aio.c
index 893d300..d63a889 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -84,7 +84,7 @@ struct ctx_rq_wait {
 
 struct kioctx {
         struct percpu_ref       users;
-        atomic_t                dead;
+        bool                    dead;
 
         struct percpu_ref       reqs;
@@ -765,7 +765,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 err_cleanup:
         aio_nr_sub(ctx->max_reqs);
 err_ctx:
-        atomic_set(&ctx->dead, 1);
+        ctx->dead = true; /* unneeded */
         if (ctx->mmap_size)
                 vm_munmap(ctx->mmap_base, ctx->mmap_size);
         aio_free_ring(ctx);
@@ -790,11 +790,12 @@ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
         struct kioctx_table *table;
 
         spin_lock(&mm->ioctx_lock);
-        if (atomic_xchg(&ctx->dead, 1)) {
+        if (unlikely(ctx->dead)) {
                 spin_unlock(&mm->ioctx_lock);
                 return -EINVAL;
         }
+        ctx->dead = true;
 
         table = rcu_dereference_raw(mm->ioctx_table);
         WARN_ON(ctx != table->table[ctx->id]);
         table->table[ctx->id] = NULL;
@@ -1236,7 +1237,7 @@ static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
         if (ret > 0)
                 *i += ret;
 
-        if (unlikely(atomic_read(&ctx->dead)))
+        if (unlikely(ctx->dead))
                 ret = -EINVAL;
 
         if (!*i)
--
1.5.5.1