[PATCH 04/23] io_uring: deduce cq_mask from cq_entries

From: Pavel Begunkov
Date: Wed May 19 2021 - 10:14:27 EST


No need to cache cq_mask: it is always cq_entries - 1, so deduce it on
the fly instead of carrying it around. Do the same for sq_mask, which
is likewise sq_entries - 1.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
fs/io_uring.c | 12 ++++--------
1 file changed, 4 insertions(+), 8 deletions(-)
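
A note on the pattern, for reviewers (a minimal userspace sketch, not
io_uring code; names are illustrative): both rings are sized to a power
of two, so entries - 1 is an all-ones mask and "index & mask" is a
cheap "index % entries" that also survives unsigned wrap-around of the
cached head/tail counters, which is why the mask can always be derived
rather than cached:

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned entries = 8;		/* power of two, as for the rings */
		unsigned mask = entries - 1;	/* 0b111 */

		/* sanity check: masking only works for power-of-two sizes */
		assert((entries & (entries - 1)) == 0);

		/* let the counter overflow: slots still advance 4,5,6,7,0,1,2,3 */
		for (unsigned tail = 4294967292u; tail != 4; tail++)
			printf("tail=%u slot=%u\n", tail, tail & mask);
		return 0;
	}

Userspace is unaffected: rings->sq_ring_mask and rings->cq_ring_mask
are still filled in below, only the kernel-private copies go away.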

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 15dc5dad1f7d..067c89e63fea 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -361,7 +361,6 @@ struct io_ring_ctx {
u32 *sq_array;
unsigned cached_sq_head;
unsigned sq_entries;
- unsigned sq_mask;
unsigned sq_thread_idle;
unsigned cached_sq_dropped;
unsigned cached_cq_overflow;
@@ -407,7 +406,6 @@ struct io_ring_ctx {
struct {
unsigned cached_cq_tail;
unsigned cq_entries;
- unsigned cq_mask;
atomic_t cq_timeouts;
unsigned cq_last_tm_flush;
unsigned cq_extra;
@@ -1363,7 +1361,7 @@ static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
static inline struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
{
struct io_rings *rings = ctx->rings;
- unsigned tail;
+ unsigned tail, mask = ctx->cq_entries - 1;

/*
* writes to the cq entry need to come after reading head; the
@@ -1374,7 +1372,7 @@ static inline struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
return NULL;

tail = ctx->cached_cq_tail++;
- return &rings->cqes[tail & ctx->cq_mask];
+ return &rings->cqes[tail & mask];
}

static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
@@ -6677,7 +6675,7 @@ static void io_commit_sqring(struct io_ring_ctx *ctx)
static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
{
u32 *sq_array = ctx->sq_array;
- unsigned head;
+ unsigned head, mask = ctx->sq_entries - 1;

/*
* The cached sq head (or cq tail) serves two purposes:
@@ -6687,7 +6685,7 @@ static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
* 2) allows the kernel side to track the head on its own, even
* though the application is the one updating it.
*/
- head = READ_ONCE(sq_array[ctx->cached_sq_head++ & ctx->sq_mask]);
+ head = READ_ONCE(sq_array[ctx->cached_sq_head++ & mask]);
if (likely(head < ctx->sq_entries))
return &ctx->sq_sqes[head];

@@ -9493,8 +9491,6 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
rings->cq_ring_mask = p->cq_entries - 1;
rings->sq_ring_entries = p->sq_entries;
rings->cq_ring_entries = p->cq_entries;
- ctx->sq_mask = rings->sq_ring_mask;
- ctx->cq_mask = rings->cq_ring_mask;

size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
if (size == SIZE_MAX) {
--
2.31.1