[PATCH 5.17 532/772] crypto: cryptd - Protect per-CPU resource by disabling BH.
From: Greg Kroah-Hartman
Date: Tue Jun 07 2022 - 17:27:13 EST
From: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx>
[ Upstream commit 91e8bcd7b4da182e09ea19a2c73167345fe14c98 ]
The access to cryptd_queue::cpu_queue is synchronized by disabling
preemption in cryptd_enqueue_request() and disabling BH in
cryptd_queue_worker(). This implies that access is allowed from BH.
If cryptd_enqueue_request() is invoked from preemptible context _and_
from soft interrupt on the same CPU, the per-CPU queue list can become
corrupted, since the preemptible-context invocation is not protected
against concurrent access from soft interrupt.
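For illustration only (not part of the upstream change), the old enqueue
path was roughly:

	cpu = get_cpu();	/* disables preemption, not soft interrupts */
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	/*
	 * A soft interrupt can still fire on this CPU, e.g. in the middle
	 * of the list update below, re-enter cryptd_enqueue_request() and
	 * corrupt cpu_queue->queue.
	 */
	err = crypto_enqueue_request(&cpu_queue->queue, request);
	put_cpu();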
Replace get_cpu() in cryptd_enqueue_request() with local_bh_disable()
to ensure BH is always disabled.
Remove preempt_disable() from cryptd_queue_worker() since it is not
needed because local_bh_disable() ensures synchronisation.
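A condensed sketch of the resulting pairing (illustrative, mirroring the
diff below): both sides now access the per-CPU queue with BH disabled, so
the soft-interrupt enqueue path cannot run on this CPU in either window
and preempt_disable() in the worker adds nothing:

	/* enqueue side, cryptd_enqueue_request() */
	local_bh_disable();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);
	local_bh_enable();

	/* dequeue side, cryptd_queue_worker() running in a kworker */
	local_bh_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	local_bh_enable();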
Fixes: 254eff771441 ("crypto: cryptd - Per-CPU thread implementation...")
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx>
Signed-off-by: Herbert Xu <herbert@xxxxxxxxxxxxxxxxxxx>
Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
---
crypto/cryptd.c | 23 +++++++++++------------
1 file changed, 11 insertions(+), 12 deletions(-)
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index a1bea0f4baa8..668095eca0fa 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -39,6 +39,10 @@ struct cryptd_cpu_queue {
 };
 
 struct cryptd_queue {
+	/*
+	 * Protected by disabling BH to allow enqueueing from softinterrupt and
+	 * dequeuing from kworker (cryptd_queue_worker()).
+	 */
 	struct cryptd_cpu_queue __percpu *cpu_queue;
 };
 
@@ -125,28 +129,28 @@ static void cryptd_fini_queue(struct cryptd_queue *queue)
 static int cryptd_enqueue_request(struct cryptd_queue *queue,
 				  struct crypto_async_request *request)
 {
-	int cpu, err;
+	int err;
 	struct cryptd_cpu_queue *cpu_queue;
 	refcount_t *refcnt;
 
-	cpu = get_cpu();
+	local_bh_disable();
 	cpu_queue = this_cpu_ptr(queue->cpu_queue);
 	err = crypto_enqueue_request(&cpu_queue->queue, request);
 
 	refcnt = crypto_tfm_ctx(request->tfm);
 
 	if (err == -ENOSPC)
-		goto out_put_cpu;
+		goto out;
 
-	queue_work_on(cpu, cryptd_wq, &cpu_queue->work);
+	queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);
 
 	if (!refcount_read(refcnt))
-		goto out_put_cpu;
+		goto out;
 
 	refcount_inc(refcnt);
 
-out_put_cpu:
-	put_cpu();
+out:
+	local_bh_enable();
 
 	return err;
 }
@@ -162,15 +166,10 @@ static void cryptd_queue_worker(struct work_struct *work)
 	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
 	/*
 	 * Only handle one request at a time to avoid hogging crypto workqueue.
-	 * preempt_disable/enable is used to prevent being preempted by
-	 * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
-	 * cryptd_enqueue_request() being accessed from software interrupts.
 	 */
 	local_bh_disable();
-	preempt_disable();
 	backlog = crypto_get_backlog(&cpu_queue->queue);
 	req = crypto_dequeue_request(&cpu_queue->queue);
-	preempt_enable();
 	local_bh_enable();
 
 	if (!req)
--
2.35.1