[PATCH 03/16] i3c: mipi-i3c-hci: Prevent DMA enqueue while ring is aborting or in error

From: Adrian Hunter

Date: Thu Apr 16 2026 - 13:57:55 EST


Block the DMA enqueue path while a ring abort is in progress or after an
error condition has been detected.

Previously, new transfers could be enqueued while the DMA ring was being
aborted or while error handling was underway. This allowed enqueue and
error-recovery paths to run concurrently, potentially interfering with
each other and corrupting ring state.

Introduce explicit enqueue blocking and a wait queue to serialize access:
enqueue operations now wait until abort or error handling has completed
before proceeding. Enqueue is unblocked once the ring is safely restarted.

Signed-off-by: Adrian Hunter <adrian.hunter@xxxxxxxxx>
---
drivers/i3c/master/mipi-i3c-hci/core.c | 1 +
drivers/i3c/master/mipi-i3c-hci/dma.c | 25 +++++++++++++++++++++++--
drivers/i3c/master/mipi-i3c-hci/hci.h | 2 ++
3 files changed, 26 insertions(+), 2 deletions(-)

diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c
index bb8f2d830b0d..5e1bc6d819cf 100644
--- a/drivers/i3c/master/mipi-i3c-hci/core.c
+++ b/drivers/i3c/master/mipi-i3c-hci/core.c
@@ -976,6 +976,7 @@ static int i3c_hci_probe(struct platform_device *pdev)

spin_lock_init(&hci->lock);
mutex_init(&hci->control_mutex);
+ init_waitqueue_head(&hci->enqueue_wait_queue);

/*
* Multi-bus instances share the same MMIO address range, but not
diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
index 4cd32e3afa7b..314635e6e190 100644
--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
+++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
@@ -484,6 +484,12 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,

spin_lock_irq(&hci->lock);

+ while (unlikely(hci->enqueue_blocked)) {
+ spin_unlock_irq(&hci->lock);
+ wait_event(hci->enqueue_wait_queue, !READ_ONCE(hci->enqueue_blocked));
+ spin_lock_irq(&hci->lock);
+ }
+
if (n > rh->xfer_space) {
spin_unlock_irq(&hci->lock);
hci_dma_unmap_xfer(hci, xfer_list, n);
@@ -539,6 +545,14 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
return 0;
}

+static void hci_dma_unblock_enqueue(struct i3c_hci *hci)
+{
+ if (hci->enqueue_blocked) {
+ hci->enqueue_blocked = false;
+ wake_up_all(&hci->enqueue_wait_queue);
+ }
+}
+
static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
struct hci_xfer *xfer_list, int n)
{
@@ -550,12 +564,17 @@ static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,

guard(mutex)(&hci->control_mutex);

+ spin_lock_irq(&hci->lock);
+
ring_status = rh_reg_read(RING_STATUS);
if (ring_status & RING_STATUS_RUNNING) {
+ hci->enqueue_blocked = true;
+ spin_unlock_irq(&hci->lock);
/* stop the ring */
reinit_completion(&rh->op_done);
rh_reg_write(RING_CONTROL, rh_reg_read(RING_CONTROL) | RING_CTRL_ABORT);
wait_for_completion_timeout(&rh->op_done, HZ);
+ spin_lock_irq(&hci->lock);
ring_status = rh_reg_read(RING_STATUS);
if (ring_status & RING_STATUS_RUNNING) {
/*
@@ -567,8 +586,6 @@ static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
}
}

- spin_lock_irq(&hci->lock);
-
for (i = 0; i < n; i++) {
struct hci_xfer *xfer = xfer_list + i;
int idx = xfer->ring_entry;
@@ -604,6 +621,8 @@ static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE | RING_CTRL_RUN_STOP);

+ hci_dma_unblock_enqueue(hci);
+
spin_unlock_irq(&hci->lock);

return did_unqueue;
@@ -647,6 +666,8 @@ static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
}
if (xfer->completion)
complete(xfer->completion);
+ if (RESP_STATUS(resp))
+ hci->enqueue_blocked = true;
}

done_ptr = (done_ptr + 1) % rh->xfer_entries;
diff --git a/drivers/i3c/master/mipi-i3c-hci/hci.h b/drivers/i3c/master/mipi-i3c-hci/hci.h
index f17f43494c1b..d630400ec945 100644
--- a/drivers/i3c/master/mipi-i3c-hci/hci.h
+++ b/drivers/i3c/master/mipi-i3c-hci/hci.h
@@ -54,6 +54,8 @@ struct i3c_hci {
struct mutex control_mutex;
atomic_t next_cmd_tid;
bool irq_inactive;
+ bool enqueue_blocked;
+ wait_queue_head_t enqueue_wait_queue;
u32 caps;
unsigned int quirks;
unsigned int DAT_entries;
--
2.51.0