[PATCH 16/23] dmaengine: sdxi: Generic descriptor manipulation helpers

From: Nathan Lynch via B4 Relay

Date: Fri Apr 10 2026 - 09:12:27 EST


From: Nathan Lynch <nathan.lynch@xxxxxxx>

Introduce small helper functions for manipulating certain common
properties of descriptors after their operation-specific encoding has
been performed but before they are submitted.

sdxi_desc_set_csb() associates an optional completion status block
with a descriptor.

sdxi_desc_set_fence() forces retirement of any prior descriptors in
the ring before the target descriptor is executed. This is useful for
interrupt descriptors that signal the completion of an operation.

sdxi_desc_set_sequential() ensures that all writes from prior
descriptor operations in the same context are made globally visible
prior to making writes from the target descriptor globally visible.

sdxi_desc_make_valid() sets the descriptor validity bit, transferring
ownership of the descriptor from software to the SDXI
implementation. (The implementation is allowed to execute the
descriptor at this point, but the caller is still obligated to push
the doorbell to ensure execution occurs.)

Each of the preceding functions will warn if invoked on a descriptor
that has already been released to the SDXI implementation (i.e. had
its validity bit set).

Co-developed-by: Wei Huang <wei.huang2@xxxxxxx>
Signed-off-by: Wei Huang <wei.huang2@xxxxxxx>
Signed-off-by: Nathan Lynch <nathan.lynch@xxxxxxx>
---
drivers/dma/sdxi/descriptor.h | 64 +++++++++++++++++++++++++++++++++++++++++++
drivers/dma/sdxi/hw.h | 9 ++++++
2 files changed, 73 insertions(+)

diff --git a/drivers/dma/sdxi/descriptor.h b/drivers/dma/sdxi/descriptor.h
new file mode 100644
index 000000000000..c0f01b1be726
--- /dev/null
+++ b/drivers/dma/sdxi/descriptor.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef DMA_SDXI_DESCRIPTOR_H
+#define DMA_SDXI_DESCRIPTOR_H
+
+/*
+ * Facilities for encoding SDXI descriptors.
+ *
+ * Copyright Advanced Micro Devices, Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/ratelimit.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+#include "hw.h"
+
+/*
+ * Warn (rate limited) if the descriptor's validity (vl) bit differs
+ * from @expected. The helpers below use this to detect attempts to
+ * modify a descriptor after ownership has already passed to the SDXI
+ * implementation (i.e. after sdxi_desc_make_valid()).
+ */
+static inline void sdxi_desc_vl_expect(const struct sdxi_desc *desc, bool expected)
+{
+ u8 vl = FIELD_GET(SDXI_DSC_VL, le32_to_cpu(desc->opcode));
+
+ WARN_RATELIMIT(vl != expected, "expected vl=%u but got %u\n", expected, vl);
+}
+
+/*
+ * Associate an optional completion status block with @desc. @addr is
+ * encoded in 32-byte units (the low 5 bits are dropped by the shift),
+ * so the CSB presumably must be 32-byte aligned -- callers should
+ * ensure this. Must be called before sdxi_desc_make_valid().
+ */
+static inline void sdxi_desc_set_csb(struct sdxi_desc *desc, dma_addr_t addr)
+{
+ sdxi_desc_vl_expect(desc, 0);
+ desc->csb_ptr = cpu_to_le64(FIELD_PREP(SDXI_DSC_CSB_PTR, addr >> 5));
+}
+
+/*
+ * Set the descriptor's validity (vl) bit, transferring ownership of
+ * @desc from software to the SDXI implementation. No field of the
+ * descriptor may be modified after this. The caller is still
+ * responsible for ringing the doorbell to guarantee the device
+ * actually executes the descriptor.
+ */
+static inline void sdxi_desc_make_valid(struct sdxi_desc *desc)
+{
+ u32 opcode = le32_to_cpu(desc->opcode);
+
+ sdxi_desc_vl_expect(desc, 0);
+ FIELD_MODIFY(SDXI_DSC_VL, &opcode, 1);
+ /*
+ * Once vl is set, no more modifications to the descriptor
+ * payload are allowed. Ensure the vl update is ordered after
+ * all other initialization of the descriptor.
+ */
+ dma_wmb();
+ WRITE_ONCE(desc->opcode, cpu_to_le32(opcode));
+}
+
+/*
+ * Set the fence (fe) bit in @desc: prior descriptors in the ring must
+ * be retired before this descriptor is executed. Must be called
+ * before sdxi_desc_make_valid().
+ */
+static inline void sdxi_desc_set_fence(struct sdxi_desc *desc)
+{
+ u32 opcode = le32_to_cpu(desc->opcode);
+
+ sdxi_desc_vl_expect(desc, 0);
+ FIELD_MODIFY(SDXI_DSC_FE, &opcode, 1);
+ desc->opcode = cpu_to_le32(opcode);
+}
+
+/*
+ * Set the sequential (se) bit in @desc: writes from prior descriptor
+ * operations in the same context are made globally visible before any
+ * write from this descriptor becomes globally visible. Must be called
+ * before sdxi_desc_make_valid().
+ */
+static inline void sdxi_desc_set_sequential(struct sdxi_desc *desc)
+{
+ u32 opcode = le32_to_cpu(desc->opcode);
+
+ sdxi_desc_vl_expect(desc, 0);
+ FIELD_MODIFY(SDXI_DSC_SE, &opcode, 1);
+ desc->opcode = cpu_to_le32(opcode);
+}
+
+#endif /* DMA_SDXI_DESCRIPTOR_H */
diff --git a/drivers/dma/sdxi/hw.h b/drivers/dma/sdxi/hw.h
index 46424376f26f..cb1bed2f83f2 100644
--- a/drivers/dma/sdxi/hw.h
+++ b/drivers/dma/sdxi/hw.h
@@ -140,6 +140,15 @@ struct sdxi_desc {
__u8 operation[52];
__le64 csb_ptr;
);
+
+/* For opcode field */
+#define SDXI_DSC_VL BIT(0)
+#define SDXI_DSC_SE BIT(1)
+#define SDXI_DSC_FE BIT(2)
+
+/* For csb_ptr field */
+#define SDXI_DSC_CSB_PTR GENMASK_ULL(63, 5)
+
};
} __packed;
static_assert(sizeof(struct sdxi_desc) == 64);

--
2.53.0