[PATCH v8 17/20] dlb: add static queue map register operations

From: Mike Ximing Chen
Date: Mon Jan 04 2021 - 22:00:19 EST


Add the register accesses that implement the static queue map operation,
and handle an unmap request that arrives while a queue map operation is
still in progress.
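
As a rough illustration of the unmap case, the toy model below (standalone
code, not driver code; the types and helper are simplified stand-ins for the
driver's real structures) shows the decision dlb_hw_unmap_qid() makes in this
patch: a slot that is still in the MAP_IN_PROG state is moved straight back
to UNMAPPED, and the queue's inflight limit is restored once no other map
additions are pending for that queue.

#include <stdbool.h>
#include <stdio.h>

enum slot_state { QUEUE_UNMAPPED, QUEUE_MAP_IN_PROG, QUEUE_MAPPED };

struct toy_queue {
	int num_pending_additions;	/* maps still awaiting completion */
	bool inflight_limit_enabled;
};

/*
 * Abort an in-progress map: the slot returns to UNMAPPED without ever
 * having carried traffic, and the queue's inflights are re-enabled.
 */
static void unmap_pending_slot(enum slot_state *slot, struct toy_queue *q)
{
	if (*slot != QUEUE_MAP_IN_PROG)
		return;

	if (q->num_pending_additions == 0)
		q->inflight_limit_enabled = true;

	*slot = QUEUE_UNMAPPED;
}

int main(void)
{
	enum slot_state slot = QUEUE_MAP_IN_PROG;
	struct toy_queue q = { .num_pending_additions = 0,
			       .inflight_limit_enabled = false };

	unmap_pending_slot(&slot, &q);
	printf("slot=%d, inflight limit enabled=%d\n", slot,
	       q.inflight_limit_enabled);
	return 0;
}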

If a queue map operation is requested before the domain is started, it can
be performed synchronously, since the hardware is "static" (no traffic is
flowing through the device yet). (The "dynamic" map operation, used while
traffic is flowing in the device, will be added in a later commit.)
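
For the static case, the core of the register programming is a
read-modify-write of the port's LSP_CQ2PRIOV register, which packs one valid
bit and one 3-bit priority field per QID-map slot (eight slots per
load-balanced port). The standalone sketch below models that packing; the
*_LOC bit positions are assumptions chosen for illustration, not values
taken from the DLB register specification.

#include <stdint.h>
#include <stdio.h>

#define NUM_SLOTS	8
#define PRIOV_V_LOC	24	/* assumed start of the eight valid bits */
#define PRIOV_PRIO_LOC	0	/* assumed start of the 8 x 3-bit priority fields */

/* Read-modify-write one slot's valid bit and priority field. */
static uint32_t cq2priov_set_slot(uint32_t reg, int slot, uint8_t prio)
{
	reg &= ~(0x7u << (slot * 3 + PRIOV_PRIO_LOC));	/* clear the old priority */
	reg |= (uint32_t)(prio & 0x7u) << (slot * 3 + PRIOV_PRIO_LOC);
	reg |= 1u << (slot + PRIOV_V_LOC);		/* mark the slot valid */
	return reg;
}

int main(void)
{
	uint32_t reg = 0;
	int slot;

	/* Map slot 0 at priority 3 and slot 5 at priority 7. */
	reg = cq2priov_set_slot(reg, 0, 3);
	reg = cq2priov_set_slot(reg, 5, 7);

	for (slot = 0; slot < NUM_SLOTS; slot++)
		printf("slot %d: valid=%u prio=%u\n", slot,
		       (reg >> (slot + PRIOV_V_LOC)) & 1u,
		       (reg >> (slot * 3 + PRIOV_PRIO_LOC)) & 0x7u);

	return 0;
}

Unlike the sketch, dlb_ldb_port_map_qid_static() below only ORs the new
priority value in, relying on a previously unused slot's field being zero.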

Signed-off-by: Gage Eads <gage.eads@xxxxxxxxx>
Signed-off-by: Mike Ximing Chen <mike.ximing.chen@xxxxxxxxx>
Reviewed-by: Björn Töpel <bjorn.topel@xxxxxxxxx>
Reviewed-by: Dan Williams <dan.j.williams@xxxxxxxxx>
---
drivers/misc/dlb/dlb_resource.c | 146 +++++++++++++++++++++++++++++++-
1 file changed, 143 insertions(+), 3 deletions(-)

diff --git a/drivers/misc/dlb/dlb_resource.c b/drivers/misc/dlb/dlb_resource.c
index 3acb9ada964e..67ecbd4150c5 100644
--- a/drivers/misc/dlb/dlb_resource.c
+++ b/drivers/misc/dlb/dlb_resource.c
@@ -2208,12 +2208,146 @@ static int dlb_configure_dir_port(struct dlb_hw *hw,
return 0;
}

+static int dlb_ldb_port_map_qid_static(struct dlb_hw *hw,
+				       struct dlb_ldb_port *p,
+				       struct dlb_ldb_queue *q,
+				       u8 priority)
+{
+	enum dlb_qid_map_state state;
+	u32 lsp_qid2cq2;
+	u32 lsp_qid2cq;
+	u32 atm_qid2cq;
+	u32 cq2priov;
+	u32 cq2qid;
+	int i;
+
+	/* Look for a pending or already mapped slot, else an unused slot */
+	if (!dlb_port_find_slot_queue(p, DLB_QUEUE_MAP_IN_PROG, q, &i) &&
+	    !dlb_port_find_slot_queue(p, DLB_QUEUE_MAPPED, q, &i) &&
+	    !dlb_port_find_slot(p, DLB_QUEUE_UNMAPPED, &i)) {
+		DLB_HW_ERR(hw,
+			   "[%s():%d] Internal error: CQ has no available QID mapping slots\n",
+			   __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	/* Read-modify-write the priority and valid bit register */
+	cq2priov = DLB_CSR_RD(hw, LSP_CQ2PRIOV(p->id.phys_id));
+
+	cq2priov |= (1U << (i + LSP_CQ2PRIOV_V_LOC)) & LSP_CQ2PRIOV_V;
+	cq2priov |= ((priority & 0x7) << (i + LSP_CQ2PRIOV_PRIO_LOC) * 3)
+		    & LSP_CQ2PRIOV_PRIO;
+
+	DLB_CSR_WR(hw, LSP_CQ2PRIOV(p->id.phys_id), cq2priov);
+
+	/* Read-modify-write the QID map register */
+	if (i < 4)
+		cq2qid = DLB_CSR_RD(hw, LSP_CQ2QID0(p->id.phys_id));
+	else
+		cq2qid = DLB_CSR_RD(hw, LSP_CQ2QID1(p->id.phys_id));
+
+	if (i == 0 || i == 4)
+		BITS_SET(cq2qid, q->id.phys_id, LSP_CQ2QID0_QID_P0);
+	if (i == 1 || i == 5)
+		BITS_SET(cq2qid, q->id.phys_id, LSP_CQ2QID0_QID_P1);
+	if (i == 2 || i == 6)
+		BITS_SET(cq2qid, q->id.phys_id, LSP_CQ2QID0_QID_P2);
+	if (i == 3 || i == 7)
+		BITS_SET(cq2qid, q->id.phys_id, LSP_CQ2QID0_QID_P3);
+
+	if (i < 4)
+		DLB_CSR_WR(hw, LSP_CQ2QID0(p->id.phys_id), cq2qid);
+	else
+		DLB_CSR_WR(hw, LSP_CQ2QID1(p->id.phys_id), cq2qid);
+
+	atm_qid2cq = DLB_CSR_RD(hw,
+				ATM_QID2CQIDIX(q->id.phys_id,
+					       p->id.phys_id / 4));
+
+	lsp_qid2cq = DLB_CSR_RD(hw,
+				LSP_QID2CQIDIX(q->id.phys_id,
+					       p->id.phys_id / 4));
+
+	lsp_qid2cq2 = DLB_CSR_RD(hw,
+				 LSP_QID2CQIDIX2(q->id.phys_id,
+						 p->id.phys_id / 4));
+
+	switch (p->id.phys_id % 4) {
+	case 0:
+		BIT_SET(atm_qid2cq, 1 << (i + ATM_QID2CQIDIX_00_CQ_P0_LOC));
+		BIT_SET(lsp_qid2cq, 1 << (i + LSP_QID2CQIDIX_00_CQ_P0_LOC));
+		BIT_SET(lsp_qid2cq2, 1 << (i + LSP_QID2CQIDIX2_00_CQ_P0_LOC));
+		break;
+
+	case 1:
+		BIT_SET(atm_qid2cq, 1 << (i + ATM_QID2CQIDIX_00_CQ_P1_LOC));
+		BIT_SET(lsp_qid2cq, 1 << (i + LSP_QID2CQIDIX_00_CQ_P1_LOC));
+		BIT_SET(lsp_qid2cq2, 1 << (i + LSP_QID2CQIDIX2_00_CQ_P1_LOC));
+		break;
+
+	case 2:
+		BIT_SET(atm_qid2cq, 1 << (i + ATM_QID2CQIDIX_00_CQ_P2_LOC));
+		BIT_SET(lsp_qid2cq, 1 << (i + LSP_QID2CQIDIX_00_CQ_P2_LOC));
+		BIT_SET(lsp_qid2cq2, 1 << (i + LSP_QID2CQIDIX2_00_CQ_P2_LOC));
+		break;
+
+	case 3:
+		BIT_SET(atm_qid2cq, 1 << (i + ATM_QID2CQIDIX_00_CQ_P3_LOC));
+		BIT_SET(lsp_qid2cq, 1 << (i + LSP_QID2CQIDIX_00_CQ_P3_LOC));
+		BIT_SET(lsp_qid2cq2, 1 << (i + LSP_QID2CQIDIX2_00_CQ_P3_LOC));
+		break;
+	}
+
+	DLB_CSR_WR(hw,
+		   ATM_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
+		   atm_qid2cq);
+
+	DLB_CSR_WR(hw,
+		   LSP_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
+		   lsp_qid2cq);
+
+	DLB_CSR_WR(hw,
+		   LSP_QID2CQIDIX2(q->id.phys_id, p->id.phys_id / 4),
+		   lsp_qid2cq2);
+
+	dlb_flush_csr(hw);
+
+	p->qid_map[i].qid = q->id.phys_id;
+	p->qid_map[i].priority = priority;
+
+	state = DLB_QUEUE_MAPPED;
+
+	return dlb_port_slot_state_transition(hw, p, q, i, state);
+}
+
static void dlb_ldb_port_change_qid_priority(struct dlb_hw *hw,
struct dlb_ldb_port *port,
int slot,
struct dlb_map_qid_args *args)
{
-	/* Placeholder */
+	u32 cq2priov;
+
+	/* Read-modify-write the priority and valid bit register */
+	cq2priov = DLB_CSR_RD(hw, LSP_CQ2PRIOV(port->id.phys_id));
+
+	cq2priov |= (1 << (slot + LSP_CQ2PRIOV_V_LOC)) & LSP_CQ2PRIOV_V;
+	cq2priov |= ((args->priority & 0x7) << slot * 3) & LSP_CQ2PRIOV_PRIO;
+
+	DLB_CSR_WR(hw, LSP_CQ2PRIOV(port->id.phys_id), cq2priov);
+
+	dlb_flush_csr(hw);
+
+	port->qid_map[slot].priority = args->priority;
+}
+
+static void dlb_ldb_queue_set_inflight_limit(struct dlb_hw *hw,
+					     struct dlb_ldb_queue *queue)
+{
+	u32 infl_lim = 0;
+
+	BITS_SET(infl_lim, queue->num_qid_inflights, LSP_QID_LDB_INFL_LIM_LIMIT);
+
+	DLB_CSR_WR(hw, LSP_QID_LDB_INFL_LIM(queue->id.phys_id), infl_lim);
}

static int dlb_ldb_port_map_qid(struct dlb_hw *hw,
@@ -2222,8 +2356,7 @@ static int dlb_ldb_port_map_qid(struct dlb_hw *hw,
struct dlb_ldb_queue *queue,
u8 prio)
{
-	/* Placeholder */
-	return 0;
+	return dlb_ldb_port_map_qid_static(hw, port, queue, prio);
}

static void
@@ -3071,6 +3204,13 @@ int dlb_hw_unmap_qid(struct dlb_hw *hw,
*/
st = DLB_QUEUE_MAP_IN_PROG;
if (dlb_port_find_slot_queue(port, st, queue, &i)) {
+		/*
+		 * Since the in-progress map was aborted, re-enable the QID's
+		 * inflights.
+		 */
+		if (queue->num_pending_additions == 0)
+			dlb_ldb_queue_set_inflight_limit(hw, queue);
+
st = DLB_QUEUE_UNMAPPED;
ret = dlb_port_slot_state_transition(hw, port, queue, i, st);
if (ret)
--
2.17.1