[PATCH v1, 1/1] mailbox: mtk-cmdq: fix gce timeout issue
From: Yongqiang Niu
Date: Sun Aug 14 2022 - 23:08:12 EST
From: Yongqiang Niu <yongqiang.niu@xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx>
1. Enable GCE DDR access (GCE register offset 0x48, bits 16 to 18) when GCE
starts working, and disable it again when the GCE work job is done.
2. Split out cmdq clk enable/disable APIs and control the GCE DDR
enable/disable inside those functions, to make sure the state is protected
when cmdq is used concurrently by display and mdp.
Signed-off-by: Yongqiang Niu <yongqiang.niu@xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Allen-kh Cheng <allen-kh.cheng@xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx>
---
drivers/mailbox/mtk-cmdq-mailbox.c | 57 ++++++++++++++++++++++++++----
1 file changed, 51 insertions(+), 6 deletions(-)
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index 2578e5aaa935..64a47470f062 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -81,6 +81,8 @@ struct cmdq {
u8 shift_pa;
bool control_by_sw;
u32 gce_num;
+ atomic_t usage;
+ spinlock_t lock;
};
struct gce_plat {
@@ -90,6 +92,46 @@ struct gce_plat {
u32 gce_num;
};
+/*
+ * cmdq_clk_enable - take one reference on the GCE clocks.
+ *
+ * The first user additionally grants GCE access to DDR (GCE_GCTL_VALUE,
+ * register offset 0x48, bits 16..18) when the DDR gating is software
+ * controlled.  The refcount test and the register write are serialized
+ * by cmdq->lock so they cannot race with cmdq_clk_disable().
+ *
+ * Return: 0 on success, or the negative error from clk_bulk_enable().
+ */
+static s32 cmdq_clk_enable(struct cmdq *cmdq)
+{
+	s32 usage, ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cmdq->lock, flags);
+
+	usage = atomic_inc_return(&cmdq->usage);
+
+	ret = clk_bulk_enable(cmdq->gce_num, cmdq->clocks);
+	if (usage <= 0 || ret < 0) {
+		dev_err(cmdq->mbox.dev, "ref count %d ret %d suspend %d\n",
+			usage, ret, cmdq->suspended);
+		/*
+		 * Roll back the reference taken above on clock failure so a
+		 * later cmdq_clk_disable() does not underflow the clk refcount
+		 * with an unmatched clk_bulk_disable().
+		 */
+		if (ret < 0)
+			atomic_dec(&cmdq->usage);
+	} else if (usage == 1) {
+		/* first user: allow GCE to access DDR (bits 16..18 + bits 0..2) */
+		if (cmdq->control_by_sw)
+			writel((0x7 << 16) + 0x7, cmdq->base + GCE_GCTL_VALUE);
+	}
+
+	spin_unlock_irqrestore(&cmdq->lock, flags);
+
+	return ret;
+}
+
+/*
+ * cmdq_clk_disable - drop one reference on the GCE clocks.
+ *
+ * The last user revokes GCE's DDR access (GCE_GCTL_VALUE) while the
+ * clocks are still running, then the clocks are gated.  Takes cmdq->lock
+ * to pair with cmdq_clk_enable(): without it, the refcount test and the
+ * GCE_GCTL_VALUE write here could race an enable on another channel and
+ * leave the DDR-enable bits in the wrong state.
+ */
+static void cmdq_clk_disable(struct cmdq *cmdq)
+{
+	s32 usage;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cmdq->lock, flags);
+
+	usage = atomic_dec_return(&cmdq->usage);
+
+	if (usage < 0) {
+		dev_err(cmdq->mbox.dev, "ref count %d suspend %d\n",
+			usage, cmdq->suspended);
+	} else if (usage == 0) {
+		/* last user: remove GCE DDR access before gating the clocks */
+		if (cmdq->control_by_sw)
+			writel(0x7, cmdq->base + GCE_GCTL_VALUE);
+	}
+
+	clk_bulk_disable(cmdq->gce_num, cmdq->clocks);
+
+	spin_unlock_irqrestore(&cmdq->lock, flags);
+}
+
u8 cmdq_get_shift_pa(struct mbox_chan *chan)
{
struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);
@@ -271,7 +313,8 @@ static void cmdq_thread_irq_handler(struct cmdq *cmdq,
if (list_empty(&thread->task_busy_list)) {
cmdq_thread_disable(cmdq, thread);
- clk_bulk_disable(cmdq->gce_num, cmdq->clocks);
+
+ cmdq_clk_disable(cmdq);
}
}
@@ -360,8 +403,7 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
task->pkt = pkt;
if (list_empty(&thread->task_busy_list)) {
- WARN_ON(clk_bulk_enable(cmdq->gce_num, cmdq->clocks));
-
+ WARN_ON(cmdq_clk_enable(cmdq) < 0);
/*
* The thread reset will clear thread related register to 0,
* including pc, end, priority, irq, suspend and enable. Thus
@@ -433,7 +475,7 @@ static void cmdq_mbox_shutdown(struct mbox_chan *chan)
}
cmdq_thread_disable(cmdq, thread);
- clk_bulk_disable(cmdq->gce_num, cmdq->clocks);
+ cmdq_clk_disable(cmdq);
done:
/*
@@ -479,7 +521,8 @@ static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
cmdq_thread_resume(thread);
cmdq_thread_disable(cmdq, thread);
- clk_bulk_disable(cmdq->gce_num, cmdq->clocks);
+
+ cmdq_clk_disable(cmdq);
out:
spin_unlock_irqrestore(&thread->chan->lock, flags);
@@ -490,7 +533,8 @@ static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
spin_unlock_irqrestore(&thread->chan->lock, flags);
if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK,
enable, enable == 0, 1, timeout)) {
- dev_err(cmdq->mbox.dev, "Fail to wait GCE thread 0x%x done\n",
+ dev_err(cmdq->mbox.dev,
+ "Fail to wait GCE thread 0x%x done\n",
(u32)(thread->base - cmdq->base));
return -EFAULT;
@@ -626,6 +670,7 @@ static int cmdq_probe(struct platform_device *pdev)
WARN_ON(clk_bulk_prepare(cmdq->gce_num, cmdq->clocks));
+ spin_lock_init(&cmdq->lock);
cmdq_init(cmdq);
return 0;
--
2.25.1