[PATCH 5.14 030/168] drm/etnaviv: reference MMU context when setting up hardware state
From: Greg Kroah-Hartman
Date: Mon Sep 20 2021 - 14:44:05 EST
From: Lucas Stach <l.stach@xxxxxxxxxxxxxx>
commit d6408538f091fb22d47f792d4efa58143d56c3fb upstream.

Move the refcount manipulation of the MMU context to the point where the
hardware state is programmed. At that point it is also known whether a
previous MMU state is still in place or whether the state needs to be
reprogrammed with a potentially different context.
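
A minimal, self-contained sketch of the refcount handoff described above
(an illustrative userspace model only, not the kernel code changed below;
struct mmu_context, context_get()/context_put() and mmu_restore() stand in
for the etnaviv structures, etnaviv_iommu_context_get()/_put() and the
restore paths that program the MMU registers):

  #include <assert.h>
  #include <stdio.h>

  struct mmu_context {
      int refcount;                /* a kref in the driver; plain int here */
      const char *name;
  };

  struct gpu {
      struct mmu_context *mmu_context;     /* context currently programmed */
  };

  static struct mmu_context *context_get(struct mmu_context *ctx)
  {
      ctx->refcount++;
      return ctx;
  }

  static void context_put(struct mmu_context *ctx)
  {
      assert(ctx->refcount > 0);
      ctx->refcount--;
  }

  /* Analog of the restore paths after this patch: drop the reference held
   * for the previously programmed context, take one for the context that
   * is about to be programmed, then set up the hardware state. */
  static void mmu_restore(struct gpu *gpu, struct mmu_context *ctx)
  {
      if (gpu->mmu_context)
          context_put(gpu->mmu_context);
      gpu->mmu_context = context_get(ctx);
      printf("programming MMU state for context %s\n", ctx->name);
  }

  int main(void)
  {
      struct mmu_context a = { .refcount = 1, .name = "a" };
      struct mmu_context b = { .refcount = 1, .name = "b" };
      struct gpu gpu = { 0 };

      mmu_restore(&gpu, &a);       /* a: 1 -> 2 */
      mmu_restore(&gpu, &b);       /* a: 2 -> 1, b: 1 -> 2 */
      printf("refcounts: a=%d b=%d\n", a.refcount, b.refcount);
      return 0;
  }
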
Cc: stable@xxxxxxxxxxxxxxx # 5.4
Signed-off-by: Lucas Stach <l.stach@xxxxxxxxxxxxxx>
Tested-by: Michael Walle <michael@xxxxxxxx>
Tested-by: Marek Vasut <marex@xxxxxxx>
Reviewed-by: Christian Gmeiner <christian.gmeiner@xxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/etnaviv/etnaviv_gpu.c      | 24 ++++++++++++------------
 drivers/gpu/drm/etnaviv/etnaviv_iommu.c    |  4 ++++
 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c |  8 ++++++++
 3 files changed, 24 insertions(+), 12 deletions(-)

--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -641,17 +641,19 @@ void etnaviv_gpu_start_fe(struct etnaviv
 	gpu->fe_running = true;
 }
 
-static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu)
+static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
+					  struct etnaviv_iommu_context *context)
 {
-	u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer,
-				&gpu->mmu_context->cmdbuf_mapping);
 	u16 prefetch;
+	u32 address;
 
 	/* setup the MMU */
-	etnaviv_iommu_restore(gpu, gpu->mmu_context);
+	etnaviv_iommu_restore(gpu, context);
 
 	/* Start command processor */
 	prefetch = etnaviv_buffer_init(gpu);
+	address = etnaviv_cmdbuf_get_va(&gpu->buffer,
+					&gpu->mmu_context->cmdbuf_mapping);
 
 	etnaviv_gpu_start_fe(gpu, address, prefetch);
 }
@@ -1369,14 +1371,12 @@ struct dma_fence *etnaviv_gpu_submit(str
 		goto out_unlock;
 	}
 
-	if (!gpu->fe_running) {
-		gpu->mmu_context = etnaviv_iommu_context_get(submit->mmu_context);
-		etnaviv_gpu_start_fe_idleloop(gpu);
-	} else {
-		if (submit->prev_mmu_context)
-			etnaviv_iommu_context_put(submit->prev_mmu_context);
-		submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
-	}
+	if (!gpu->fe_running)
+		etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
+
+	if (submit->prev_mmu_context)
+		etnaviv_iommu_context_put(submit->prev_mmu_context);
+	submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
 
 	if (submit->nr_pmrs) {
 		gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -92,6 +92,10 @@ static void etnaviv_iommuv1_restore(stru
 	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
 	u32 pgtable;
 
+	if (gpu->mmu_context)
+		etnaviv_iommu_context_put(gpu->mmu_context);
+	gpu->mmu_context = etnaviv_iommu_context_get(context);
+
 	/* set base addresses */
 	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
 	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -172,6 +172,10 @@ static void etnaviv_iommuv2_restore_nons
 	if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
 		return;
 
+	if (gpu->mmu_context)
+		etnaviv_iommu_context_put(gpu->mmu_context);
+	gpu->mmu_context = etnaviv_iommu_context_get(context);
+
 	prefetch = etnaviv_buffer_config_mmuv2(gpu,
 				(u32)v2_context->mtlb_dma,
 				(u32)context->global->bad_page_dma);
@@ -192,6 +196,10 @@ static void etnaviv_iommuv2_restore_sec(
 	if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
 		return;
 
+	if (gpu->mmu_context)
+		etnaviv_iommu_context_put(gpu->mmu_context);
+	gpu->mmu_context = etnaviv_iommu_context_get(context);
+
 	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
 		  lower_32_bits(context->global->v2.pta_dma));
 	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,