[PATCH v2 5/8] drm/panthor: Use a local iomem base for GPU registers

From: Karunika Choo

Date: Sun Apr 12 2026 - 10:37:56 EST


Add a GPU_CONTROL-local iomem pointer to struct panthor_gpu and use it
for GPU register accesses.

This limits GPU register accesses to the GPU block instead of using the
device-wide MMIO mapping directly. Interrupt register accesses continue
to use the IRQ-local base provided by the common IRQ helpers. Update
panthor_gpu_info_init() to also use a block-local iomem pointer when
reading the GPU feature and capability registers.

This is a refactoring only and does not change behaviour.

v2:
- Update panthor_gpu_info_init() to use block-local iomem pointer.

Signed-off-by: Karunika Choo <karunika.choo@xxxxxxx>
---
drivers/gpu/drm/panthor/panthor_gpu.c | 61 +++++++++++++---------
drivers/gpu/drm/panthor/panthor_gpu_regs.h | 4 --
drivers/gpu/drm/panthor/panthor_hw.c | 38 +++++++-------
3 files changed, 56 insertions(+), 47 deletions(-)

diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c
index f00f3d9be240..e52c5675981f 100644
--- a/drivers/gpu/drm/panthor/panthor_gpu.c
+++ b/drivers/gpu/drm/panthor/panthor_gpu.c
@@ -29,6 +29,9 @@
* struct panthor_gpu - GPU block management data.
*/
struct panthor_gpu {
+ /** @iomem: CPU mapping of GPU_CONTROL iomem region */
+ void __iomem *iomem;
+
/** @irq: GPU irq. */
struct panthor_irq irq;

@@ -56,12 +59,13 @@ struct panthor_gpu {

static void panthor_gpu_coherency_set(struct panthor_device *ptdev)
{
- gpu_write(ptdev->iomem, GPU_COHERENCY_PROTOCOL,
+ gpu_write(ptdev->gpu->iomem, GPU_COHERENCY_PROTOCOL,
ptdev->gpu_info.selected_coherency);
}

static void panthor_gpu_l2_config_set(struct panthor_device *ptdev)
{
+ struct panthor_gpu *gpu = ptdev->gpu;
const struct panthor_soc_data *data = ptdev->soc_data;
u32 l2_config;
u32 i;
@@ -75,26 +79,28 @@ static void panthor_gpu_l2_config_set(struct panthor_device *ptdev)
}

for (i = 0; i < ARRAY_SIZE(data->asn_hash); i++)
- gpu_write(ptdev->iomem, GPU_ASN_HASH(i), data->asn_hash[i]);
+ gpu_write(gpu->iomem, GPU_ASN_HASH(i), data->asn_hash[i]);

- l2_config = gpu_read(ptdev->iomem, GPU_L2_CONFIG);
+ l2_config = gpu_read(gpu->iomem, GPU_L2_CONFIG);
l2_config |= GPU_L2_CONFIG_ASN_HASH_ENABLE;
- gpu_write(ptdev->iomem, GPU_L2_CONFIG, l2_config);
+ gpu_write(gpu->iomem, GPU_L2_CONFIG, l2_config);
}

static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status)
{
- gpu_write(ptdev->iomem, GPU_INT_CLEAR, status);
+ struct panthor_gpu *gpu = ptdev->gpu;
+
+ gpu_write(gpu->irq.iomem, INT_CLEAR, status);

if (tracepoint_enabled(gpu_power_status) && (status & GPU_POWER_INTERRUPTS_MASK))
trace_gpu_power_status(ptdev->base.dev,
- gpu_read64(ptdev->iomem, SHADER_READY),
- gpu_read64(ptdev->iomem, TILER_READY),
- gpu_read64(ptdev->iomem, L2_READY));
+ gpu_read64(gpu->iomem, SHADER_READY),
+ gpu_read64(gpu->iomem, TILER_READY),
+ gpu_read64(gpu->iomem, L2_READY));

if (status & GPU_IRQ_FAULT) {
- u32 fault_status = gpu_read(ptdev->iomem, GPU_FAULT_STATUS);
- u64 address = gpu_read64(ptdev->iomem, GPU_FAULT_ADDR);
+ u32 fault_status = gpu_read(gpu->iomem, GPU_FAULT_STATUS);
+ u64 address = gpu_read64(gpu->iomem, GPU_FAULT_ADDR);

drm_warn(&ptdev->base, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
fault_status, panthor_exception_name(ptdev, fault_status & 0xFF),
@@ -147,6 +153,7 @@ int panthor_gpu_init(struct panthor_device *ptdev)
if (!gpu)
return -ENOMEM;

+ gpu->iomem = ptdev->iomem + GPU_CONTROL_BASE;
spin_lock_init(&gpu->reqs_lock);
init_waitqueue_head(&gpu->reqs_acked);
mutex_init(&gpu->cache_flush_lock);
@@ -203,10 +210,11 @@ int panthor_gpu_block_power_off(struct panthor_device *ptdev,
u32 pwroff_reg, u32 pwrtrans_reg,
u64 mask, u32 timeout_us)
{
+ struct panthor_gpu *gpu = ptdev->gpu;
u32 val;
int ret;

- ret = gpu_read64_relaxed_poll_timeout(ptdev->iomem, pwrtrans_reg, val,
+ ret = gpu_read64_relaxed_poll_timeout(gpu->iomem, pwrtrans_reg, val,
!(mask & val), 100, timeout_us);
if (ret) {
drm_err(&ptdev->base,
@@ -215,9 +223,9 @@ int panthor_gpu_block_power_off(struct panthor_device *ptdev,
return ret;
}

- gpu_write64(ptdev->iomem, pwroff_reg, mask);
+ gpu_write64(gpu->iomem, pwroff_reg, mask);

- ret = gpu_read64_relaxed_poll_timeout(ptdev->iomem, pwrtrans_reg, val,
+ ret = gpu_read64_relaxed_poll_timeout(gpu->iomem, pwrtrans_reg, val,
!(mask & val), 100, timeout_us);
if (ret) {
drm_err(&ptdev->base,
@@ -246,10 +254,11 @@ int panthor_gpu_block_power_on(struct panthor_device *ptdev,
u32 pwron_reg, u32 pwrtrans_reg,
u32 rdy_reg, u64 mask, u32 timeout_us)
{
+ struct panthor_gpu *gpu = ptdev->gpu;
u32 val;
int ret;

- ret = gpu_read64_relaxed_poll_timeout(ptdev->iomem, pwrtrans_reg, val,
+ ret = gpu_read64_relaxed_poll_timeout(gpu->iomem, pwrtrans_reg, val,
!(mask & val), 100, timeout_us);
if (ret) {
drm_err(&ptdev->base,
@@ -258,9 +267,9 @@ int panthor_gpu_block_power_on(struct panthor_device *ptdev,
return ret;
}

- gpu_write64(ptdev->iomem, pwron_reg, mask);
+ gpu_write64(gpu->iomem, pwron_reg, mask);

- ret = gpu_read64_relaxed_poll_timeout(ptdev->iomem, rdy_reg, val,
+ ret = gpu_read64_relaxed_poll_timeout(gpu->iomem, rdy_reg, val,
(mask & val) == val,
100, timeout_us);
if (ret) {
@@ -319,6 +328,7 @@ int panthor_gpu_l2_power_on(struct panthor_device *ptdev)
int panthor_gpu_flush_caches(struct panthor_device *ptdev,
u32 l2, u32 lsc, u32 other)
{
+ struct panthor_gpu *gpu = ptdev->gpu;
unsigned long flags;
int ret = 0;

@@ -328,7 +338,7 @@ int panthor_gpu_flush_caches(struct panthor_device *ptdev,
spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
if (!(ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED)) {
ptdev->gpu->pending_reqs |= GPU_IRQ_CLEAN_CACHES_COMPLETED;
- gpu_write(ptdev->iomem, GPU_CMD, GPU_FLUSH_CACHES(l2, lsc, other));
+ gpu_write(gpu->iomem, GPU_CMD, GPU_FLUSH_CACHES(l2, lsc, other));
} else {
ret = -EIO;
}
@@ -342,7 +352,7 @@ int panthor_gpu_flush_caches(struct panthor_device *ptdev,
msecs_to_jiffies(100))) {
spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
if ((ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED) != 0 &&
- !(gpu_read(ptdev->iomem, GPU_INT_RAWSTAT) & GPU_IRQ_CLEAN_CACHES_COMPLETED))
+ !(gpu_read(gpu->irq.iomem, INT_RAWSTAT) & GPU_IRQ_CLEAN_CACHES_COMPLETED))
ret = -ETIMEDOUT;
else
ptdev->gpu->pending_reqs &= ~GPU_IRQ_CLEAN_CACHES_COMPLETED;
@@ -365,6 +375,7 @@ int panthor_gpu_flush_caches(struct panthor_device *ptdev,
*/
int panthor_gpu_soft_reset(struct panthor_device *ptdev)
{
+ struct panthor_gpu *gpu = ptdev->gpu;
bool timedout = false;
unsigned long flags;

@@ -372,8 +383,8 @@ int panthor_gpu_soft_reset(struct panthor_device *ptdev)
if (!drm_WARN_ON(&ptdev->base,
ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED)) {
ptdev->gpu->pending_reqs |= GPU_IRQ_RESET_COMPLETED;
- gpu_write(ptdev->iomem, GPU_INT_CLEAR, GPU_IRQ_RESET_COMPLETED);
- gpu_write(ptdev->iomem, GPU_CMD, GPU_SOFT_RESET);
+ gpu_write(gpu->irq.iomem, INT_CLEAR, GPU_IRQ_RESET_COMPLETED);
+ gpu_write(gpu->iomem, GPU_CMD, GPU_SOFT_RESET);
}
spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);

@@ -382,7 +393,7 @@ int panthor_gpu_soft_reset(struct panthor_device *ptdev)
msecs_to_jiffies(100))) {
spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
if ((ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED) != 0 &&
- !(gpu_read(ptdev->iomem, GPU_INT_RAWSTAT) & GPU_IRQ_RESET_COMPLETED))
+ !(gpu_read(gpu->irq.iomem, INT_RAWSTAT) & GPU_IRQ_RESET_COMPLETED))
timedout = true;
else
ptdev->gpu->pending_reqs &= ~GPU_IRQ_RESET_COMPLETED;
@@ -431,17 +442,17 @@ void panthor_gpu_resume(struct panthor_device *ptdev)

u64 panthor_gpu_get_timestamp(struct panthor_device *ptdev)
{
- return gpu_read64_counter(ptdev->iomem, GPU_TIMESTAMP);
+ return gpu_read64_counter(ptdev->gpu->iomem, GPU_TIMESTAMP);
}

u64 panthor_gpu_get_timestamp_offset(struct panthor_device *ptdev)
{
- return gpu_read64(ptdev->iomem, GPU_TIMESTAMP_OFFSET);
+ return gpu_read64(ptdev->gpu->iomem, GPU_TIMESTAMP_OFFSET);
}

u64 panthor_gpu_get_cycle_count(struct panthor_device *ptdev)
{
- return gpu_read64_counter(ptdev->iomem, GPU_CYCLE_COUNT);
+ return gpu_read64_counter(ptdev->gpu->iomem, GPU_CYCLE_COUNT);
}

int panthor_gpu_coherency_init(struct panthor_device *ptdev)
@@ -460,7 +471,7 @@ int panthor_gpu_coherency_init(struct panthor_device *ptdev)
/* Check if the ACE-Lite coherency protocol is actually supported by the GPU.
* ACE protocol has never been supported for command stream frontend GPUs.
*/
- if ((gpu_read(ptdev->iomem, GPU_COHERENCY_FEATURES) &
+ if ((gpu_read(ptdev->gpu->iomem, GPU_COHERENCY_FEATURES) &
GPU_COHERENCY_PROT_BIT(ACE_LITE))) {
ptdev->gpu_info.selected_coherency = GPU_COHERENCY_ACE_LITE;
return 0;
diff --git a/drivers/gpu/drm/panthor/panthor_gpu_regs.h b/drivers/gpu/drm/panthor/panthor_gpu_regs.h
index 3f60c45985a7..4c5b953796e4 100644
--- a/drivers/gpu/drm/panthor/panthor_gpu_regs.h
+++ b/drivers/gpu/drm/panthor/panthor_gpu_regs.h
@@ -31,10 +31,6 @@
#define GPU_CSF_ID 0x1C

#define GPU_INT_BASE 0x20
-#define GPU_INT_RAWSTAT 0x20
-#define GPU_INT_CLEAR 0x24
-#define GPU_INT_MASK 0x28
-#define GPU_INT_STAT 0x2c
#define GPU_IRQ_FAULT BIT(0)
#define GPU_IRQ_PROTM_FAULT BIT(1)
#define GPU_IRQ_RESET_COMPLETED BIT(8)
diff --git a/drivers/gpu/drm/panthor/panthor_hw.c b/drivers/gpu/drm/panthor/panthor_hw.c
index 9431f16d950f..80aa151d5936 100644
--- a/drivers/gpu/drm/panthor/panthor_hw.c
+++ b/drivers/gpu/drm/panthor/panthor_hw.c
@@ -195,28 +195,30 @@ static int panthor_gpu_info_init(struct panthor_device *ptdev)
{
unsigned int i;

- ptdev->gpu_info.csf_id = gpu_read(ptdev->iomem, GPU_CSF_ID);
- ptdev->gpu_info.gpu_rev = gpu_read(ptdev->iomem, GPU_REVID);
- ptdev->gpu_info.core_features = gpu_read(ptdev->iomem, GPU_CORE_FEATURES);
- ptdev->gpu_info.l2_features = gpu_read(ptdev->iomem, GPU_L2_FEATURES);
- ptdev->gpu_info.tiler_features = gpu_read(ptdev->iomem, GPU_TILER_FEATURES);
- ptdev->gpu_info.mem_features = gpu_read(ptdev->iomem, GPU_MEM_FEATURES);
- ptdev->gpu_info.mmu_features = gpu_read(ptdev->iomem, GPU_MMU_FEATURES);
- ptdev->gpu_info.thread_features = gpu_read(ptdev->iomem, GPU_THREAD_FEATURES);
- ptdev->gpu_info.max_threads = gpu_read(ptdev->iomem, GPU_THREAD_MAX_THREADS);
+ void __iomem *gpu_iomem = ptdev->iomem + GPU_CONTROL_BASE;
+
+ ptdev->gpu_info.csf_id = gpu_read(gpu_iomem, GPU_CSF_ID);
+ ptdev->gpu_info.gpu_rev = gpu_read(gpu_iomem, GPU_REVID);
+ ptdev->gpu_info.core_features = gpu_read(gpu_iomem, GPU_CORE_FEATURES);
+ ptdev->gpu_info.l2_features = gpu_read(gpu_iomem, GPU_L2_FEATURES);
+ ptdev->gpu_info.tiler_features = gpu_read(gpu_iomem, GPU_TILER_FEATURES);
+ ptdev->gpu_info.mem_features = gpu_read(gpu_iomem, GPU_MEM_FEATURES);
+ ptdev->gpu_info.mmu_features = gpu_read(gpu_iomem, GPU_MMU_FEATURES);
+ ptdev->gpu_info.thread_features = gpu_read(gpu_iomem, GPU_THREAD_FEATURES);
+ ptdev->gpu_info.max_threads = gpu_read(gpu_iomem, GPU_THREAD_MAX_THREADS);
ptdev->gpu_info.thread_max_workgroup_size =
- gpu_read(ptdev->iomem, GPU_THREAD_MAX_WORKGROUP_SIZE);
+ gpu_read(gpu_iomem, GPU_THREAD_MAX_WORKGROUP_SIZE);
ptdev->gpu_info.thread_max_barrier_size =
- gpu_read(ptdev->iomem, GPU_THREAD_MAX_BARRIER_SIZE);
- ptdev->gpu_info.coherency_features = gpu_read(ptdev->iomem, GPU_COHERENCY_FEATURES);
+ gpu_read(gpu_iomem, GPU_THREAD_MAX_BARRIER_SIZE);
+ ptdev->gpu_info.coherency_features = gpu_read(gpu_iomem, GPU_COHERENCY_FEATURES);
for (i = 0; i < 4; i++)
ptdev->gpu_info.texture_features[i] =
- gpu_read(ptdev->iomem, GPU_TEXTURE_FEATURES(i));
+ gpu_read(gpu_iomem, GPU_TEXTURE_FEATURES(i));

- ptdev->gpu_info.as_present = gpu_read(ptdev->iomem, GPU_AS_PRESENT);
+ ptdev->gpu_info.as_present = gpu_read(gpu_iomem, GPU_AS_PRESENT);

/* Introduced in arch 11.x */
- ptdev->gpu_info.gpu_features = gpu_read64(ptdev->iomem, GPU_FEATURES);
+ ptdev->gpu_info.gpu_features = gpu_read64(gpu_iomem, GPU_FEATURES);

if (panthor_hw_has_pwr_ctrl(ptdev)) {
/* Introduced in arch 14.x */
@@ -224,9 +226,9 @@ static int panthor_gpu_info_init(struct panthor_device *ptdev)
ptdev->gpu_info.tiler_present = gpu_read64(ptdev->iomem, PWR_TILER_PRESENT);
ptdev->gpu_info.shader_present = gpu_read64(ptdev->iomem, PWR_SHADER_PRESENT);
} else {
- ptdev->gpu_info.shader_present = gpu_read64(ptdev->iomem, GPU_SHADER_PRESENT);
- ptdev->gpu_info.tiler_present = gpu_read64(ptdev->iomem, GPU_TILER_PRESENT);
- ptdev->gpu_info.l2_present = gpu_read64(ptdev->iomem, GPU_L2_PRESENT);
+ ptdev->gpu_info.shader_present = gpu_read64(gpu_iomem, GPU_SHADER_PRESENT);
+ ptdev->gpu_info.tiler_present = gpu_read64(gpu_iomem, GPU_TILER_PRESENT);
+ ptdev->gpu_info.l2_present = gpu_read64(gpu_iomem, GPU_L2_PRESENT);
}

return overload_shader_present(ptdev);
--
2.43.0