[PATCH v2 2/3] drm/panthor: Add DRM fdinfo support

From: Adrián Larumbe
Date: Tue Apr 23 2024 - 17:34:01 EST


Drawing from the FW-calculated values in the previous commit, we can now
accumulate the per-file cycle and timestamp counters by collecting them
from finished jobs when their group synchronisation objects get updated,
and report them through the DRM fdinfo interface together with the
device's current and maximum clock frequencies.
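
With this in place, a client's fdinfo entry looks roughly as follows
(the values below are purely illustrative):

    drm-engine-panthor:     223448152 ns
    drm-cycles-panthor:     106338842
    drm-maxfreq-panthor:    800000000 Hz
    drm-curfreq-panthor:    500000000 Hz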

Signed-off-by: Adrián Larumbe <adrian.larumbe@xxxxxxxxxxxxx>
---
drivers/gpu/drm/panthor/panthor_devfreq.c | 10 +++++
drivers/gpu/drm/panthor/panthor_device.h  | 11 ++++++
drivers/gpu/drm/panthor/panthor_drv.c     | 31 +++++++++++++++
drivers/gpu/drm/panthor/panthor_sched.c   | 46 +++++++++++++++++++++++
4 files changed, 98 insertions(+)
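
A note on the time conversion in panthor_gpu_show_fdinfo(): the
accumulated per-file timestamps are raw system counter ticks, so they are
scaled by NSEC_PER_SEC / cntfrq before being reported. As a worked example
(assuming a hypothetical 19.2 MHz counter, which this patch does not rely
on):

    ticks  = 96000, cntfrq = 19200000 Hz
    ns     = DIV_ROUND_UP_ULL(96000 * NSEC_PER_SEC, 19200000) = 5000000

i.e. a job spanning 96000 ticks is reported as 5 ms of GPU time for that
client.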

diff --git a/drivers/gpu/drm/panthor/panthor_devfreq.c b/drivers/gpu/drm/panthor/panthor_devfreq.c
index c6d3c327cc24..5eededaeade7 100644
--- a/drivers/gpu/drm/panthor/panthor_devfreq.c
+++ b/drivers/gpu/drm/panthor/panthor_devfreq.c
@@ -91,6 +91,7 @@ static int panthor_devfreq_get_dev_status(struct device *dev,
spin_lock_irqsave(&pdevfreq->lock, irqflags);

panthor_devfreq_update_utilization(pdevfreq);
+ ptdev->current_frequency = status->current_frequency;

status->total_time = ktime_to_ns(ktime_add(pdevfreq->busy_time,
pdevfreq->idle_time));
@@ -130,6 +131,7 @@ int panthor_devfreq_init(struct panthor_device *ptdev)
struct panthor_devfreq *pdevfreq;
struct dev_pm_opp *opp;
unsigned long cur_freq;
+ unsigned long freq = ULONG_MAX;
int ret;

pdevfreq = drmm_kzalloc(&ptdev->base, sizeof(*ptdev->devfreq), GFP_KERNEL);
@@ -204,6 +206,14 @@ int panthor_devfreq_init(struct panthor_device *ptdev)

dev_pm_opp_put(opp);

+ /* Find the fastest defined rate */
+ opp = dev_pm_opp_find_freq_floor(dev, &freq);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+ ptdev->fast_rate = freq;
+
+ dev_pm_opp_put(opp);
+
/*
* Setup default thresholds for the simple_ondemand governor.
* The values are chosen based on experiments.
diff --git a/drivers/gpu/drm/panthor/panthor_device.h b/drivers/gpu/drm/panthor/panthor_device.h
index 2fdd671b38fd..b5b5dfe3cafe 100644
--- a/drivers/gpu/drm/panthor/panthor_device.h
+++ b/drivers/gpu/drm/panthor/panthor_device.h
@@ -162,6 +162,14 @@ struct panthor_device {
*/
struct page *dummy_latest_flush;
} pm;
+
+ unsigned long current_frequency;
+ unsigned long fast_rate;
+};
+
+struct panthor_gpu_usage {
+ u64 time;
+ u64 cycles;
};

/**
@@ -176,6 +184,9 @@ struct panthor_file {

/** @groups: Scheduling group pool attached to this file. */
struct panthor_group_pool *groups;
+
+ /** @stats: cycle and timestamp measures for job execution. */
+ struct panthor_gpu_usage stats;
};

int panthor_device_init(struct panthor_device *ptdev);
diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
index b8a84f26b3ef..6d25385e02a1 100644
--- a/drivers/gpu/drm/panthor/panthor_drv.c
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -3,12 +3,17 @@
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@xxxxxxxxxx> */
/* Copyright 2019 Collabora ltd. */

+#ifdef CONFIG_ARM_ARCH_TIMER
+#include <asm/arch_timer.h>
+#endif
+
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/time64.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
@@ -1351,6 +1356,30 @@ static int panthor_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}

+static void panthor_gpu_show_fdinfo(struct panthor_device *ptdev,
+ struct panthor_file *pfile,
+ struct drm_printer *p)
+{
+#ifdef CONFIG_ARM_ARCH_TIMER
+ drm_printf(p, "drm-engine-panthor:\t%llu ns\n",
+ DIV_ROUND_UP_ULL((pfile->stats.time * NSEC_PER_SEC),
+ arch_timer_get_cntfrq()));
+#endif
+ drm_printf(p, "drm-cycles-panthor:\t%llu\n", pfile->stats.cycles);
+ drm_printf(p, "drm-maxfreq-panthor:\t%lu Hz\n", ptdev->fast_rate);
+ drm_printf(p, "drm-curfreq-panthor:\t%lu Hz\n", ptdev->current_frequency);
+}
+
+static void panthor_show_fdinfo(struct drm_printer *p, struct drm_file *file)
+{
+ struct drm_device *dev = file->minor->dev;
+ struct panthor_device *ptdev = container_of(dev, struct panthor_device, base);
+
+ panthor_gpu_show_fdinfo(ptdev, file->driver_priv, p);
+
+ drm_show_memory_stats(p, file);
+}
+
static const struct file_operations panthor_drm_driver_fops = {
.open = drm_open,
.release = drm_release,
@@ -1360,6 +1389,7 @@ static const struct file_operations panthor_drm_driver_fops = {
.read = drm_read,
.llseek = noop_llseek,
.mmap = panthor_mmap,
+ .show_fdinfo = drm_show_fdinfo,
};

#ifdef CONFIG_DEBUG_FS
@@ -1378,6 +1408,7 @@ static const struct drm_driver panthor_drm_driver = {
DRIVER_SYNCOBJ_TIMELINE | DRIVER_GEM_GPUVA,
.open = panthor_open,
.postclose = panthor_postclose,
+ .show_fdinfo = panthor_show_fdinfo,
.ioctls = panthor_drm_driver_ioctls,
.num_ioctls = ARRAY_SIZE(panthor_drm_driver_ioctls),
.fops = &panthor_drm_driver_fops,
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 320dfa0388ba..9f1810f5cf4b 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -598,6 +598,18 @@ struct panthor_group {
size_t times_offset;
} syncobjs;

+ /** @fdinfo: Reference to the owning file's cycle and timestamp totals. */
+ struct {
+ /** @data: Pointer to actual per-file sample data. */
+ struct panthor_gpu_usage *data;
+
+ /**
+ * @lock: Mutex protecting concurrent accesses from the DRM file's fdinfo
+ * callback and the job post-completion processing function.
+ */
+ struct mutex lock;
+ } fdinfo;
+
/** @state: Group state. */
enum panthor_group_state state;

@@ -859,6 +871,8 @@ static void group_release_work(struct work_struct *work)
struct panthor_device *ptdev = group->ptdev;
u32 i;

+ mutex_destroy(&group->fdinfo.lock);
+
for (i = 0; i < group->queue_count; i++)
group_free_queue(group, group->queues[i]);

@@ -2741,6 +2755,30 @@ void panthor_sched_post_reset(struct panthor_device *ptdev)
sched_queue_work(sched, sync_upd);
}

+static void update_fdinfo_stats(struct panthor_job *job)
+{
+ struct panthor_group *group = job->group;
+ struct panthor_queue *queue = group->queues[job->queue_idx];
+ struct panthor_device *ptdev = group->ptdev;
+ struct panthor_gpu_usage *fdinfo;
+ struct panthor_job_times *times;
+
+ drm_WARN_ON(&ptdev->base, job->ringbuf_idx >=
+ panthor_kernel_bo_size(queue->ringbuf) / (SLOTSIZE));
+
+ times = (struct panthor_job_times *)
+ ((unsigned long)group->syncobjs.bo->kmap + queue->time_offset +
+ (job->ringbuf_idx * sizeof(struct panthor_job_times)));
+
+ mutex_lock(&group->fdinfo.lock);
+ if (group->fdinfo.data) {
+ fdinfo = group->fdinfo.data;
+ fdinfo->cycles += times->cycles.after - times->cycles.before;
+ fdinfo->time += times->time.after - times->time.before;
+ }
+ mutex_unlock(&group->fdinfo.lock);
+}
+
static void group_sync_upd_work(struct work_struct *work)
{
struct panthor_group *group =
@@ -2776,6 +2814,7 @@ static void group_sync_upd_work(struct work_struct *work)
dma_fence_end_signalling(cookie);

list_for_each_entry_safe(job, job_tmp, &done_jobs, node) {
+ update_fdinfo_stats(job);
list_del_init(&job->node);
panthor_job_put(&job->base);
}
@@ -3240,6 +3279,9 @@ int panthor_group_create(struct panthor_file *pfile,
}
mutex_unlock(&sched->reset.lock);

+ group->fdinfo.data = &pfile->stats;
+ mutex_init(&group->fdinfo.lock);
+
return gid;

err_put_group:
@@ -3279,6 +3321,10 @@ int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle)
mutex_unlock(&sched->lock);
mutex_unlock(&sched->reset.lock);

+ mutex_lock(&group->fdinfo.lock);
+ group->fdinfo.data = NULL;
+ mutex_unlock(&group->fdinfo.lock);
+
group_put(group);
return 0;
}
--
2.44.0