Re: [PATCH v2 11/16] coresight: Expose map arguments in trace ID API

From: Suzuki K Poulose
Date: Thu Jun 06 2024 - 09:50:27 EST


On 04/06/2024 15:30, James Clark wrote:
The trace ID API is currently hard-coded to always use the global map.
Add public versions that allow the map to be passed in so that Perf
mode can use per-sink maps. Keep the non-map versions so that sysfs
mode can continue to use the default global map.

System ID functions are unchanged because they will always use the
default map.
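
For example (illustrative only; "sink_id_map" stands for whatever
per-sink map the Perf-mode caller has looked up, which this patch does
not yet add):

	/* sysfs mode: default global map, behaviour unchanged */
	trace_id = coresight_trace_id_get_cpu_id(cpu);

	/* Perf mode: operate on the chosen sink's private map */
	trace_id = coresight_trace_id_get_cpu_id_map(cpu, sink_id_map);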

Signed-off-by: James Clark <james.clark@xxxxxxx>
---
.../hwtracing/coresight/coresight-trace-id.c | 36 ++++++++++++++-----
.../hwtracing/coresight/coresight-trace-id.h | 20 +++++++++--
2 files changed, 45 insertions(+), 11 deletions(-)

diff --git a/drivers/hwtracing/coresight/coresight-trace-id.c b/drivers/hwtracing/coresight/coresight-trace-id.c
index 19005b5b4dc4..5561989a03fa 100644
--- a/drivers/hwtracing/coresight/coresight-trace-id.c
+++ b/drivers/hwtracing/coresight/coresight-trace-id.c
@@ -12,7 +12,7 @@
#include "coresight-trace-id.h"
-/* Default trace ID map. Used on systems that don't require per sink mappings */
+/* Default trace ID map. Used in sysfs mode and for system sources */
static struct coresight_trace_id_map id_map_default;
/* maintain a record of the mapping of IDs and pending releases per cpu */
@@ -47,7 +47,7 @@ static void coresight_trace_id_dump_table(struct coresight_trace_id_map *id_map,
#endif
/* unlocked read of current trace ID value for given CPU */
-static int _coresight_trace_id_read_cpu_id(int cpu)
+static int _coresight_trace_id_read_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
{
return atomic_read(&per_cpu(cpu_id, cpu));
}
@@ -152,7 +152,7 @@ static void coresight_trace_id_release_all_pending(void)
DUMP_ID_MAP(id_map);
}
-static int coresight_trace_id_map_get_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
+static int _coresight_trace_id_get_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
{
unsigned long flags;
int id;
@@ -160,7 +160,7 @@ static int coresight_trace_id_map_get_cpu_id(int cpu, struct coresight_trace_id_
spin_lock_irqsave(&id_map_lock, flags);

Could we also reduce contention on the id_map_lock by making the spinlock per-map? It can be a separate patch.
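
Something along these lines, perhaps (sketch only; the struct is the one
from include/linux/coresight.h, and the new "lock" field plus the elided
allocation body are illustrative, not part of this patch):

	struct coresight_trace_id_map {
		DECLARE_BITMAP(used_ids, CORESIGHT_TRACE_IDS_MAX);
		DECLARE_BITMAP(pend_rel_ids, CORESIGHT_TRACE_IDS_MAX);
		spinlock_t lock;	/* would replace the global id_map_lock */
	};

	static int _coresight_trace_id_get_cpu_id(int cpu,
						  struct coresight_trace_id_map *id_map)
	{
		unsigned long flags;
		int id = -EINVAL;

		/* contend only on the map being allocated from */
		spin_lock_irqsave(&id_map->lock, flags);
		/* ... existing allocation logic, operating on id_map as before ... */
		spin_unlock_irqrestore(&id_map->lock, flags);
		return id;
	}

The default map's lock would need static initialisation (e.g.
__SPIN_LOCK_UNLOCKED), and per-sink maps a spin_lock_init() at creation.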

This patch as such looks good to me.

Suzuki


/* check for existing allocation for this CPU */
- id = _coresight_trace_id_read_cpu_id(cpu);
+ id = _coresight_trace_id_read_cpu_id(cpu, id_map);
if (id)
goto get_cpu_id_clr_pend;
@@ -196,13 +196,13 @@ static int coresight_trace_id_map_get_cpu_id(int cpu, struct coresight_trace_id_
return id;
}
-static void coresight_trace_id_map_put_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
+static void _coresight_trace_id_put_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
{
unsigned long flags;
int id;
/* check for existing allocation for this CPU */
- id = _coresight_trace_id_read_cpu_id(cpu);
+ id = _coresight_trace_id_read_cpu_id(cpu, id_map);
if (!id)
return;
@@ -254,22 +254,40 @@ static void coresight_trace_id_map_put_system_id(struct coresight_trace_id_map *
int coresight_trace_id_get_cpu_id(int cpu)
{
- return coresight_trace_id_map_get_cpu_id(cpu, &id_map_default);
+ return _coresight_trace_id_get_cpu_id(cpu, &id_map_default);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_get_cpu_id);
+int coresight_trace_id_get_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map)
+{
+ return _coresight_trace_id_get_cpu_id(cpu, id_map);
+}
+EXPORT_SYMBOL_GPL(coresight_trace_id_get_cpu_id_map);
+
void coresight_trace_id_put_cpu_id(int cpu)
{
- coresight_trace_id_map_put_cpu_id(cpu, &id_map_default);
+ _coresight_trace_id_put_cpu_id(cpu, &id_map_default);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_put_cpu_id);
+void coresight_trace_id_put_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map)
+{
+ _coresight_trace_id_put_cpu_id(cpu, id_map);
+}
+EXPORT_SYMBOL_GPL(coresight_trace_id_put_cpu_id_map);
+
int coresight_trace_id_read_cpu_id(int cpu)
{
- return _coresight_trace_id_read_cpu_id(cpu);
+ return _coresight_trace_id_read_cpu_id(cpu, &id_map_default);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_read_cpu_id);
+int coresight_trace_id_read_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map)
+{
+ return _coresight_trace_id_read_cpu_id(cpu, id_map);
+}
+EXPORT_SYMBOL_GPL(coresight_trace_id_read_cpu_id_map);
+
int coresight_trace_id_get_system_id(void)
{
return coresight_trace_id_map_get_system_id(&id_map_default);
diff --git a/drivers/hwtracing/coresight/coresight-trace-id.h b/drivers/hwtracing/coresight/coresight-trace-id.h
index 49438a96fcc6..840babdd0794 100644
--- a/drivers/hwtracing/coresight/coresight-trace-id.h
+++ b/drivers/hwtracing/coresight/coresight-trace-id.h
@@ -42,8 +42,6 @@
#define IS_VALID_CS_TRACE_ID(id) \
((id > CORESIGHT_TRACE_ID_RES_0) && (id < CORESIGHT_TRACE_ID_RES_TOP))
-/* Allocate and release IDs for a single default trace ID map */
-
/**
* Read and optionally allocate a CoreSight trace ID and associate with a CPU.
*
@@ -59,6 +57,12 @@
*/
int coresight_trace_id_get_cpu_id(int cpu);
+/**
+ * Version of coresight_trace_id_get_cpu_id() that takes the ID map to
+ * operate on as an explicit argument.
+ */
+int coresight_trace_id_get_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map);
+
/**
* Release an allocated trace ID associated with the CPU.
*
@@ -72,6 +76,12 @@ int coresight_trace_id_get_cpu_id(int cpu);
*/
void coresight_trace_id_put_cpu_id(int cpu);
+/**
+ * Version of coresight_trace_id_put_cpu_id() that takes the ID map to
+ * operate on as an explicit argument.
+ */
+void coresight_trace_id_put_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map);
+
/**
* Read the current allocated CoreSight Trace ID value for the CPU.
*
@@ -92,6 +102,12 @@ void coresight_trace_id_put_cpu_id(int cpu);
*/
int coresight_trace_id_read_cpu_id(int cpu);
+/**
+ * Version of coresight_trace_id_read_cpu_id() that takes the ID map to
+ * operate on as an explicit argument.
+ */
+int coresight_trace_id_read_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map);
+
/**
* Allocate a CoreSight trace ID for a system component.
*