The current code syncs the buffer range [offset, offset+len), but it does
not consider the case where the trace data has wrapped around; in that
case 'offset+len' is bigger than 'etr_buf->size'. As a result it syncs
memory beyond the end of the bounce buffer and misses syncing the wrapped
trace data at the start of the buffer.
This patch corrects the memory sync ranges: when the wrap-around case is
detected, the sync is split into two chunks, one for the tail of the
buffer and one from the start of the buffer after wrapping around.
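For illustration only (the numbers below are hypothetical and merely
demonstrate the arithmetic used by the patch): assume etr_buf->size is
0x1000, r_offset is 0xe00 and the computed etr_buf->len is 0x400. The
old code would sync the range [0xe00, 0x1200), running 0x200 bytes past
the end of the bounce buffer and never syncing the wrapped data at its
start. With this patch the sync is split as:

  len1 = etr_buf->size - r_offset;  /* 0x1000 - 0xe00 = 0x200 (tail) */
  len2 = etr_buf->len - len1;       /* 0x400  - 0x200 = 0x200 (wrap) */
  tmc_sg_table_sync_data_range(table, r_offset, len1);
  tmc_sg_table_sync_data_range(table, 0, len2);

so both the tail chunk and the chunk at the start of the buffer are
synced.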
Signed-off-by: Leo Yan <leo.yan@xxxxxxxxxx>
---
.../hwtracing/coresight/coresight-tmc-etr.c | 18 +++++++++++++++++-
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 888b0f929d33..a1afefcbf175 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -780,7 +780,23 @@ static void tmc_etr_sync_sg_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
else
etr_buf->len = ((w_offset < r_offset) ? etr_buf->size : 0) +
w_offset - r_offset;
- tmc_sg_table_sync_data_range(table, r_offset, etr_buf->len);
+
+ if (r_offset + etr_buf->len > etr_buf->size) {
+ int len1, len2;
+
+ /*
+ * If trace data is wrapped around, sync AUX bounce buffer
+ * for two chunks: "len1" is the trace data length at
+ * the tail of the bounce buffer, and "len2" is the length from
+ * the start of the buffer after wrapping around.
+ */
+ len1 = etr_buf->size - r_offset;
+ len2 = etr_buf->len - len1;
+ tmc_sg_table_sync_data_range(table, r_offset, len1);
+ tmc_sg_table_sync_data_range(table, 0, len2);
+ } else {
+ tmc_sg_table_sync_data_range(table, r_offset, etr_buf->len);
+ }
}
static const struct etr_buf_operations etr_sg_buf_ops = {