[PATCH v5 02/50] libperf: Lazily allocate/size mmap event copy
From: Ian Rogers
Date: Mon Nov 27 2023 - 17:09:34 EST
The event copy in the mmap is used as storage to read an
event. Not all users of mmaps read the events; perf record, for
example, does not. The buffer size was also statically set to
PERF_SAMPLE_MAX_SIZE rather than the amount necessary from the
header's event size. Switch to a model where the event_copy is
lazily allocated and reallocated if it is too small for the event's
size. This adds the potential for the event to move, so a stored
copy of the event pointer could be broken. All the current users do:

  while ((event = perf_mmap__read_event(map)) != NULL) { ... }

and so they would already be broken by the event being overwritten if
they had stored the pointer. Manual inspection and address sanitizer
testing also show that the event pointer is not being stored.
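For illustration, a consumer that does need an event to outlive the
next read must duplicate it rather than keep the returned pointer. A
minimal sketch under that assumption (save_event() and drain() are
made-up helpers, not libperf API):

  #include <stdlib.h>
  #include <string.h>
  #include <perf/event.h>
  #include <perf/mmap.h>

  /* Copy an event out of the mmap: the pointer returned by
   * perf_mmap__read_event() is only valid until the next call, since
   * the backing event_copy storage may be overwritten or reallocated.
   */
  static union perf_event *save_event(const union perf_event *event)
  {
  	union perf_event *copy = malloc(event->header.size);

  	if (copy)
  		memcpy(copy, event, event->header.size);
  	return copy;
  }

  static void drain(struct perf_mmap *map, union perf_event **saved)
  {
  	union perf_event *event;

  	while ((event = perf_mmap__read_event(map)) != NULL) {
  		if (event->header.type == PERF_RECORD_LOST) {
  			free(*saved);
  			*saved = save_event(event); /* not 'event' itself */
  		}
  		perf_mmap__consume(map);
  	}
  }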
Signed-off-by: Ian Rogers <irogers@xxxxxxxxxx>
---
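The core of the change is a grow-on-demand buffer: allocation is
deferred until an event actually has to be copied out, and the buffer
then only ever grows, converging on the largest event size seen rather
than a fixed worst case. A standalone sketch of the pattern
(ensure_capacity() is a hypothetical name, not part of this patch):

  #include <stdlib.h>

  /* Return a buffer of at least 'need' bytes, growing '*buf' (and
   * updating '*cap') only when the current capacity is insufficient.
   */
  static void *ensure_capacity(void **buf, size_t *cap, size_t need)
  {
  	void *tmp;

  	if (need <= *cap)
  		return *buf;
  	tmp = realloc(*buf, need);
  	if (!tmp)
  		return NULL;	/* *buf is unchanged and still owned */
  	*buf = tmp;
  	*cap = need;
  	return tmp;
  }

realloc() is used rather than free() plus malloc(): the old contents do
not need preserving here, but realloc() keeps the failure path simple
because the previous buffer remains valid and is still freed in
perf_mmap__munmap().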
 tools/lib/perf/include/internal/mmap.h |  3 ++-
 tools/lib/perf/mmap.c                  | 21 ++++++++++++++++++---
 2 files changed, 20 insertions(+), 4 deletions(-)
diff --git a/tools/lib/perf/include/internal/mmap.h b/tools/lib/perf/include/internal/mmap.h
index 5a062af8e9d8..5f08cab61ece 100644
--- a/tools/lib/perf/include/internal/mmap.h
+++ b/tools/lib/perf/include/internal/mmap.h
@@ -33,7 +33,8 @@ struct perf_mmap {
 	bool overwrite;
 	u64 flush;
 	libperf_unmap_cb_t unmap_cb;
-	char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
+	void *event_copy;
+	size_t event_copy_sz;
 	struct perf_mmap *next;
 };
 
diff --git a/tools/lib/perf/mmap.c b/tools/lib/perf/mmap.c
index 2184814b37dd..c829db7bf1fa 100644
--- a/tools/lib/perf/mmap.c
+++ b/tools/lib/perf/mmap.c
@@ -19,6 +19,7 @@
 void perf_mmap__init(struct perf_mmap *map, struct perf_mmap *prev,
 		     bool overwrite, libperf_unmap_cb_t unmap_cb)
 {
+	/* Assume fields were zero initialized. */
 	map->fd = -1;
 	map->overwrite = overwrite;
 	map->unmap_cb = unmap_cb;
@@ -51,13 +52,19 @@ int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
 
 void perf_mmap__munmap(struct perf_mmap *map)
 {
-	if (map && map->base != NULL) {
+	if (!map)
+		return;
+
+	free(map->event_copy);
+	map->event_copy = NULL;
+	map->event_copy_sz = 0;
+	if (map->base) {
 		munmap(map->base, perf_mmap__mmap_len(map));
 		map->base = NULL;
 		map->fd = -1;
 		refcount_set(&map->refcnt, 0);
 	}
-	if (map && map->unmap_cb)
+	if (map->unmap_cb)
 		map->unmap_cb(map);
 }
 
@@ -223,9 +230,17 @@ static union perf_event *perf_mmap__read(struct perf_mmap *map,
 	 */
 	if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
 		unsigned int offset = *startp;
-		unsigned int len = min(sizeof(*event), size), cpy;
+		unsigned int len = size, cpy;
 		void *dst = map->event_copy;
 
+		if (size > map->event_copy_sz) {
+			dst = realloc(map->event_copy, size);
+			if (!dst)
+				return NULL;
+			map->event_copy = dst;
+			map->event_copy_sz = size;
+		}
+
 		do {
 			cpy = min(map->mask + 1 - (offset & map->mask), len);
 			memcpy(dst, &data[offset & map->mask], cpy);
--
2.43.0.rc1.413.gea7ed67945-goog