Re: [PATCH v2] libperf evsel: Make use of FD robust.
From: Jiri Olsa
Date: Thu Sep 16 2021 - 04:04:00 EST
On Wed, Sep 15, 2021 at 11:22:39PM -0700, Ian Rogers wrote:
SNIP
> v2 is a rebase.
>
> Signed-off-by: Ian Rogers <irogers@xxxxxxxxxx>
> ---
> tools/lib/perf/evsel.c | 64 +++++++++++++++++++++++++++---------------
> 1 file changed, 41 insertions(+), 23 deletions(-)
>
> diff --git a/tools/lib/perf/evsel.c b/tools/lib/perf/evsel.c
> index d8886720e83d..ede7af4d183c 100644
> --- a/tools/lib/perf/evsel.c
> +++ b/tools/lib/perf/evsel.c
> @@ -43,7 +43,7 @@ void perf_evsel__delete(struct perf_evsel *evsel)
> free(evsel);
> }
>
> -#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
> +#define FD(e, x, y) ((int *) xyarray__entry(e->fd, x, y))
> #define MMAP(e, x, y) (e->mmap ? ((struct perf_mmap *) xyarray__entry(e->mmap, x, y)) : NULL)
>
> int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
> @@ -54,7 +54,10 @@ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
> int cpu, thread;
> for (cpu = 0; cpu < ncpus; cpu++) {
> for (thread = 0; thread < nthreads; thread++) {
> - FD(evsel, cpu, thread) = -1;
> + int *fd = FD(evsel, cpu, thread);
> +
> + if (fd)
> + *fd = -1;
so it's the per-thread open that uses empty cpus, and evlist__close
will have more cpus to work on
I'd expect some symmetric fix on the close path, but evlist__close is
generic, so it's probably the best fix ;-)
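
for reference, the whole thing leans on xyarray__entry refusing
out-of-range indices so that FD() can come back NULL; roughly along
these lines (a sketch only, the struct field names here are
illustrative, not copied from the tree):

	static inline void *xyarray__entry(struct xyarray *xy, size_t x, size_t y)
	{
		/* out-of-range cpu/thread index: hand back NULL rather
		 * than a pointer past the allocated contents */
		if (x >= xy->max_x || y >= xy->max_y)
			return NULL;
		return &xy->contents[x * xy->row_size + y * xy->entry_size];
	}

so a generic caller like evlist__close, iterating over more cpus than a
per-thread evsel ever allocated fds for, gets NULL from FD() and can
simply skip the entry instead of dereferencing past the array.
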
Acked-by: Jiri Olsa <jolsa@xxxxxxxxxx>
thanks,
jirka
> }
> }
> }
> @@ -80,7 +83,7 @@ sys_perf_event_open(struct perf_event_attr *attr,
> static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *group_fd)
> {
> struct perf_evsel *leader = evsel->leader;
> - int fd;
> + int *fd;
>
> if (evsel == leader) {
> *group_fd = -1;
> @@ -95,10 +98,10 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *grou
> return -ENOTCONN;
>
> fd = FD(leader, cpu, thread);
> - if (fd == -1)
> + if (fd == NULL || *fd == -1)
> return -EBADF;
>
> - *group_fd = fd;
> + *group_fd = *fd;
>
> return 0;
> }
> @@ -138,7 +141,11 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
>
> for (cpu = 0; cpu < cpus->nr; cpu++) {
> for (thread = 0; thread < threads->nr; thread++) {
> - int fd, group_fd;
> + int fd, group_fd, *evsel_fd;
> +
> + evsel_fd = FD(evsel, cpu, thread);
> + if (evsel_fd == NULL)
> + return -EINVAL;
>
> err = get_group_fd(evsel, cpu, thread, &group_fd);
> if (err < 0)
> @@ -151,7 +158,7 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
> if (fd < 0)
> return -errno;
>
> - FD(evsel, cpu, thread) = fd;
> + *evsel_fd = fd;
> }
> }
>
> @@ -163,9 +170,12 @@ static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu)
> int thread;
>
> for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
> - if (FD(evsel, cpu, thread) >= 0)
> - close(FD(evsel, cpu, thread));
> - FD(evsel, cpu, thread) = -1;
> + int *fd = FD(evsel, cpu, thread);
> +
> + if (fd && *fd >= 0) {
> + close(*fd);
> + *fd = -1;
> + }
> }
> }
>
> @@ -209,13 +219,12 @@ void perf_evsel__munmap(struct perf_evsel *evsel)
>
> for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
> for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
> - int fd = FD(evsel, cpu, thread);
> - struct perf_mmap *map = MMAP(evsel, cpu, thread);
> + int *fd = FD(evsel, cpu, thread);
>
> - if (fd < 0)
> + if (fd == NULL || *fd < 0)
> continue;
>
> - perf_mmap__munmap(map);
> + perf_mmap__munmap(MMAP(evsel, cpu, thread));
> }
> }
>
> @@ -239,15 +248,16 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
>
> for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
> for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
> - int fd = FD(evsel, cpu, thread);
> - struct perf_mmap *map = MMAP(evsel, cpu, thread);
> + int *fd = FD(evsel, cpu, thread);
> + struct perf_mmap *map;
>
> - if (fd < 0)
> + if (fd == NULL || *fd < 0)
> continue;
>
> + map = MMAP(evsel, cpu, thread);
> perf_mmap__init(map, NULL, false, NULL);
>
> - ret = perf_mmap__mmap(map, &mp, fd, cpu);
> + ret = perf_mmap__mmap(map, &mp, *fd, cpu);
> if (ret) {
> perf_evsel__munmap(evsel);
> return ret;
> @@ -260,7 +270,9 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
>
> void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread)
> {
> - if (FD(evsel, cpu, thread) < 0 || MMAP(evsel, cpu, thread) == NULL)
> + int *fd = FD(evsel, cpu, thread);
> +
> + if (fd == NULL || *fd < 0 || MMAP(evsel, cpu, thread) == NULL)
> return NULL;
>
> return MMAP(evsel, cpu, thread)->base;
> @@ -295,17 +307,18 @@ int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
> struct perf_counts_values *count)
> {
> size_t size = perf_evsel__read_size(evsel);
> + int *fd = FD(evsel, cpu, thread);
>
> memset(count, 0, sizeof(*count));
>
> - if (FD(evsel, cpu, thread) < 0)
> + if (fd == NULL || *fd < 0)
> return -EINVAL;
>
> if (MMAP(evsel, cpu, thread) &&
> !perf_mmap__read_self(MMAP(evsel, cpu, thread), count))
> return 0;
>
> - if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
> + if (readn(*fd, count->values, size) <= 0)
> return -errno;
>
> return 0;
> @@ -318,8 +331,13 @@ static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
> int thread;
>
> for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
> - int fd = FD(evsel, cpu, thread),
> - err = ioctl(fd, ioc, arg);
> + int err;
> + int *fd = FD(evsel, cpu, thread);
> +
> +			if (fd == NULL || *fd < 0)
> + return -1;
> +
> + err = ioctl(*fd, ioc, arg);
>
> if (err)
> return err;
> --
> 2.33.0.309.g3052b89438-goog
>