[PATCH V7 11/25] perf auxtrace: Add helpers for queuing AUX area tracing data
From: Adrian Hunter
Date: Tue Mar 31 2015 - 07:46:41 EST
Provide functions to queue AUX area tracing data
buffers for processing. An AUX area decoder
need not use the queues; however Intel BTS and Intel PT
will use them.
There is one queue for each of the mmap buffers that
were used for recording. Because those mmaps were
associated with per-cpu or per-thread contexts, the
data is time-ordered with respect to those contexts.
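For example, a decoder might walk the queues roughly as
follows. This is only an illustrative sketch, not part of
the patch; my_process_queues() and my_decode_buffer() are
hypothetical, implementation-specific helpers:

	static int my_process_queues(struct auxtrace_queues *queues, int fd)
	{
		unsigned int i;

		for (i = 0; i < queues->nr_queues; i++) {
			struct auxtrace_queue *queue = &queues->queue_array[i];
			struct auxtrace_buffer *buffer = NULL;

			/* Buffers in a queue are time-ordered per cpu/thread */
			while ((buffer = auxtrace_buffer__next(queue, buffer))) {
				void *data = auxtrace_buffer__get_data(buffer, fd);
				int err;

				if (!data)
					return -ENOMEM;

				/* Hypothetical decoder callback */
				err = my_decode_buffer(data, buffer->size,
						       queue->cpu, queue->tid);
				/* Unmap / release the data when done with it */
				auxtrace_buffer__put_data(buffer);
				if (err)
					return err;
			}
		}

		return 0;
	}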
Signed-off-by: Adrian Hunter <adrian.hunter@xxxxxxxxx>
---
tools/perf/util/auxtrace.c | 305 +++++++++++++++++++++++++++++++++++++++++++++
tools/perf/util/auxtrace.h | 87 +++++++++++++
2 files changed, 392 insertions(+)
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index a1f0eef..afcb09d 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -22,11 +22,15 @@
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
+#include <linux/string.h>
+#include <sys/param.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
+#include <limits.h>
#include <errno.h>
+#include <linux/list.h>
#include "../perf.h"
#include "util.h"
@@ -117,6 +121,241 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
}
}
+#define AUXTRACE_INIT_NR_QUEUES 32
+
+static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)
+{
+ struct auxtrace_queue *queue_array;
+ unsigned int max_nr_queues, i;
+
+ max_nr_queues = MIN(UINT_MAX, SIZE_MAX) / sizeof(struct auxtrace_queue);
+ if (nr_queues > max_nr_queues)
+ return NULL;
+
+ queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue));
+ if (!queue_array)
+ return NULL;
+
+ for (i = 0; i < nr_queues; i++) {
+ INIT_LIST_HEAD(&queue_array[i].head);
+ queue_array[i].priv = NULL;
+ }
+
+ return queue_array;
+}
+
+int auxtrace_queues__init(struct auxtrace_queues *queues)
+{
+ queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;
+ queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
+ if (!queues->queue_array)
+ return -ENOMEM;
+ return 0;
+}
+
+static int auxtrace_queues__grow(struct auxtrace_queues *queues,
+ unsigned int new_nr_queues)
+{
+ unsigned int nr_queues = queues->nr_queues;
+ struct auxtrace_queue *queue_array;
+ unsigned int i;
+
+ if (!nr_queues)
+ nr_queues = AUXTRACE_INIT_NR_QUEUES;
+
+ while (nr_queues && nr_queues < new_nr_queues)
+ nr_queues <<= 1;
+
+ if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)
+ return -EINVAL;
+
+ queue_array = auxtrace_alloc_queue_array(nr_queues);
+ if (!queue_array)
+ return -ENOMEM;
+
+ for (i = 0; i < queues->nr_queues; i++) {
+ list_splice_tail(&queues->queue_array[i].head,
+ &queue_array[i].head);
+ queue_array[i].priv = queues->queue_array[i].priv;
+ }
+
+ queues->nr_queues = nr_queues;
+ queues->queue_array = queue_array;
+
+ return 0;
+}
+
+static void *auxtrace_copy_data(u64 size, struct perf_session *session)
+{
+ int fd = perf_data_file__fd(session->file);
+ void *p;
+ ssize_t ret;
+
+ if (size > SSIZE_MAX)
+ return NULL;
+
+ p = malloc(size);
+ if (!p)
+ return NULL;
+
+ ret = readn(fd, p, size);
+ if (ret != (ssize_t)size) {
+ free(p);
+ return NULL;
+ }
+
+ return p;
+}
+
+static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
+ unsigned int idx,
+ struct auxtrace_buffer *buffer)
+{
+ struct auxtrace_queue *queue;
+ int err;
+
+ if (idx >= queues->nr_queues) {
+ err = auxtrace_queues__grow(queues, idx + 1);
+ if (err)
+ return err;
+ }
+
+ queue = &queues->queue_array[idx];
+
+ if (!queue->set) {
+ queue->set = true;
+ queue->tid = buffer->tid;
+ queue->cpu = buffer->cpu;
+ } else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) {
+ pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n",
+ queue->cpu, queue->tid, buffer->cpu, buffer->tid);
+ return -EINVAL;
+ }
+
+ buffer->buffer_nr = queues->next_buffer_nr++;
+
+ list_add_tail(&buffer->list, &queue->head);
+
+ queues->new_data = true;
+ queues->populated = true;
+
+ return 0;
+}
+
+/* Limit buffers to 32MiB on 32-bit */
+#define BUFFER_LIMIT_FOR_32_BIT (32 * 1024 * 1024)
+
+static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
+ unsigned int idx,
+ struct auxtrace_buffer *buffer)
+{
+ u64 sz = buffer->size;
+ bool consecutive = false;
+ struct auxtrace_buffer *b;
+ int err;
+
+ while (sz > BUFFER_LIMIT_FOR_32_BIT) {
+ b = memdup(buffer, sizeof(struct auxtrace_buffer));
+ if (!b)
+ return -ENOMEM;
+ b->size = BUFFER_LIMIT_FOR_32_BIT;
+ b->consecutive = consecutive;
+ err = auxtrace_queues__add_buffer(queues, idx, b);
+ if (err) {
+ auxtrace_buffer__free(b);
+ return err;
+ }
+ buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT;
+ sz -= BUFFER_LIMIT_FOR_32_BIT;
+ consecutive = true;
+ }
+
+ buffer->size = sz;
+ buffer->consecutive = consecutive;
+
+ return 0;
+}
+
+static int auxtrace_queues__add_event_buffer(struct auxtrace_queues *queues,
+ struct perf_session *session,
+ unsigned int idx,
+ struct auxtrace_buffer *buffer)
+{
+ if (session->one_mmap) {
+ buffer->data = buffer->data_offset - session->one_mmap_offset +
+ session->one_mmap_addr;
+ } else if (perf_data_file__is_pipe(session->file)) {
+ buffer->data = auxtrace_copy_data(buffer->size, session);
+ if (!buffer->data)
+ return -ENOMEM;
+ buffer->data_needs_freeing = true;
+ } else if (BITS_PER_LONG == 32 &&
+ buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
+ int err;
+
+ err = auxtrace_queues__split_buffer(queues, idx, buffer);
+ if (err)
+ return err;
+ }
+
+ return auxtrace_queues__add_buffer(queues, idx, buffer);
+}
+
+int auxtrace_queues__add_event(struct auxtrace_queues *queues,
+ struct perf_session *session,
+ union perf_event *event, off_t data_offset,
+ struct auxtrace_buffer **buffer_ptr)
+{
+ struct auxtrace_buffer *buffer;
+ unsigned int idx;
+ int err;
+
+ buffer = zalloc(sizeof(struct auxtrace_buffer));
+ if (!buffer)
+ return -ENOMEM;
+
+ buffer->pid = -1;
+ buffer->tid = event->auxtrace.tid;
+ buffer->cpu = event->auxtrace.cpu;
+ buffer->data_offset = data_offset;
+ buffer->offset = event->auxtrace.offset;
+ buffer->reference = event->auxtrace.reference;
+ buffer->size = event->auxtrace.size;
+ idx = event->auxtrace.idx;
+
+ err = auxtrace_queues__add_event_buffer(queues, session, idx, buffer);
+ if (err)
+ goto out_err;
+
+ if (buffer_ptr)
+ *buffer_ptr = buffer;
+
+ return 0;
+
+out_err:
+ auxtrace_buffer__free(buffer);
+ return err;
+}
+
+void auxtrace_queues__free(struct auxtrace_queues *queues)
+{
+ unsigned int i;
+
+ for (i = 0; i < queues->nr_queues; i++) {
+ while (!list_empty(&queues->queue_array[i].head)) {
+ struct auxtrace_buffer *buffer;
+
+ buffer = list_entry(queues->queue_array[i].head.next,
+ struct auxtrace_buffer, list);
+ list_del(&buffer->list);
+ auxtrace_buffer__free(buffer);
+ }
+ }
+
+ zfree(&queues->queue_array);
+ queues->nr_queues = 0;
+}
+
size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr)
{
if (itr)
@@ -169,6 +408,72 @@ auxtrace_record__init(struct perf_evlist *evlist __maybe_unused, int *err)
return NULL;
}
+struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
+ struct auxtrace_buffer *buffer)
+{
+ if (buffer) {
+ if (list_is_last(&buffer->list, &queue->head))
+ return NULL;
+ return list_entry(buffer->list.next, struct auxtrace_buffer,
+ list);
+ } else {
+ if (list_empty(&queue->head))
+ return NULL;
+ return list_entry(queue->head.next, struct auxtrace_buffer,
+ list);
+ }
+}
+
+void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
+{
+ size_t adj = buffer->data_offset & (page_size - 1);
+ size_t size = buffer->size + adj;
+ off_t file_offset = buffer->data_offset - adj;
+ void *addr;
+
+ if (buffer->data)
+ return buffer->data;
+
+ addr = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, file_offset);
+ if (addr == MAP_FAILED)
+ return NULL;
+
+ buffer->mmap_addr = addr;
+ buffer->mmap_size = size;
+
+ buffer->data = addr + adj;
+
+ return buffer->data;
+}
+
+void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer)
+{
+ if (!buffer->data || !buffer->mmap_addr)
+ return;
+ munmap(buffer->mmap_addr, buffer->mmap_size);
+ buffer->mmap_addr = NULL;
+ buffer->mmap_size = 0;
+ buffer->data = NULL;
+ buffer->use_data = NULL;
+}
+
+void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer)
+{
+ auxtrace_buffer__put_data(buffer);
+ if (buffer->data_needs_freeing) {
+ buffer->data_needs_freeing = false;
+ zfree(&buffer->data);
+ buffer->use_data = NULL;
+ buffer->size = 0;
+ }
+}
+
+void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
+{
+ auxtrace_buffer__drop_data(buffer);
+ free(buffer);
+}
+
void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
int code, int cpu, pid_t pid, pid_t tid, u64 ip,
const char *msg)
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index a7dfb97..8849bc0 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -19,6 +19,7 @@
#include <sys/types.h>
#include <stdbool.h>
#include <stddef.h>
+#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/types.h>
@@ -93,6 +94,80 @@ struct auxtrace {
};
/**
+ * struct auxtrace_buffer - a buffer containing AUX area tracing data.
+ * @list: buffers are queued in a list held by struct auxtrace_queue
+ * @size: size of the buffer in bytes
+ * @pid: in per-thread mode, the pid this buffer is associated with
+ * @tid: in per-thread mode, the tid this buffer is associated with
+ * @cpu: in per-cpu mode, the cpu this buffer is associated with
+ * @data: actual buffer data (can be null if the data has not been loaded)
+ * @data_offset: file offset at which the buffer can be read
+ * @mmap_addr: mmap address at which the buffer can be read
+ * @mmap_size: size of the mmap at @mmap_addr
+ * @data_needs_freeing: @data was malloc'd so free it when it is no longer
+ * needed
+ * @consecutive: the original data was split up and this buffer is consecutive
+ * to the previous buffer
+ * @offset: offset as determined by aux_head / aux_tail members of struct
+ * perf_event_mmap_page
+ * @reference: an implementation-specific reference determined when the data is
+ * recorded
+ * @buffer_nr: used to number each buffer
+ * @use_size: implementation actually only uses this number of bytes
+ * @use_data: implementation actually only uses data starting at this address
+ */
+struct auxtrace_buffer {
+ struct list_head list;
+ size_t size;
+ pid_t pid;
+ pid_t tid;
+ int cpu;
+ void *data;
+ off_t data_offset;
+ void *mmap_addr;
+ size_t mmap_size;
+ bool data_needs_freeing;
+ bool consecutive;
+ u64 offset;
+ u64 reference;
+ u64 buffer_nr;
+ size_t use_size;
+ void *use_data;
+};
+
+/**
+ * struct auxtrace_queue - a queue of AUX area tracing data buffers.
+ * @head: head of buffer list
+ * @tid: in per-thread mode, the tid this queue is associated with
+ * @cpu: in per-cpu mode, the cpu this queue is associated with
+ * @set: %true once this queue has been dedicated to a specific thread or cpu
+ * @priv: implementation-specific data
+ */
+struct auxtrace_queue {
+ struct list_head head;
+ pid_t tid;
+ int cpu;
+ bool set;
+ void *priv;
+};
+
+/**
+ * struct auxtrace_queues - an array of AUX area tracing queues.
+ * @queue_array: array of queues
+ * @nr_queues: number of queues
+ * @new_data: set whenever new data is queued
+ * @populated: queues have been fully populated using the auxtrace_index
+ * @next_buffer_nr: used to number each buffer
+ */
+struct auxtrace_queues {
+ struct auxtrace_queue *queue_array;
+ unsigned int nr_queues;
+ bool new_data;
+ bool populated;
+ u64 next_buffer_nr;
+};
+
+/**
* struct auxtrace_mmap - records an mmap of the auxtrace buffer.
* @base: address of mapped area
* @userpg: pointer to buffer's perf_event_mmap_page
@@ -190,6 +265,18 @@ typedef int (*process_auxtrace_t)(struct perf_tool *tool,
int auxtrace_mmap__read(struct auxtrace_mmap *mm, struct auxtrace_record *itr,
struct perf_tool *tool, process_auxtrace_t fn);
+int auxtrace_queues__init(struct auxtrace_queues *queues);
+int auxtrace_queues__add_event(struct auxtrace_queues *queues,
+ struct perf_session *session,
+ union perf_event *event, off_t data_offset,
+ struct auxtrace_buffer **buffer_ptr);
+void auxtrace_queues__free(struct auxtrace_queues *queues);
+struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
+ struct auxtrace_buffer *buffer);
+void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd);
+void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer);
+void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer);
+void auxtrace_buffer__free(struct auxtrace_buffer *buffer);
struct auxtrace_record *auxtrace_record__init(struct perf_evlist *evlist,
int *err);
--
1.9.1