[RFC/PATCH 17/38] perf tools: Maintain map groups list in a leader thread
From: Namhyung Kim
Date: Fri Oct 02 2015 - 01:27:59 EST
To support multi-threaded perf report, we need to maintain time-sorted
map groups.  Add an ->mg_list member to struct thread and keep the list
sorted by time.  Leader threads now hold one extra refcount on the
current map groups in the list, so update the thread-mg-share test case
accordingly.

Currently a new map groups entry is only added when an exec (comm)
event is received.
Cc: Frederic Weisbecker <fweisbec@xxxxxxxxx>
Signed-off-by: Namhyung Kim <namhyung@xxxxxxxxxx>
---
tools/perf/tests/thread-mg-share.c | 7 ++-
tools/perf/util/event.c | 2 +
tools/perf/util/machine.c | 15 ++++-
tools/perf/util/map.c | 3 +
tools/perf/util/map.h | 2 +
tools/perf/util/thread.c | 111 ++++++++++++++++++++++++++++++++++++-
tools/perf/util/thread.h | 3 +
7 files changed, 138 insertions(+), 5 deletions(-)
diff --git a/tools/perf/tests/thread-mg-share.c b/tools/perf/tests/thread-mg-share.c
index 01fabb19d746..b258d5298b9b 100644
--- a/tools/perf/tests/thread-mg-share.c
+++ b/tools/perf/tests/thread-mg-share.c
@@ -23,6 +23,9 @@ int test__thread_mg_share(void)
* with several threads and checks they properly share and
* maintain map groups info (struct map_groups).
*
+ * Note that a leader thread has one more refcnt for its
+ * (current) map groups.
+ *
* thread group (pid: 0, tids: 0, 1, 2, 3)
* other group (pid: 4, tids: 4, 5)
*/
@@ -43,7 +46,7 @@ int test__thread_mg_share(void)
leader && t1 && t2 && t3 && other);
mg = leader->mg;
- TEST_ASSERT_EQUAL("wrong refcnt", atomic_read(&mg->refcnt), 4);
+ TEST_ASSERT_EQUAL("wrong refcnt", atomic_read(&mg->refcnt), 5);
/* test the map groups pointer is shared */
TEST_ASSERT_VAL("map groups don't match", mg == t1->mg);
@@ -71,7 +74,7 @@ int test__thread_mg_share(void)
machine__remove_thread(machine, other_leader);
other_mg = other->mg;
- TEST_ASSERT_EQUAL("wrong refcnt", atomic_read(&other_mg->refcnt), 2);
+ TEST_ASSERT_EQUAL("wrong refcnt", atomic_read(&other_mg->refcnt), 3);
TEST_ASSERT_VAL("map groups don't match", other_mg == other_leader->mg);
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 3dff1b5cd4cc..887f18266ab5 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -914,6 +914,8 @@ void thread__find_addr_map(struct thread *thread, u8 cpumode,
return;
}
+ BUG_ON(mg == NULL);
+
if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
al->level = 'k';
mg = &machine->kmaps;
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 7cfaa2c3f131..3373e8455945 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -349,8 +349,19 @@ static void machine__update_thread_pid(struct machine *machine,
if (!leader)
goto out_err;
- if (!leader->mg)
- leader->mg = map_groups__new(machine);
+ if (!leader->mg) {
+ struct map_groups *mg = map_groups__new(machine);
+
+ if (mg == NULL) {
+ pr_err("Not enough memory for map groups\n");
+ return;
+ }
+
+ if (thread__set_map_groups(leader, mg, 0) < 0) {
+ map_groups__put(mg);
+ goto out_err;
+ }
+ }
if (!leader->mg)
goto out_err;
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 4e38c396a897..addd4b323027 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -471,6 +471,8 @@ void map_groups__init(struct map_groups *mg, struct machine *machine)
}
mg->machine = machine;
atomic_set(&mg->refcnt, 1);
+ mg->timestamp = 0;
+ INIT_LIST_HEAD(&mg->list);
}
static void __maps__purge(struct maps *maps)
@@ -527,6 +529,7 @@ struct map_groups *map_groups__new(struct machine *machine)
void map_groups__delete(struct map_groups *mg)
{
map_groups__exit(mg);
+ list_del(&mg->list);
free(mg);
}
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 7309d64ce39e..1e3313a22d3a 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -68,6 +68,8 @@ struct map_groups {
struct maps maps[MAP__NR_TYPES];
struct machine *machine;
atomic_t refcnt;
+ u64 timestamp;
+ struct list_head list;
};
struct map_groups *map_groups__new(struct machine *machine);
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index ad7c2a00bff8..33de8b010282 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -11,13 +11,79 @@
#include "unwind.h"
#include "machine.h"
+struct map_groups *thread__get_map_groups(struct thread *thread, u64 timestamp)
+{
+ struct map_groups *mg;
+ struct thread *leader = thread;
+
+ BUG_ON(thread->mg == NULL);
+
+ if (thread->tid != thread->pid_) {
+ leader = machine__find_thread_by_time(thread->mg->machine,
+ thread->pid_, thread->pid_,
+ timestamp);
+ if (leader == NULL)
+ goto out;
+ }
+
+ list_for_each_entry(mg, &leader->mg_list, list)
+ if (timestamp >= mg->timestamp)
+ return mg;
+
+out:
+ return thread->mg;
+}
+
+int thread__set_map_groups(struct thread *thread, struct map_groups *mg,
+ u64 timestamp)
+{
+ struct list_head *pos;
+ struct map_groups *old;
+
+ if (mg == NULL)
+ return -ENOMEM;
+
+ /*
+ * Only a leader thread can have map groups list - others
+ * reference it through map_groups__get. This means the
+ * leader thread will have one more refcnt than others.
+ */
+ if (thread->tid != thread->pid_)
+ return -EINVAL;
+
+ if (thread->mg) {
+ BUG_ON(atomic_read(&thread->mg->refcnt) <= 1);
+ map_groups__put(thread->mg);
+ }
+
+ /* sort by time */
+ list_for_each(pos, &thread->mg_list) {
+ old = list_entry(pos, struct map_groups, list);
+ if (timestamp > old->timestamp)
+ break;
+ }
+
+ list_add_tail(&mg->list, pos);
+ mg->timestamp = timestamp;
+
+ /* set current ->mg to most recent one */
+ thread->mg = list_first_entry(&thread->mg_list, struct map_groups, list);
+ /* increase one more refcnt for current */
+ map_groups__get(thread->mg);
+
+ return 0;
+}
+
int thread__init_map_groups(struct thread *thread, struct machine *machine)
{
struct thread *leader;
pid_t pid = thread->pid_;
if (pid == thread->tid || pid == -1) {
- thread->mg = map_groups__new(machine);
+ struct map_groups *mg = map_groups__new(machine);
+
+ if (thread__set_map_groups(thread, mg, 0) < 0)
+ map_groups__put(mg);
} else {
leader = __machine__findnew_thread(machine, pid, pid);
if (leader)
@@ -39,6 +105,7 @@ struct thread *thread__new(pid_t pid, pid_t tid)
thread->ppid = -1;
thread->cpu = -1;
INIT_LIST_HEAD(&thread->comm_list);
+ INIT_LIST_HEAD(&thread->mg_list);
if (unwind__prepare_access(thread) < 0)
goto err_thread;
@@ -69,6 +136,7 @@ err_thread:
void thread__delete(struct thread *thread)
{
struct comm *comm, *tmp;
+ struct map_groups *mg, *tmp_mg;
BUG_ON(!RB_EMPTY_NODE(&thread->rb_node));
BUG_ON(!list_empty(&thread->tid_list));
@@ -79,6 +147,10 @@ void thread__delete(struct thread *thread)
map_groups__put(thread->mg);
thread->mg = NULL;
}
+ /* only leader threads have mg list */
+ list_for_each_entry_safe(mg, tmp_mg, &thread->mg_list, list)
+ map_groups__put(mg);
+
list_for_each_entry_safe(comm, tmp, &thread->comm_list, list) {
list_del(&comm->list);
comm__free(comm);
@@ -152,6 +224,9 @@ struct comm *thread__comm_by_time(const struct thread *thread, u64 timestamp)
return list_last_entry(&thread->comm_list, struct comm, list);
}
+static int thread__clone_map_groups(struct thread *thread,
+ struct thread *parent);
+
int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
bool exec)
{
@@ -182,6 +257,40 @@ int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
unwind__flush_access(thread);
}
+ if (exec) {
+ struct machine *machine;
+
+ BUG_ON(thread->mg == NULL || thread->mg->machine == NULL);
+
+ machine = thread->mg->machine;
+
+ if (thread->tid != thread->pid_) {
+ struct map_groups *old = thread->mg;
+ struct thread *leader;
+
+ leader = machine__findnew_thread(machine, thread->pid_,
+ thread->pid_);
+
+ /* now it'll be a new leader */
+ thread->pid_ = thread->tid;
+
+ thread->mg = map_groups__new(old->machine);
+ if (thread->mg == NULL)
+ return -ENOMEM;
+
+ /* save current mg in the new leader */
+ thread__clone_map_groups(thread, leader);
+
+ /* current mg of leader thread needs one more refcnt */
+ map_groups__get(thread->mg);
+
+ thread__set_map_groups(thread, thread->mg, old->timestamp);
+ }
+
+ /* create a new mg for newly executed binary */
+ thread__set_map_groups(thread, map_groups__new(machine), timestamp);
+ }
+
thread->comm_set = true;
return 0;
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 97026a9660ec..c8463d08a6dd 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -16,6 +16,7 @@ struct thread {
struct rb_node rb_node;
struct list_head tid_list;
struct map_groups *mg;
+ struct list_head mg_list;
pid_t pid_; /* Not all tools update this */
pid_t tid;
pid_t ppid;
@@ -71,6 +72,8 @@ struct comm *thread__exec_comm(const struct thread *thread);
struct comm *thread__comm_by_time(const struct thread *thread, u64 timestamp);
const char *thread__comm_str(const struct thread *thread);
const char *thread__comm_str_by_time(const struct thread *thread, u64 timestamp);
+struct map_groups *thread__get_map_groups(struct thread *thread, u64 timestamp);
+int thread__set_map_groups(struct thread *thread, struct map_groups *mg, u64 timestamp);
void thread__insert_map(struct thread *thread, struct map *map);
int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp);
size_t thread__fprintf(struct thread *thread, FILE *fp);
--
2.6.0