Re: [PATCH 13/18] perf symbols: Protect dso cache tree using dso->lock

From: Arnaldo Carvalho de Melo
Date: Fri Jul 29 2016 - 11:09:12 EST


Em Fri, Jul 29, 2016 at 08:56:32PM +0800, Bryton Lee escreveu:
> Sorry to disturb you! Maybe this isn't a good question, but I would
> like to know in what situation the dwarf callchain unwind is
> processed concurrently. Could you give an example to elaborate on
> that? Thank you very much!

IIRC that was for things like 'perf top --call-graph dwarf', where there
are two threads: one traverses a set of histograms to present them on
the screen, while the other creates a new set of histograms from the
events it reads from the perf mmap. Jiri?
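
To make that shape concrete, here is a minimal, hypothetical sketch
(not perf's actual code; the thread names and the list are invented)
of two threads sharing one cache under a mutex, the way the
event-reading thread and the display thread end up sharing data such
as dso->data.cache:

/*
 * Hypothetical illustration only: one thread populates a shared cache
 * (standing in for the thread that reads events from the perf mmap and
 * unwinds), another walks it (standing in for the screen refresh).
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct cache_entry {
        unsigned long offset;
        struct cache_entry *next;
};

static struct cache_entry *cache;               /* shared dso-cache stand-in */
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

/* Simulates the thread creating histograms from events it reads. */
static void *event_reader(void *arg)
{
        (void)arg;
        for (unsigned long i = 0; i < 1000; i++) {
                struct cache_entry *e = malloc(sizeof(*e));

                if (!e)
                        break;
                e->offset = i * 4096;
                pthread_mutex_lock(&cache_lock);        /* like dso->lock */
                e->next = cache;
                cache = e;
                pthread_mutex_unlock(&cache_lock);
        }
        return NULL;
}

/* Simulates the thread traversing the data to present it on screen. */
static void *display_refresh(void *arg)
{
        (void)arg;
        for (int pass = 0; pass < 10; pass++) {
                unsigned long n = 0;

                pthread_mutex_lock(&cache_lock);
                for (struct cache_entry *e = cache; e; e = e->next)
                        n++;
                pthread_mutex_unlock(&cache_lock);
                printf("refresh %d: %lu cached entries\n", pass, n);
                usleep(1000);
        }
        return NULL;
}

int main(void)
{
        pthread_t reader, display;

        pthread_create(&reader, NULL, event_reader, NULL);
        pthread_create(&display, NULL, display_refresh, NULL);
        pthread_join(reader, NULL);
        pthread_join(display, NULL);
        return 0;
}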

- Arnaldo

> On Mon, May 18, 2015 at 11:50 PM, Arnaldo Carvalho de Melo
> <acme@xxxxxxxxxx> wrote:
> > From: Namhyung Kim <namhyung@xxxxxxxxxx>
> >
> > The dso cache is accessed during dwarf callchain unwind and it might be
> > processed concurrently. Protect it under dso->lock.
> >
> > Note that it doesn't protect dso_cache__find(). I think it's safe to
> > access the cache tree without the lock since we don't delete nodes.
> >
> > If it misses an existing node due to a rotation, it'll find it during
> > dso_cache__insert() anyway.
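
The pattern the patch below follows, roughly: lookups stay lockless,
insertion takes dso->lock and reports back any node that got there
first, and the caller frees its own copy when it loses that race.  A
minimal sketch under those assumptions (a plain list instead of the
rbtree perf uses, invented names like cache_get, memory-ordering
details glossed over just as in the description above):

#include <pthread.h>
#include <stdlib.h>

struct cache_node {
        unsigned long offset;
        struct cache_node *next;
};

static struct cache_node *cache_head;
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

/* Lockless lookup: nodes are never deleted, so a miss is the worst case. */
static struct cache_node *cache_find(unsigned long offset)
{
        for (struct cache_node *n = cache_head; n; n = n->next)
                if (n->offset == offset)
                        return n;
        return NULL;
}

/* Locked insert: returns the existing node if another thread won the race. */
static struct cache_node *cache_insert(struct cache_node *new)
{
        struct cache_node *old;

        pthread_mutex_lock(&cache_lock);
        old = cache_find(new->offset);          /* re-check under the lock */
        if (!old) {
                new->next = cache_head;
                cache_head = new;
        }
        pthread_mutex_unlock(&cache_lock);
        return old;
}

/* Caller pattern mirroring dso_cache__read(): free ours if we lost. */
static struct cache_node *cache_get(unsigned long offset)
{
        struct cache_node *node = cache_find(offset);

        if (node)
                return node;

        node = calloc(1, sizeof(*node));
        if (node) {
                struct cache_node *old;

                node->offset = offset;
                old = cache_insert(node);
                if (old) {                      /* we lost the race */
                        free(node);
                        node = old;
                }
        }
        return node;
}

int main(void)
{
        struct cache_node *a = cache_get(4096);
        struct cache_node *b = cache_get(4096);

        return (a && a == b) ? 0 : 1;   /* second lookup hits the cache */
}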
> >
> > Signed-off-by: Namhyung Kim <namhyung@xxxxxxxxxx>
> > Cc: Adrian Hunter <adrian.hunter@xxxxxxxxx>
> > Cc: Andi Kleen <andi@xxxxxxxxxxxxxx>
> > Cc: David Ahern <dsahern@xxxxxxxxx>
> > Cc: Frederic Weisbecker <fweisbec@xxxxxxxxx>
> > Cc: Jiri Olsa <jolsa@xxxxxxxxxx>
> > Cc: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>
> > Cc: Stephane Eranian <eranian@xxxxxxxxxx>
> > Link: http://lkml.kernel.org/r/1431909055-21442-27-git-send-email-namhyung@xxxxxxxxxx
> > Signed-off-by: Arnaldo Carvalho de Melo <acme@xxxxxxxxxx>
> > ---
> > tools/perf/util/dso.c | 34 +++++++++++++++++++++++++++-------
> > 1 file changed, 27 insertions(+), 7 deletions(-)
> >
> > diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
> > index 482d6024ef13..666e1db44541 100644
> > --- a/tools/perf/util/dso.c
> > +++ b/tools/perf/util/dso.c
> > @@ -495,10 +495,12 @@ bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
> > }
> >
> > static void
> > -dso_cache__free(struct rb_root *root)
> > +dso_cache__free(struct dso *dso)
> > {
> > + struct rb_root *root = &dso->data.cache;
> > struct rb_node *next = rb_first(root);
> >
> > + pthread_mutex_lock(&dso->lock);
> > while (next) {
> > struct dso_cache *cache;
> >
> > @@ -507,10 +509,12 @@ dso_cache__free(struct rb_root *root)
> > rb_erase(&cache->rb_node, root);
> > free(cache);
> > }
> > + pthread_mutex_unlock(&dso->lock);
> > }
> >
> > -static struct dso_cache *dso_cache__find(const struct rb_root *root, u64 offset)
> > +static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset)
> > {
> > + const struct rb_root *root = &dso->data.cache;
> > struct rb_node * const *p = &root->rb_node;
> > const struct rb_node *parent = NULL;
> > struct dso_cache *cache;
> > @@ -529,17 +533,20 @@ static struct dso_cache *dso_cache__find(const struct rb_root *root, u64 offset)
> > else
> > return cache;
> > }
> > +
> > return NULL;
> > }
> >
> > -static void
> > -dso_cache__insert(struct rb_root *root, struct dso_cache *new)
> > +static struct dso_cache *
> > +dso_cache__insert(struct dso *dso, struct dso_cache *new)
> > {
> > + struct rb_root *root = &dso->data.cache;
> > struct rb_node **p = &root->rb_node;
> > struct rb_node *parent = NULL;
> > struct dso_cache *cache;
> > u64 offset = new->offset;
> >
> > + pthread_mutex_lock(&dso->lock);
> > while (*p != NULL) {
> > u64 end;
> >
> > @@ -551,10 +558,17 @@ dso_cache__insert(struct rb_root *root, struct dso_cache *new)
> > p = &(*p)->rb_left;
> > else if (offset >= end)
> > p = &(*p)->rb_right;
> > + else
> > + goto out;
> > }
> >
> > rb_link_node(&new->rb_node, parent, p);
> > rb_insert_color(&new->rb_node, root);
> > +
> > + cache = NULL;
> > +out:
> > + pthread_mutex_unlock(&dso->lock);
> > + return cache;
> > }
> >
> > static ssize_t
> > @@ -572,6 +586,7 @@ static ssize_t
> > dso_cache__read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
> > {
> > struct dso_cache *cache;
> > + struct dso_cache *old;
> > ssize_t ret;
> >
> > do {
> > @@ -591,7 +606,12 @@ dso_cache__read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
> >
> > cache->offset = cache_offset;
> > cache->size = ret;
> > - dso_cache__insert(&dso->data.cache, cache);
> > + old = dso_cache__insert(dso, cache);
> > + if (old) {
> > + /* we lose the race */
> > + free(cache);
> > + cache = old;
> > + }
> >
> > ret = dso_cache__memcpy(cache, offset, data, size);
> >
> > @@ -608,7 +628,7 @@ static ssize_t dso_cache_read(struct dso *dso, u64 offset,
> > {
> > struct dso_cache *cache;
> >
> > - cache = dso_cache__find(&dso->data.cache, offset);
> > + cache = dso_cache__find(dso, offset);
> > if (cache)
> > return dso_cache__memcpy(cache, offset, data, size);
> > else
> > @@ -964,7 +984,7 @@ void dso__delete(struct dso *dso)
> >
> > dso__data_close(dso);
> > auxtrace_cache__free(dso->auxtrace_cache);
> > - dso_cache__free(&dso->data.cache);
> > + dso_cache__free(dso);
> > dso__free_a2l(dso);
> > zfree(&dso->symsrc_filename);
> > pthread_mutex_destroy(&dso->lock);
> > --
> > 2.1.0
> >
>
>
>
> --
> Best Regards
>
> Bryton.Lee