[PATCH RFC 2/6] perf dso: Refactor dso_cache__read()

From: Adrian Hunter
Date: Fri Oct 25 2019 - 09:01:17 EST


Refactor dso_cache__read() to separate populating the cache from copying
data from it. This is preparation for adding a cache "write" that will
update the data in the cache.

Signed-off-by: Adrian Hunter <adrian.hunter@xxxxxxxxx>
---
tools/perf/util/dso.c | 64 +++++++++++++++++++++++++------------------
1 file changed, 37 insertions(+), 27 deletions(-)
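
Not part of this patch, just a sketch for review context: with dso_cache__find() now returning the (possibly freshly populated) cache entry, a cache "write" only has to reverse the direction of the final copy. The helpers dso_cache__update() and dso_cache_write() below are hypothetical names used for illustration; only dso_cache__find(), dso_cache__memcpy() and struct dso_cache exist in the tree after this patch.

static ssize_t dso_cache__update(struct dso_cache *cache, u64 offset,
				 const u8 *data, ssize_t size)
{
	u64 cache_offset = offset - cache->offset;
	u64 cache_size   = min(cache->size - cache_offset, (u64)size);

	/* Mirror image of dso_cache__memcpy(): copy into the cached page */
	memcpy(cache->data + cache_offset, data, cache_size);
	return cache_size;
}

static ssize_t dso_cache_write(struct dso *dso, struct machine *machine,
			       u64 offset, const u8 *data, ssize_t size)
{
	struct dso_cache *cache;
	ssize_t ret = 0;

	/* Same lookup-or-populate step as dso_cache_read() below */
	cache = dso_cache__find(dso, machine, offset, &ret);
	if (!cache)
		return ret;

	return dso_cache__update(cache, offset, data, size);
}

The point of the refactor is visible here: read and write would share the lookup-or-populate step and differ only in which way the data is copied.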

diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index e11ddf86f2b3..460330d125b6 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -768,7 +768,7 @@ dso_cache__free(struct dso *dso)
 	pthread_mutex_unlock(&dso->lock);
 }
 
-static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset)
+static struct dso_cache *__dso_cache__find(struct dso *dso, u64 offset)
 {
 	const struct rb_root *root = &dso->data.cache;
 	struct rb_node * const *p = &root->rb_node;
@@ -863,54 +863,64 @@ static ssize_t file_read(struct dso *dso, struct machine *machine,
 	return ret;
 }
 
-static ssize_t
-dso_cache__read(struct dso *dso, struct machine *machine,
-		u64 offset, u8 *data, ssize_t size)
+static struct dso_cache *dso_cache__populate(struct dso *dso,
+					     struct machine *machine,
+					     u64 offset, ssize_t *ret)
 {
 	u64 cache_offset = offset & DSO__DATA_CACHE_MASK;
 	struct dso_cache *cache;
 	struct dso_cache *old;
-	ssize_t ret;
 
 	cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
-	if (!cache)
-		return -ENOMEM;
+	if (!cache) {
+		*ret = -ENOMEM;
+		return NULL;
+	}
 
 	if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
-		ret = bpf_read(dso, cache_offset, cache->data);
+		*ret = bpf_read(dso, cache_offset, cache->data);
 	else
-		ret = file_read(dso, machine, cache_offset, cache->data);
+		*ret = file_read(dso, machine, cache_offset, cache->data);
 
-	if (ret > 0) {
-		cache->offset = cache_offset;
-		cache->size   = ret;
+	if (*ret <= 0) {
+		free(cache);
+		return NULL;
+	}
 
-		old = dso_cache__insert(dso, cache);
-		if (old) {
-			/* we lose the race */
-			free(cache);
-			cache = old;
-		}
+	cache->offset = cache_offset;
+	cache->size   = *ret;
 
-		ret = dso_cache__memcpy(cache, offset, data, size);
+	old = dso_cache__insert(dso, cache);
+	if (old) {
+		/* we lose the race */
+		free(cache);
+		cache = old;
 	}
 
-	if (ret <= 0)
-		free(cache);
+	return cache;
+}
 
-	return ret;
+static struct dso_cache *dso_cache__find(struct dso *dso,
+					 struct machine *machine,
+					 u64 offset,
+					 ssize_t *ret)
+{
+	struct dso_cache *cache = __dso_cache__find(dso, offset);
+
+	return cache ? cache : dso_cache__populate(dso, machine, offset, ret);
 }
 
 static ssize_t dso_cache_read(struct dso *dso, struct machine *machine,
 			      u64 offset, u8 *data, ssize_t size)
 {
 	struct dso_cache *cache;
+	ssize_t ret = 0;
 
-	cache = dso_cache__find(dso, offset);
-	if (cache)
-		return dso_cache__memcpy(cache, offset, data, size);
-	else
-		return dso_cache__read(dso, machine, offset, data, size);
+	cache = dso_cache__find(dso, machine, offset, &ret);
+	if (!cache)
+		return ret;
+
+	return dso_cache__memcpy(cache, offset, data, size);
 }
 
 /*
--
2.17.1