On 6/24/21 6:03 AM, Shuyi Cheng wrote:
To allow older kernels to use the CO-RE feature, load the vmlinux BTF from a user-specified path.
Following Andrii's comments in [0], add a custom_btf_path parameter to
bpf_obj_open_opts, so that the custom BTF path can be passed in directly
through the skeleton's <objname>_bpf__open_opts function.
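(For anyone who wants to try this out, usage through a generated skeleton would
look roughly like the sketch below. The "myprog" skeleton name and the BTF path
are made up, and the option field name follows this revision of the patch; only
the open_opts plumbing itself is what the commit message describes.)

#include <bpf/libbpf.h>
#include "myprog.skel.h"	/* hypothetical skeleton from 'bpftool gen skeleton' */

int main(void)
{
	struct myprog_bpf *skel;
	/* custom_btf_path is the field added by this patch; the path is just an example */
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
		.custom_btf_path = "/tmp/vmlinux-5.4.0.btf",
	);

	skel = myprog_bpf__open_opts(&opts);
	if (!skel)
		return 1;
	/* ... load/attach as usual ... */
	myprog_bpf__destroy(skel);
	return 0;
}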
A patch with similar functionality was posted earlier by another developer,
but unfortunately it was never followed up. See [1].
[0] https://lore.kernel.org/bpf/CAEf4BzbJZLjNoiK8_VfeVg_Vrg=9iYFv+po-38SMe=UzwDKJ=Q@xxxxxxxxxxxxxx/#t
[1] https://yhbt.net/lore/all/CAEf4Bzbgw49w2PtowsrzKQNcxD4fZRE6AKByX-5-dMo-+oWHHA@xxxxxxxxxxxxxx/
Signed-off-by: Shuyi Cheng <chengshuyi@xxxxxxxxxxxxxxxxx>
---
tools/lib/bpf/libbpf.c | 23 ++++++++++++++++++++---
tools/lib/bpf/libbpf.h | 6 +++++-
2 files changed, 25 insertions(+), 4 deletions(-)
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 1e04ce7..518b19f 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -509,6 +509,8 @@ struct bpf_object {
void *priv;
bpf_object_clear_priv_t clear_priv;
+ char *custom_btf_path;
+
nit: This should rather go to the 'Parse and load BTF vmlinux if any of [...]'
section of struct bpf_object, and for consistency, I'd keep the btf_ prefix,
like: char *btf_custom_path
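If it helps to visualize the suggestion, i.e. keeping the override path grouped
with the vmlinux BTF state and under the btf_ prefix, a trimmed-down sketch
(everything else in struct bpf_object elided, not the actual layout):

struct btf;	/* opaque, from <bpf/btf.h> */

struct bpf_object {
	/* ... other fields elided ... */

	/* Parse and load BTF vmlinux if any of ... */
	struct btf *btf_vmlinux;
	/* vmlinux BTF override path passed in via the open opts, NULL if unset */
	char *btf_custom_path;

	/* ... */
	char path[];
};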
char path[];
};
#define obj_elf_valid(o) ((o)->efile.elf)
@@ -2679,8 +2681,15 @@ static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
if (!force && !obj_needs_vmlinux_btf(obj))
return 0;
- obj->btf_vmlinux = libbpf_find_kernel_btf();
- err = libbpf_get_error(obj->btf_vmlinux);
+ if (obj->custom_btf_path) {
+ obj->btf_vmlinux = btf__parse(obj->custom_btf_path, NULL);
+ err = libbpf_get_error(obj->btf_vmlinux);
+ pr_debug("loading custom vmlinux BTF '%s': %d\n", obj->custom_btf_path, err);
+ } else {
+ obj->btf_vmlinux = libbpf_find_kernel_btf();
+ err = libbpf_get_error(obj->btf_vmlinux);
+ }
Couldn't we do something like (only compile-tested):
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index b46760b93bb4..5b88ce3e483c 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -4394,7 +4394,7 @@ static int btf_dedup_remap_types(struct btf_dedup *d)
* Probe few well-known locations for vmlinux kernel image and try to load BTF
* data out of it to use for target BTF.
*/
-struct btf *libbpf_find_kernel_btf(void)
+static struct btf *__libbpf_find_kernel_btf(char *btf_custom_path)
{
struct {
const char *path_fmt;
@@ -4402,6 +4402,8 @@ struct btf *libbpf_find_kernel_btf(void)
} locations[] = {
/* try canonical vmlinux BTF through sysfs first */
{ "/sys/kernel/btf/vmlinux", true /* raw BTF */ },
+ /* try user defined vmlinux ELF if a path was specified */
+ { btf_custom_path },
/* fall back to trying to find vmlinux ELF on disk otherwise */
{ "/boot/vmlinux-%1$s" },
{ "/lib/modules/%1$s/vmlinux-%1$s" },
@@ -4419,11 +4421,11 @@ struct btf *libbpf_find_kernel_btf(void)
uname(&buf);
for (i = 0; i < ARRAY_SIZE(locations); i++) {
+ if (!locations[i].path_fmt)
+ continue;
snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release);
-
if (access(path, R_OK))
continue;
-
if (locations[i].raw_btf)
btf = btf__parse_raw(path);
else
@@ -4440,6 +4442,11 @@ struct btf *libbpf_find_kernel_btf(void)
return libbpf_err_ptr(-ESRCH);
}
+struct btf *libbpf_find_kernel_btf(void)
+{
+ return __libbpf_find_kernel_btf(NULL);
+}
+
int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx)
{
int i, n, err;
And then you just call it as:
obj->btf_vmlinux = __libbpf_find_kernel_btf(obj->btf_custom_path);
err = libbpf_get_error(obj->btf_vmlinux);
if (err) {
pr_warn("Error loading vmlinux BTF: %d\n", err);
obj->btf_vmlinux = NULL;