[PATCH 44/44] perf annotate: Use absolute addresses to calculate jump target offsets
From: Arnaldo Carvalho de Melo
Date: Sat Mar 24 2018 - 16:05:29 EST
From: Arnaldo Carvalho de Melo <acme@xxxxxxxxxx>
These types of jumps were confusing the annotate browser:
entry_SYSCALL_64 /lib/modules/4.16.0-rc5-00086-gdf09348f78dc/build/vmlinux
entry_SYSCALL_64 /lib/modules/4.16.0-rc5-00086-gdf09348f78dc/build/vmlinux
Percent│ffffffff81a00020:   swapgs
<SNIP>
       │ffffffff81a00128: ↓ jae    ffffffff81a00139 <syscall_return_via_sysret+0x53>
<SNIP>
       │ffffffff81a00155: → jmpq   *0x825d2d(%rip)        # ffffffff82225e88 <pv_cpu_ops+0xe8>
I.e. the syscall_return_via_sysret function is actually "inside" the
entry_SYSCALL_64 function, and the offsets in jumps like these (+0x53)
are relative to syscall_return_via_sysret, not to entry_SYSCALL_64.
Or this may be some artifact in how the assembler marks the start and
end of a function and how this ends up in the ELF symtab for vmlinux,
i.e. syscall_return_via_sysret() isn't "inside" entry_SYSCALL_64, but
just right after it.
From readelf -sw vmlinux:
80267: ffffffff81a00020 315 NOTYPE GLOBAL DEFAULT 1 entry_SYSCALL_64
316: ffffffff81a000e6 0 NOTYPE LOCAL DEFAULT 1 syscall_return_via_sysret
0xffffffff81a00020 + 315 > 0xffffffff81a000e6
So instead of looking for offsets after that last '+' sign, calculate
offsets for jump target addresses that are inside the function being
disassembled from the absolute address, 0xffffffff81a00139 in this case,
subtracting from it the objdump address for the start of the function
being disassembled, entry_SYSCALL_64() in this case.
So, before this patch:
entry_SYSCALL_64 /lib/modules/4.16.0-rc5-00086-gdf09348f78dc/build/vmlinux
Percentâ pop %r10
â pop %r9
â pop %r8
â pop %rax
â pop %rsi
â pop %rdx
â pop %rsi
â mov %rsp,%rdi
â mov %gs:0x5004,%rsp
â pushq 0x28(%rdi)
â pushq (%rdi)
â push %rax
â â jmp 6c
â mov %cr3,%rdi
â â jmp 62
â mov %rdi,%rax
â and $0x7ff,%rdi
â bt %rdi,%gs:0x2219a
â â jae 53
â btr %rdi,%gs:0x2219a
â mov %rax,%rdi
â â jmp 5b
After:
entry_SYSCALL_64 /lib/modules/4.16.0-rc5-00086-gdf09348f78dc/build/vmlinux
0.65 â â jne swapgs_restore_regs_and_return_to_usermode
â pop %r10
â pop %r9
â pop %r8
â pop %rax
â pop %rsi
â pop %rdx
â pop %rsi
â mov %rsp,%rdi
â mov %gs:0x5004,%rsp
â pushq 0x28(%rdi)
â pushq (%rdi)
â push %rax
â â jmp 132
â mov %cr3,%rdi
â âââjmp 128
â â mov %rdi,%rax
â â and $0x7ff,%rdi
â â bt %rdi,%gs:0x2219a
â ââ jae 119
â â btr %rdi,%gs:0x2219a
â â mov %rax,%rdi
â ââ jmp 121
â119:â mov %rax,%rdi
â â bts $0x3f,%rdi
â121:â or $0x800,%rdi
â128:âââor $0x1000,%rdi
â mov %rdi,%cr3
â132: pop %rax
â pop %rdi
â pop %rsp
â â jmpq *0x825d2d(%rip) # ffffffff82225e88 <pv_cpu_ops+0xe8>
With those at least navigating to the right destination, an improvement
for these cases seems to be to somehow mark those inner functions,
which in this case could be:
entry_SYSCALL_64 /lib/modules/4.16.0-rc5-00086-gdf09348f78dc/build/vmlinux
âsyscall_return_via_sysret:
â pop %r15
â pop %r14
â pop %r13
â pop %r12
â pop %rbp
â pop %rbx
â pop %rsi
â pop %r10
â pop %r9
â pop %r8
â pop %rax
â pop %rsi
â pop %rdx
â pop %rsi
â mov %rsp,%rdi
â mov %gs:0x5004,%rsp
â pushq 0x28(%rdi)
â pushq (%rdi)
â push %rax
â â jmp 132
â mov %cr3,%rdi
â âââjmp 128
â â mov %rdi,%rax
â â and $0x7ff,%rdi
â â bt %rdi,%gs:0x2219a
â ââ jae 119
â â btr %rdi,%gs:0x2219a
â â mov %rax,%rdi
â ââ jmp 121
â119:â mov %rax,%rdi
â â bts $0x3f,%rdi
â121:â or $0x800,%rdi
â128:âââor $0x1000,%rdi
â mov %rdi,%cr3
â132: pop %rax
â pop %rdi
â pop %rsp
â â jmpq *0x825d2d(%rip) # ffffffff82225e88 <pv_cpu_ops+0xe8>
This all gets much better viewed if one uses 'perf report --ignore-vmlinux'
forcing the usage of /proc/kcore + /proc/kallsyms, when the above
actually gets down to:
# perf report --ignore-vmlinux
## do '/64', will show the function names containing '64',
## navigate to /entry_SYSCALL_64_after_hwframe.annotation,
## press 'A' to annotate, then 'P' to print that annotation
## to a file
## From another xterm (or see on screen, this 'P' thing is for
## getting rid of those right side scroll bars/spaces):
# cat /entry_SYSCALL_64_after_hwframe.annotation
entry_SYSCALL_64_after_hwframe() /proc/kcore
Event: cycles:ppp
Percent
Disassembly of section load0:
ffffffff9aa00044 <load0>:
11.97 push %rax
4.85 push %rdi
push %rsi
2.59 push %rdx
2.27 push %rcx
0.32 pushq $0xffffffffffffffda
1.29 push %r8
xor %r8d,%r8d
1.62 push %r9
0.65 xor %r9d,%r9d
1.62 push %r10
xor %r10d,%r10d
5.50 push %r11
xor %r11d,%r11d
3.56 push %rbx
xor %ebx,%ebx
4.21 push %rbp
xor %ebp,%ebp
2.59 push %r12
0.97 xor %r12d,%r12d
3.24 push %r13
xor %r13d,%r13d
2.27 push %r14
xor %r14d,%r14d
4.21 push %r15
xor %r15d,%r15d
0.97 mov %rsp,%rdi
5.50 â callq do_syscall_64
14.56 mov 0x58(%rsp),%rcx
7.44 mov 0x80(%rsp),%r11
0.32 cmp %rcx,%r11
â jne swapgs_restore_regs_and_return_to_usermode
0.32 shl $0x10,%rcx
0.32 sar $0x10,%rcx
3.24 cmp %rcx,%r11
â jne swapgs_restore_regs_and_return_to_usermode
2.27 cmpq $0x33,0x88(%rsp)
1.29 â jne swapgs_restore_regs_and_return_to_usermode
mov 0x30(%rsp),%r11
8.74 cmp %r11,0x90(%rsp)
â jne swapgs_restore_regs_and_return_to_usermode
0.32 test $0x10100,%r11
â jne swapgs_restore_regs_and_return_to_usermode
0.32 cmpq $0x2b,0xa0(%rsp)
0.65 â jne swapgs_restore_regs_and_return_to_usermode
I.e. using kallsyms makes the function start/end be done differently
than using what is in the vmlinux ELF symtab and actually the hits
goes to entry_SYSCALL_64_after_hwframe, which is a GLOBAL() after the
start of entry_SYSCALL_64:
ENTRY(entry_SYSCALL_64)
UNWIND_HINT_EMPTY
<SNIP>
pushq $__USER_CS /* pt_regs->cs */
pushq %rcx /* pt_regs->ip */
GLOBAL(entry_SYSCALL_64_after_hwframe)
pushq %rax /* pt_regs->orig_ax */
PUSH_AND_CLEAR_REGS rax=$-ENOSYS
And it goes and ends at:
cmpq $__USER_DS, SS(%rsp) /* SS must match SYSRET */
jne swapgs_restore_regs_and_return_to_usermode
/*
* We win! This label is here just for ease of understanding
* perf profiles. Nothing jumps here.
*/
syscall_return_via_sysret:
/* rcx and r11 are already restored (see code above) */
UNWIND_HINT_EMPTY
POP_REGS pop_rdi=0 skip_r11rcx=1
So perhaps some people should really just play with '--ignore-vmlinux'
to force /proc/kcore + kallsyms.
One idea is to do both, i.e. have a vmlinux annotation and a
kcore+kallsyms one, when possible, and even show the patched location,
etc.
Reported-by: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Adrian Hunter <adrian.hunter@xxxxxxxxx>
Cc: Andi Kleen <ak@xxxxxxxxxxxxxxx>
Cc: David Ahern <dsahern@xxxxxxxxx>
Cc: Jin Yao <yao.jin@xxxxxxxxxxxxxxx>
Cc: Jiri Olsa <jolsa@xxxxxxxxxx>
Cc: Namhyung Kim <namhyung@xxxxxxxxxx>
Cc: Wang Nan <wangnan0@xxxxxxxxxx>
Link: https://lkml.kernel.org/n/tip-r11knxv8voesav31xokjiuo6@xxxxxxxxxxxxxx
Signed-off-by: Arnaldo Carvalho de Melo <acme@xxxxxxxxxx>
---
tools/perf/util/annotate.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index f730e0cf8a26..3a428d7c59b9 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -280,7 +280,6 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op
struct addr_map_symbol target = {
.map = map,
};
- const char *s = strchr(ops->raw, '+');
const char *c = strchr(ops->raw, ',');
u64 start, end;
/*
@@ -337,8 +336,8 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op
map__rip_2objdump(target.map, map->map_ip(target.map, target.addr)) == ops->target.addr)
ops->target.sym = target.sym;
- if (s++ != NULL) {
- ops->target.offset = strtoull(s, NULL, 16);
+ if (!ops->target.outside) {
+ ops->target.offset = target.addr - start;
ops->target.offset_avail = true;
} else {
ops->target.offset_avail = false;
--
2.14.3