Re: [PATCHv5 2/3] x86/vdso: add mremap hook to vm_special_mapping

From: kbuild test robot
Date: Mon Apr 18 2016 - 10:00:23 EST


Hi Dmitry,

[auto build test WARNING on v4.6-rc4]
[also build test WARNING on next-20160418]
[cannot apply to tip/x86/core tip/x86/vdso]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url: https://github.com/0day-ci/linux/commits/Dmitry-Safonov/x86-rename-is_-ia32-x32-_task-to-in_-ia32-x32-_syscall/20160418-214656
config: x86_64-randconfig-x000-201616 (attached as .config)
reproduce:
# save the attached .config to linux build tree
make ARCH=x86_64

All warnings (new ones prefixed by >>):

In file included from include/asm-generic/bug.h:4:0,
from arch/x86/include/asm/bug.h:35,
from include/linux/bug.h:4,
from include/linux/mmdebug.h:4,
from include/linux/mm.h:8,
from arch/x86/entry/vdso/vma.c:7:
arch/x86/entry/vdso/vma.c: In function 'vdso_mremap':
arch/x86/entry/vdso/vma.c:114:37: error: 'vdso_image_32' undeclared (first use in this function)
if (in_ia32_syscall() && image == &vdso_image_32) {
^
include/linux/compiler.h:151:30: note: in definition of macro '__trace_if'
if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
^
>> arch/x86/entry/vdso/vma.c:114:2: note: in expansion of macro 'if'
if (in_ia32_syscall() && image == &vdso_image_32) {
^
arch/x86/entry/vdso/vma.c:114:37: note: each undeclared identifier is reported only once for each function it appears in
if (in_ia32_syscall() && image == &vdso_image_32) {
^
include/linux/compiler.h:151:30: note: in definition of macro '__trace_if'
if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
^
>> arch/x86/entry/vdso/vma.c:114:2: note: in expansion of macro 'if'
if (in_ia32_syscall() && image == &vdso_image_32) {
^

vim +/if +114 arch/x86/entry/vdso/vma.c

1 /*
2 * Copyright 2007 Andi Kleen, SUSE Labs.
3 * Subject to the GPL, v.2
4 *
5 * This contains most of the x86 vDSO kernel-side code.
6 */
> 7 #include <linux/mm.h>
8 #include <linux/err.h>
9 #include <linux/sched.h>
10 #include <linux/slab.h>
11 #include <linux/init.h>
12 #include <linux/random.h>
13 #include <linux/elf.h>
14 #include <linux/cpu.h>
15 #include <linux/ptrace.h>
16 #include <asm/pvclock.h>
17 #include <asm/vgtod.h>
18 #include <asm/proto.h>
19 #include <asm/vdso.h>
20 #include <asm/vvar.h>
21 #include <asm/page.h>
22 #include <asm/hpet.h>
23 #include <asm/desc.h>
24 #include <asm/cpufeature.h>
25
#if defined(CONFIG_X86_64)
/*
 * Whether the 64-bit vDSO is enabled; defaults to on.
 * NOTE(review): the consumers of this flag (presumably the map-on-exec
 * path and/or a boot parameter) are outside this excerpt — verify there.
 */
unsigned int __read_mostly vdso64_enabled = 1;
#endif
29
30 void __init init_vdso_image(const struct vdso_image *image)
31 {
32 BUG_ON(image->size % PAGE_SIZE != 0);
33
34 apply_alternatives((struct alt_instr *)(image->data + image->alt),
35 (struct alt_instr *)(image->data + image->alt +
36 image->alt_len));
37 }
38
39 struct linux_binprm;
40
41 /*
42 * Put the vdso above the (randomized) stack with another randomized
43 * offset. This way there is no hole in the middle of address space.
44 * To save memory make sure it is still in the same PTE as the stack
45 * top. This doesn't give that many random bits.
46 *
47 * Note that this algorithm is imperfect: the distribution of the vdso
48 * start address within a PMD is biased toward the end.
49 *
50 * Only used for the 64-bit and x32 vdsos.
51 */
52 static unsigned long vdso_addr(unsigned long start, unsigned len)
53 {
54 #ifdef CONFIG_X86_32
55 return 0;
56 #else
57 unsigned long addr, end;
58 unsigned offset;
59
60 /*
61 * Round up the start address. It can start out unaligned as a result
62 * of stack start randomization.
63 */
64 start = PAGE_ALIGN(start);
65
66 /* Round the lowest possible end address up to a PMD boundary. */
67 end = (start + len + PMD_SIZE - 1) & PMD_MASK;
68 if (end >= TASK_SIZE_MAX)
69 end = TASK_SIZE_MAX;
70 end -= len;
71
72 if (end > start) {
73 offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
74 addr = start + (offset << PAGE_SHIFT);
75 } else {
76 addr = start;
77 }
78
79 /*
80 * Forcibly align the final address in case we have a hardware
81 * issue that requires alignment for performance reasons.
82 */
83 addr = align_vdso_addr(addr);
84
85 return addr;
86 #endif
87 }
88
89 static int vdso_fault(const struct vm_special_mapping *sm,
90 struct vm_area_struct *vma, struct vm_fault *vmf)
91 {
92 const struct vdso_image *image = vma->vm_mm->context.vdso_image;
93
94 if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
95 return VM_FAULT_SIGBUS;
96
97 vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
98 get_page(vmf->page);
99 return 0;
100 }
101
102 static int vdso_mremap(const struct vm_special_mapping *sm,
103 struct vm_area_struct *new_vma)
104 {
105 unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
106 const struct vdso_image *image = current->mm->context.vdso_image;
107
108 if (image->size != new_size)
109 return -EINVAL;
110
111 if (current->mm != new_vma->vm_mm)
112 return -EFAULT;
113
> 114 if (in_ia32_syscall() && image == &vdso_image_32) {
115 struct pt_regs *regs = current_pt_regs();
116 unsigned long vdso_land = image->sym_int80_landing_pad;
117 unsigned long old_land_addr = vdso_land +

---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation

Attachment: .config.gz
Description: Binary data