[PATCH v1 7/9] powerpc/vdso: Move vdso datapage up front

From: Christophe Leroy
Date: Tue Aug 25 2020 - 09:54:34 EST


Move the vdso datapage in front of the VDSO area,
before the vdso text.

This will allow removing the __kernel_datapage_offset symbol
and simplifying __get_datapage() in the following patch.

Signed-off-by: Christophe Leroy <christophe.leroy@xxxxxxxxxx>
---
arch/powerpc/include/asm/mmu_context.h | 4 +++-
arch/powerpc/kernel/vdso.c | 22 ++++++++++------------
2 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 7f3658a97384..be18ad12bb54 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -262,7 +262,9 @@ extern void arch_exit_mmap(struct mm_struct *mm);
static inline void arch_unmap(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
- if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
+ unsigned long vdso_base = mm->context.vdso_base - PAGE_SIZE;
+
+ if (start <= vdso_base && vdso_base < end)
mm->context.vdso_base = 0;
}

diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index b9270923452e..1d72c4b7672f 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -124,7 +124,7 @@ static int vdso_mremap(unsigned long vdso_pages,
if (new_size != vdso_size)
return -EINVAL;

- current->mm->context.vdso_base = (unsigned long)new_vma->vm_start;
+ current->mm->context.vdso_base = (unsigned long)new_vma->vm_start + PAGE_SIZE;

return 0;
}
@@ -217,7 +217,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
* install_special_mapping or the perf counter mmap tracking code
* will fail to recognise it as a vDSO (since arch_vma_name fails).
*/
- current->mm->context.vdso_base = vdso_base;
+ current->mm->context.vdso_base = vdso_base + PAGE_SIZE;

/*
* our vma flags don't have VM_WRITE so by default, the process isn't
@@ -516,8 +516,7 @@ static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32,
return -1;
}
*((int *)(vdso64_kbase + sym64->st_value - VDSO64_LBASE)) =
- (vdso64_pages << PAGE_SHIFT) -
- (sym64->st_value - VDSO64_LBASE);
+ -PAGE_SIZE - (sym64->st_value - VDSO64_LBASE);
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_VDSO32
@@ -528,8 +527,7 @@ static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32,
return -1;
}
*((int *)(vdso32_kbase + (sym32->st_value - VDSO32_LBASE))) =
- (vdso32_pages << PAGE_SHIFT) -
- (sym32->st_value - VDSO32_LBASE);
+ -PAGE_SIZE - (sym32->st_value - VDSO32_LBASE);
#endif

return 0;
@@ -771,10 +769,10 @@ static int __init vdso_init(void)
if (!pagelist)
goto alloc_failed;

- for (i = 0; i < vdso32_pages; i++)
- pagelist[i] = virt_to_page(vdso32_kbase + i * PAGE_SIZE);
+ pagelist[0] = virt_to_page(vdso_data);

- pagelist[i++] = virt_to_page(vdso_data);
+ for (i = 0; i < vdso32_pages; i++)
+ pagelist[i + 1] = virt_to_page(vdso32_kbase + i * PAGE_SIZE);

vdso32_spec.pages = pagelist;
}
@@ -784,10 +782,10 @@ static int __init vdso_init(void)
if (!pagelist)
goto alloc_failed;

- for (i = 0; i < vdso64_pages; i++)
- pagelist[i] = virt_to_page(vdso64_kbase + i * PAGE_SIZE);
+ pagelist[0] = virt_to_page(vdso_data);

- pagelist[i++] = virt_to_page(vdso_data);
+ for (i = 0; i < vdso64_pages; i++)
+ pagelist[i + 1] = virt_to_page(vdso64_kbase + i * PAGE_SIZE);

vdso64_spec.pages = pagelist;
}
--
2.25.0