[patch v2 10/11] x86/vdso: Prepare for robust futex unlock support

From: Thomas Gleixner

Date: Thu Mar 19 2026 - 19:34:36 EST


There will be a VDSO function to unlock non-contended robust futexes in
user space. The unlock sequence is racy against clearing the list_pending_op
pointer in the task's robust list head. To plug this race the kernel needs
to know the critical section window so that it can clear the pointer when the
task is interrupted inside that window. The window boundaries are marked by
labels in the inline assembly.

Add the corresponding symbols to the vdso2c generator and use them in the
VDSO VMA code to update the critical section addresses in mm_struct::futex
on map() and mremap().

The symbols are not exported to user space, but available in the debug
version of the vDSO.

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxx>
---
V2: Rename the symbols
---
arch/x86/entry/vdso/vma.c | 35 +++++++++++++++++++++++++++++++++++
arch/x86/include/asm/vdso.h | 6 ++++++
arch/x86/tools/vdso2c.c | 20 +++++++++++++-------
3 files changed, 54 insertions(+), 7 deletions(-)

--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -73,6 +73,38 @@ static void vdso_fix_landing(const struc
regs->ip = new_vma->vm_start + ipoffset;
}

+#ifdef CONFIG_FUTEX_ROBUST_UNLOCK
+static void vdso_futex_robust_unlock_update_ips(void)
+{
+ const struct vdso_image *image = current->mm->context.vdso_image;
+ unsigned long vdso = (unsigned long) current->mm->context.vdso;
+ struct futex_mm_data *fd = &current->mm->futex;
+ struct futex_unlock_cs_range *csr = fd->unlock_cs_ranges;
+
+ fd->unlock_cs_num_ranges = 0;
+#ifdef CONFIG_X86_64
+ if (image->sym_x86_64_futex_try_unlock_cs_start) {
+ csr->start_ip = vdso + image->sym_x86_64_futex_try_unlock_cs_start;
+ csr->end_ip = vdso + image->sym_x86_64_futex_try_unlock_cs_end;
+ csr->pop_size32 = 0;
+ csr++;
+ fd->unlock_cs_num_ranges++;
+ }
+#endif /* CONFIG_X86_64 */
+
+#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
+ if (image->sym_x86_32_futex_try_unlock_cs_start) {
+ csr->start_ip = vdso + image->sym_x86_32_futex_try_unlock_cs_start;
+ csr->end_ip = vdso + image->sym_x86_32_futex_try_unlock_cs_end;
+ csr->pop_size32 = 1;
+ fd->unlock_cs_num_ranges++;
+ }
+#endif /* CONFIG_X86_32 || CONFIG_COMPAT */
+}
+#else
+static inline void vdso_futex_robust_unlock_update_ips(void) { }
+#endif
+
static int vdso_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma)
{
@@ -80,6 +112,7 @@ static int vdso_mremap(const struct vm_s

vdso_fix_landing(image, new_vma);
current->mm->context.vdso = (void __user *)new_vma->vm_start;
+ vdso_futex_robust_unlock_update_ips();

return 0;
}
@@ -189,6 +222,8 @@ static int map_vdso(const struct vdso_im
current->mm->context.vdso = (void __user *)text_start;
current->mm->context.vdso_image = image;

+ vdso_futex_robust_unlock_update_ips();
+
up_fail:
mmap_write_unlock(mm);
return ret;
--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -25,6 +25,12 @@ struct vdso_image {
long sym_int80_landing_pad;
long sym_vdso32_sigreturn_landing_pad;
long sym_vdso32_rt_sigreturn_landing_pad;
+ long sym_x86_64_futex_try_unlock_cs_start;
+ long sym_x86_64_futex_try_unlock_cs_end;
+ long sym_x86_64_compat_futex_try_unlock_cs_start;
+ long sym_x86_64_compat_futex_try_unlock_cs_end;
+ long sym_x86_32_futex_try_unlock_cs_start;
+ long sym_x86_32_futex_try_unlock_cs_end;
};

extern const struct vdso_image vdso64_image;
--- a/arch/x86/tools/vdso2c.c
+++ b/arch/x86/tools/vdso2c.c
@@ -75,13 +75,19 @@ struct vdso_sym {
};

struct vdso_sym required_syms[] = {
- {"VDSO32_NOTE_MASK", true},
- {"__kernel_vsyscall", true},
- {"__kernel_sigreturn", true},
- {"__kernel_rt_sigreturn", true},
- {"int80_landing_pad", true},
- {"vdso32_rt_sigreturn_landing_pad", true},
- {"vdso32_sigreturn_landing_pad", true},
+ {"VDSO32_NOTE_MASK", true},
+ {"__kernel_vsyscall", true},
+ {"__kernel_sigreturn", true},
+ {"__kernel_rt_sigreturn", true},
+ {"int80_landing_pad", true},
+ {"vdso32_rt_sigreturn_landing_pad", true},
+ {"vdso32_sigreturn_landing_pad", true},
+ {"x86_64_futex_try_unlock_cs_start", true},
+ {"x86_64_futex_try_unlock_cs_end", true},
+ {"x86_64_compat_futex_try_unlock_cs_start", true},
+ {"x86_64_compat_futex_try_unlock_cs_end", true},
+ {"x86_32_futex_try_unlock_cs_start", true},
+ {"x86_32_futex_try_unlock_cs_end", true},
};

__attribute__((format(printf, 1, 2))) __attribute__((noreturn))