[PATCH 24/24] x86/mm/kaiser: Use the other page_table_lock pattern

From: Ingo Molnar
Date: Mon Nov 27 2017 - 05:53:00 EST


From: Peter Zijlstra <peterz@xxxxxxxxxxxxx>

Use the other page_table_lock pattern; this removes the free from
under the lock, reducing worst-case hold times and making it a leaf
lock.

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Link: http://lkml.kernel.org/r/20171127085906.uth5hldrtbbqsnkr@xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
---
arch/x86/mm/kaiser.c | 24 +++++++++++++++---------
1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/arch/x86/mm/kaiser.c b/arch/x86/mm/kaiser.c
index 0282169ede18..0ff502fa655b 100644
--- a/arch/x86/mm/kaiser.c
+++ b/arch/x86/mm/kaiser.c
@@ -196,11 +196,13 @@ static pte_t *kaiser_shadow_pagetable_walk(unsigned long address,
return NULL;

spin_lock(&shadow_table_allocation_lock);
- if (p4d_none(*p4d))
+ if (p4d_none(*p4d)) {
set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
- else
- free_page(new_pud_page);
+ new_pud_page = 0;
+ }
spin_unlock(&shadow_table_allocation_lock);
+ if (new_pud_page)
+ free_page(new_pud_page);
}

pud = pud_offset(p4d, address);
@@ -215,11 +217,13 @@ static pte_t *kaiser_shadow_pagetable_walk(unsigned long address,
return NULL;

spin_lock(&shadow_table_allocation_lock);
- if (pud_none(*pud))
+ if (pud_none(*pud)) {
set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
- else
- free_page(new_pmd_page);
+ new_pmd_page = 0;
+ }
spin_unlock(&shadow_table_allocation_lock);
+ if (new_pmd_page)
+ free_page(new_pmd_page);
}

pmd = pmd_offset(pud, address);
@@ -234,11 +238,13 @@ static pte_t *kaiser_shadow_pagetable_walk(unsigned long address,
return NULL;

spin_lock(&shadow_table_allocation_lock);
- if (pmd_none(*pmd))
+ if (pmd_none(*pmd)) {
set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
- else
- free_page(new_pte_page);
+ new_pte_page = 0;
+ }
spin_unlock(&shadow_table_allocation_lock);
+ if (new_pte_page)
+ free_page(new_pte_page);
}

pte = pte_offset_kernel(pmd, address);
--
2.14.1