[tip:locking/rwsem] locking/rwsem, x86: Clean up ____down_write()
From: tip-bot for Borislav Petkov
Date: Thu Apr 28 2016 - 06:28:37 EST
Commit-ID: 71c01930b42e5dd65d4820dea116bcbe95a0b768
Gitweb: http://git.kernel.org/tip/71c01930b42e5dd65d4820dea116bcbe95a0b768
Author: Borislav Petkov <bp@xxxxxxx>
AuthorDate: Wed, 27 Apr 2016 13:47:32 +0200
Committer: Ingo Molnar <mingo@xxxxxxxxxx>
CommitDate: Thu, 28 Apr 2016 10:42:56 +0200
locking/rwsem, x86: Clean up ____down_write()
Move the RWSEM_ACTIVE_WRITE_BIAS out of the inline asm to reduce the
number of arguments. Also, make it an input argument only (why it was an
output operand, I still don't know...).
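[ For illustration, a stand-alone toy of that idea: made-up names, x86-64
  and GCC extended asm assumed, not the kernel macro. The bias is a plain
  C initializer and reaches the asm as an operand, so the asm block needs
  one argument less. Since XADD writes the old counter value back into
  its register, the toy keeps that operand read-write ("+r") and returns
  the old value. ]

#include <stdio.h>

#define TOY_WRITE_BIAS  (-4294967295L)  /* the RWSEM_ACTIVE_WRITE_BIAS value on x86-64 */

static long toy_write_lock_fastpath(long *count)
{
        long tmp = TOY_WRITE_BIAS;      /* set up in C, not via an asm constraint */

        asm volatile("lock; xaddq %1, %0"
                     : "+m" (*count), "+r" (tmp)
                     : : "memory", "cc");

        return tmp;                     /* 0 means nobody held the lock before us */
}

int main(void)
{
        long count = 0;
        long old = toy_write_lock_fastpath(&count);

        printf("old=%ld new=%ld\n", old, count);        /* old=0 new=-4294967295 */
        return 0;
}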
For better readability, use symbolic names for the arguments and move
the line-continuation backslashes out to 80 cols.
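[ The same toy asm as above, rewritten with symbolic operand names the
  way the patch does it; again only an illustration, not the kernel
  macro. %[cnt] and %[tmp] read better at the use site than %0 and %1. ]

static long toy_write_lock_fastpath_named(long *count)
{
        long tmp = -4294967295L;        /* the toy write bias from the sketch above */

        asm volatile("lock; xaddq %[tmp], %[cnt]"
                     : [cnt] "+m" (*count), [tmp] "+r" (tmp)
                     : : "memory", "cc");

        return tmp;
}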
The resulting asm differs only in GCC's temporary variable names and in
the .loc line numbers:
--- before 2016-04-27 13:39:05.320778458 +0200
+++ after 2016-04-27 13:52:37.336778994 +0200
@@ -11,8 +11,8 @@ down_write_killable:
.LBB84:
.LBB85:
.LBB86:
- .loc 2 128 0
- movabsq $-4294967295, %rdx #, tmp
+ .loc 2 130 0
+ movabsq $-4294967295, %rdx #, tmp94
movq %rdi, %rax # sem, sem
.LBE86:
.LBE85:
@@ -23,17 +23,17 @@ down_write_killable:
.LBB89:
.LBB88:
.LBB87:
- .loc 2 128 0
+ .loc 2 130 0
#APP
-# 128 "./arch/x86/include/asm/rwsem.h" 1
+# 130 "./arch/x86/include/asm/rwsem.h" 1
# beginning down_write
.pushsection .smp_locks,"a"
.balign 4
.long 671f - .
.popsection
671:
- lock; xadd %rdx,(%rax) # tmp, sem
- test %edx , %edx # tmp
+ lock; xadd %rdx,(%rax) # tmp94, sem
+ test %edx , %edx # tmp94
jz 1f
call call_rwsem_down_write_failed_killable
1:
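[ In plain C, the fast path shown above boils down to roughly the
  following; a simplified sketch with made-up names, not the kernel
  implementation. It uses the GCC/Clang __atomic_fetch_add() builtin in
  place of the inline asm, and the killable/error handling is left out. ]

static void toy_down_write_slowpath(long *count)
{
        (void)count;                    /* the real code would block here */
}

static void toy_down_write(long *count)
{
        /* atomically add the write bias and get the previous count back */
        long old = __atomic_fetch_add(count, -4294967295L, __ATOMIC_SEQ_CST);

        /*
         * The asm tests only the low 32 bits (the active mask): zero means
         * there were no active readers or writers, so the lock was taken
         * uncontended; otherwise fall back to the slow path.
         */
        if ((int)old != 0)
                toy_down_write_slowpath(count);
}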
Signed-off-by: Borislav Petkov <bp@xxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Chris Zankel <chris@xxxxxxxxxx>
Cc: David S. Miller <davem@xxxxxxxxxxxxx>
Cc: Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Max Filippov <jcmvbkbc@xxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Tony Luck <tony.luck@xxxxxxxxx>
Cc: linux-alpha@xxxxxxxxxxxxxxx
Cc: linux-arch@xxxxxxxxxxxxxxx
Cc: linux-ia64@xxxxxxxxxxxxxxx
Cc: linux-s390@xxxxxxxxxxxxxxx
Cc: linux-sh@xxxxxxxxxxxxxxx
Cc: linux-xtensa@xxxxxxxxxxxxxxxx
Cc: sparclinux@xxxxxxxxxxxxxxx
Link: http://lkml.kernel.org/r/20160427120217.GE21011@xxxxxxx
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
---
arch/x86/include/asm/rwsem.h | 36 +++++++++++++++++++-----------------
1 file changed, 19 insertions(+), 17 deletions(-)
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index 453744c..d2f8d10 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -99,23 +99,25 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
/*
 * lock for writing
 */
-#define ____down_write(sem, slow_path) \
-({ \
- long tmp; \
- struct rw_semaphore* ret; \
- asm volatile("# beginning down_write\n\t" \
- LOCK_PREFIX " xadd %1,(%3)\n\t" \
- /* adds 0xffff0001, returns the old value */ \
- " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
- /* was the active mask 0 before? */\
- " jz 1f\n" \
- " call " slow_path "\n" \
- "1:\n" \
- "# ending down_write" \
- : "+m" (sem->count), "=d" (tmp), "=a" (ret) \
- : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \
- : "memory", "cc"); \
- ret; \
+#define ____down_write(sem, slow_path) \
+({ \
+ long tmp = RWSEM_ACTIVE_WRITE_BIAS; \
+ struct rw_semaphore* ret; \
+ \
+ asm volatile("# beginning down_write\n\t" \
+ LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t" \
+ /* adds 0xffff0001, returns the old value */ \
+ " test " __ASM_SEL(%w[tmp],%k[tmp]) "," \
+ __ASM_SEL(%w[tmp],%k[tmp]) "\n\t" \
+ /* was the active mask 0 before? */ \
+ " jz 1f\n" \
+ " call " slow_path "\n" \
+ "1:\n" \
+ "# ending down_write" \
+ : "+m" (sem->count), "=a" (ret) \
+ : [sem] "a" (sem), [tmp] "r" (tmp) \
+ : "memory", "cc"); \
+ ret; \
})

static inline void __down_write(struct rw_semaphore *sem)
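[ For context, an illustrative sketch (not quoted from this patch) of how
  such a macro gets consumed by the two callers whose slow paths appear
  in the asm dump above; see the full rwsem.h for the exact definitions. ]

static inline void __down_write(struct rw_semaphore *sem)
{
        ____down_write(sem, "call_rwsem_down_write_failed");
}

static inline int __down_write_killable(struct rw_semaphore *sem)
{
        if (IS_ERR(____down_write(sem, "call_rwsem_down_write_failed_killable")))
                return -EINTR;

        return 0;
}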