[tip:locking/core] locking/atomic, riscv: Use s64 for atomic64
From: tip-bot for Mark Rutland
Date: Mon Jun 03 2019 - 09:45:52 EST
Commit-ID: 0754211847d7a228f1c34a49fd122979dfd19a1a
Gitweb: https://git.kernel.org/tip/0754211847d7a228f1c34a49fd122979dfd19a1a
Author: Mark Rutland <mark.rutland@xxxxxxx>
AuthorDate: Wed, 22 May 2019 14:22:44 +0100
Committer: Ingo Molnar <mingo@xxxxxxxxxx>
CommitDate: Mon, 3 Jun 2019 12:32:56 +0200
locking/atomic, riscv: Use s64 for atomic64
As a step towards making the atomic64 API use consistent types treewide,
let's have the RISC-V atomic64 implementation use s64 as the underlying
type for atomic64_t, rather than long, matching the generated headers.
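(Illustrative sketch, not part of the patch: long is ABI-dependent,
while s64 is 64-bit everywhere, which is what gives the atomic64 API
one consistent type treewide:

#include <linux/build_bug.h>
#include <linux/types.h>

static inline void atomic64_type_sanity(void)
{
	BUILD_BUG_ON(sizeof(s64) != 8);		/* fixed width on every arch */
#ifdef CONFIG_64BIT
	BUILD_BUG_ON(sizeof(long) != 8);	/* LP64: long happens to match */
#else
	BUILD_BUG_ON(sizeof(long) != 4);	/* ILP32: long would truncate */
#endif
}
)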
As atomic64_read() depends on the generic definition of atomic64_t, this
still returns long on 64-bit. This will be converted in a subsequent
patch.
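(For context, the generic definition in question lives in
include/linux/types.h and, at this point in the series, still reads
roughly as follows on 64-bit; the subsequent patch switches the counter
to s64:

#ifdef CONFIG_64BIT
typedef struct {
	long counter;
} atomic64_t;
#endif
)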
Otherwise, there should be no functional change as a result of this patch.
Signed-off-by: Mark Rutland <mark.rutland@xxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Reviewed-by: Palmer Dabbelt <palmer@xxxxxxxxxx>
Cc: Albert Ou <aou@xxxxxxxxxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Will Deacon <will.deacon@xxxxxxx>
Cc: arnd@xxxxxxxx
Cc: bp@xxxxxxxxx
Cc: catalin.marinas@xxxxxxx
Cc: davem@xxxxxxxxxxxxx
Cc: fenghua.yu@xxxxxxxxx
Cc: heiko.carstens@xxxxxxxxxx
Cc: herbert@xxxxxxxxxxxxxxxxxxx
Cc: ink@xxxxxxxxxxxxxxxxxxxx
Cc: jhogan@xxxxxxxxxx
Cc: linux@xxxxxxxxxxxxxxx
Cc: mattst88@xxxxxxxxx
Cc: mpe@xxxxxxxxxxxxxx
Cc: paul.burton@xxxxxxxx
Cc: paulus@xxxxxxxxx
Cc: ralf@xxxxxxxxxxxxxx
Cc: rth@xxxxxxxxxxx
Cc: tony.luck@xxxxxxxxx
Cc: vgupta@xxxxxxxxxxxx
Link: https://lkml.kernel.org/r/20190522132250.26499-13-mark.rutland@xxxxxxx
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
---
arch/riscv/include/asm/atomic.h | 44 +++++++++++++++++++++--------------------
1 file changed, 23 insertions(+), 21 deletions(-)
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index 9c263bd9d5ad..96f95c9ebd97 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -38,11 +38,11 @@ static __always_inline void atomic_set(atomic_t *v, int i)
#ifndef CONFIG_GENERIC_ATOMIC64
#define ATOMIC64_INIT(i) { (i) }
-static __always_inline long atomic64_read(const atomic64_t *v)
+static __always_inline s64 atomic64_read(const atomic64_t *v)
{
return READ_ONCE(v->counter);
}
-static __always_inline void atomic64_set(atomic64_t *v, long i)
+static __always_inline void atomic64_set(atomic64_t *v, s64 i)
{
WRITE_ONCE(v->counter, i);
}
@@ -66,11 +66,11 @@ void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I) \
- ATOMIC_OP (op, asm_op, I, w, int, )
+ ATOMIC_OP (op, asm_op, I, w, int, )
#else
#define ATOMIC_OPS(op, asm_op, I) \
- ATOMIC_OP (op, asm_op, I, w, int, ) \
- ATOMIC_OP (op, asm_op, I, d, long, 64)
+ ATOMIC_OP (op, asm_op, I, w, int, ) \
+ ATOMIC_OP (op, asm_op, I, d, s64, 64)
#endif
ATOMIC_OPS(add, add, i)
@@ -127,14 +127,14 @@ c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I) \
- ATOMIC_FETCH_OP( op, asm_op, I, w, int, ) \
- ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int, )
+ ATOMIC_FETCH_OP( op, asm_op, I, w, int, ) \
+ ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int, )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I) \
- ATOMIC_FETCH_OP( op, asm_op, I, w, int, ) \
- ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int, ) \
- ATOMIC_FETCH_OP( op, asm_op, I, d, long, 64) \
- ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, long, 64)
+ ATOMIC_FETCH_OP( op, asm_op, I, w, int, ) \
+ ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int, ) \
+ ATOMIC_FETCH_OP( op, asm_op, I, d, s64, 64) \
+ ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, s64, 64)
#endif
ATOMIC_OPS(add, add, +, i)
@@ -166,11 +166,11 @@ ATOMIC_OPS(sub, add, +, -i)
#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I) \
- ATOMIC_FETCH_OP(op, asm_op, I, w, int, )
+ ATOMIC_FETCH_OP(op, asm_op, I, w, int, )
#else
#define ATOMIC_OPS(op, asm_op, I) \
- ATOMIC_FETCH_OP(op, asm_op, I, w, int, ) \
- ATOMIC_FETCH_OP(op, asm_op, I, d, long, 64)
+ ATOMIC_FETCH_OP(op, asm_op, I, w, int, ) \
+ ATOMIC_FETCH_OP(op, asm_op, I, d, s64, 64)
#endif
ATOMIC_OPS(and, and, i)
@@ -219,9 +219,10 @@ static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
#define atomic_fetch_add_unless atomic_fetch_add_unless
#ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
+static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
- long prev, rc;
+ s64 prev;
+ long rc;
__asm__ __volatile__ (
"0: lr.d %[p], %[c]\n"
@@ -290,11 +291,11 @@ c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS() \
- ATOMIC_OP( int, , 4)
+ ATOMIC_OP(int, , 4)
#else
#define ATOMIC_OPS() \
- ATOMIC_OP( int, , 4) \
- ATOMIC_OP(long, 64, 8)
+ ATOMIC_OP(int, , 4) \
+ ATOMIC_OP(s64, 64, 8)
#endif
ATOMIC_OPS()
@@ -332,9 +333,10 @@ static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
#define atomic_dec_if_positive(v) atomic_sub_if_positive(v, 1)
#ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline long atomic64_sub_if_positive(atomic64_t *v, long offset)
+static __always_inline s64 atomic64_sub_if_positive(atomic64_t *v, s64 offset)
{
- long prev, rc;
+ s64 prev;
+ long rc;
__asm__ __volatile__ (
"0: lr.d %[p], %[c]\n"