[tip:locking/core] locking/atomic, mips: Use s64 for atomic64

From: tip-bot for Mark Rutland
Date: Mon Jun 03 2019 - 09:43:58 EST


Commit-ID: d184cf1a449ca4cb0d86f3dd82c3337c645ea6c0
Gitweb: https://git.kernel.org/tip/d184cf1a449ca4cb0d86f3dd82c3337c645ea6c0
Author: Mark Rutland <mark.rutland@xxxxxxx>
AuthorDate: Wed, 22 May 2019 14:22:41 +0100
Committer: Ingo Molnar <mingo@xxxxxxxxxx>
CommitDate: Mon, 3 Jun 2019 12:32:56 +0200

locking/atomic, mips: Use s64 for atomic64

As a step towards making the atomic64 API use consistent types treewide,
let's have the mips atomic64 implementation use s64 as the underlying
type for atomic64_t, rather than long or __s64, matching the generated
headers.

As atomic64_read() depends on the generic definition of atomic64_t, it
still returns long on 64-bit. This will be converted in a subsequent
patch.

Otherwise, there should be no functional change as a result of this
patch.

Signed-off-by: Mark Rutland <mark.rutland@xxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Cc: James Hogan <jhogan@xxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Paul Burton <paul.burton@xxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Ralf Baechle <ralf@xxxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Will Deacon <will.deacon@xxxxxxx>
Cc: aou@xxxxxxxxxxxxxxxxx
Cc: arnd@xxxxxxxx
Cc: bp@xxxxxxxxx
Cc: catalin.marinas@xxxxxxx
Cc: davem@xxxxxxxxxxxxx
Cc: fenghua.yu@xxxxxxxxx
Cc: heiko.carstens@xxxxxxxxxx
Cc: herbert@xxxxxxxxxxxxxxxxxxx
Cc: ink@xxxxxxxxxxxxxxxxxxxx
Cc: linux@xxxxxxxxxxxxxxx
Cc: mattst88@xxxxxxxxx
Cc: mpe@xxxxxxxxxxxxxx
Cc: palmer@xxxxxxxxxx
Cc: paulus@xxxxxxxxx
Cc: rth@xxxxxxxxxxx
Cc: tony.luck@xxxxxxxxx
Cc: vgupta@xxxxxxxxxxxx
Link: https://lkml.kernel.org/r/20190522132250.26499-10-mark.rutland@xxxxxxx
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
---
arch/mips/include/asm/atomic.h | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 94096299fc56..9a82dd11c0e9 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -254,10 +254,10 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
#define atomic64_set(v, i) WRITE_ONCE((v)->counter, (i))

#define ATOMIC64_OP(op, c_op, asm_op) \
-static __inline__ void atomic64_##op(long i, atomic64_t * v) \
+static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
{ \
if (kernel_uses_llsc) { \
- long temp; \
+ s64 temp; \
\
loongson_llsc_mb(); \
__asm__ __volatile__( \
@@ -280,12 +280,12 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
}

#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
-static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
+static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
{ \
- long result; \
+ s64 result; \
\
if (kernel_uses_llsc) { \
- long temp; \
+ s64 temp; \
\
loongson_llsc_mb(); \
__asm__ __volatile__( \
@@ -314,12 +314,12 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
}

#define ATOMIC64_FETCH_OP(op, c_op, asm_op) \
-static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
+static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
{ \
- long result; \
+ s64 result; \
\
if (kernel_uses_llsc) { \
- long temp; \
+ s64 temp; \
\
loongson_llsc_mb(); \
__asm__ __volatile__( \
@@ -386,14 +386,14 @@ ATOMIC64_OPS(xor, ^=, xor)
* Atomically test @v and subtract @i if @v is greater or equal than @i.
* The function returns the old value of @v minus @i.
*/
-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
+static __inline__ s64 atomic64_sub_if_positive(s64 i, atomic64_t * v)
{
- long result;
+ s64 result;

smp_mb__before_llsc();

if (kernel_uses_llsc) {
- long temp;
+ s64 temp;

__asm__ __volatile__(
" .set push \n"