[PATCH 03/18] locking/atomic: generic: use s64 for atomic64

From: Mark Rutland
Date: Wed May 22 2019 - 09:26:38 EST


As a step towards making the atomic64 API use consistent types treewide,
let's have the generic atomic64 implementation use s64 as the underlying
type for atomic64_t, rather than long long, matching the generated
headers.

Otherwise, there should be no functional change as a result of this
patch: in kernel space s64 is typedef'd to long long (via
<asm-generic/int-ll64.h>), so the size and ABI of the type are
unchanged.
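
For illustration, this is the shape of the inconsistency being removed
(a sketch only; the exact declarations live in the generated headers
and in include/asm-generic/atomic64.h). The generated headers describe
the ops in terms of s64:

	s64 atomic64_fetch_add(s64 i, atomic64_t *v);

while the generic implementation previously declared, via its
ATOMIC64_FETCH_OP() macro:

	extern long long atomic64_fetch_add(long long a, atomic64_t *v);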

Signed-off-by: Mark Rutland <mark.rutland@xxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Will Deacon <will.deacon@xxxxxxx>
---
 include/asm-generic/atomic64.h | 20 ++++++++++----------
 lib/atomic64.c                 | 32 ++++++++++++++++----------------
 2 files changed, 26 insertions(+), 26 deletions(-)
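
Not part of the patch: a minimal caller sketch (with hypothetical
names) showing that users of the API are written the same way before
and after, as only the spelling of the 64-bit type changes:

	static atomic64_t example_counter = ATOMIC64_INIT(0);

	static void example(void)
	{
		s64 old;

		atomic64_add(1, &example_counter);

		/* returns the old value; adds 1 unless the counter was 0 */
		old = atomic64_fetch_add_unless(&example_counter, 1, 0);
	}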

diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index 97b28b7f1f29..fc7b831ed632 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -14,24 +14,24 @@
 #include <linux/types.h>
 
 typedef struct {
-	long long counter;
+	s64 counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(i)	{ (i) }
 
-extern long long atomic64_read(const atomic64_t *v);
-extern void atomic64_set(atomic64_t *v, long long i);
+extern s64 atomic64_read(const atomic64_t *v);
+extern void atomic64_set(atomic64_t *v, s64 i);
 
 #define atomic64_set_release(v, i)	atomic64_set((v), (i))
 
 #define ATOMIC64_OP(op)							\
-extern void atomic64_##op(long long a, atomic64_t *v);
+extern void atomic64_##op(s64 a, atomic64_t *v);
 
 #define ATOMIC64_OP_RETURN(op)						\
-extern long long atomic64_##op##_return(long long a, atomic64_t *v);
+extern s64 atomic64_##op##_return(s64 a, atomic64_t *v);
 
 #define ATOMIC64_FETCH_OP(op)						\
-extern long long atomic64_fetch_##op(long long a, atomic64_t *v);
+extern s64 atomic64_fetch_##op(s64 a, atomic64_t *v);
 
 #define ATOMIC64_OPS(op)	ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
 
@@ -50,11 +50,11 @@ ATOMIC64_OPS(xor)
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-extern long long atomic64_dec_if_positive(atomic64_t *v);
+extern s64 atomic64_dec_if_positive(atomic64_t *v);
 #define atomic64_dec_if_positive atomic64_dec_if_positive
-extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
-extern long long atomic64_xchg(atomic64_t *v, long long new);
-extern long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u);
+extern s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n);
+extern s64 atomic64_xchg(atomic64_t *v, s64 new);
+extern s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u);
 #define atomic64_fetch_add_unless atomic64_fetch_add_unless
 
 #endif /* _ASM_GENERIC_ATOMIC64_H */
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 1d91e31eceec..62f218bf50a0 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -46,11 +46,11 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
 	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
 }
 
-long long atomic64_read(const atomic64_t *v)
+s64 atomic64_read(const atomic64_t *v)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
-	long long val;
+	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
@@ -59,7 +59,7 @@ long long atomic64_read(const atomic64_t *v)
 }
 EXPORT_SYMBOL(atomic64_read);
 
-void atomic64_set(atomic64_t *v, long long i)
+void atomic64_set(atomic64_t *v, s64 i)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
@@ -71,7 +71,7 @@ void atomic64_set(atomic64_t *v, long long i)
 EXPORT_SYMBOL(atomic64_set);
 
 #define ATOMIC64_OP(op, c_op)						\
-void atomic64_##op(long long a, atomic64_t *v)				\
+void atomic64_##op(s64 a, atomic64_t *v)				\
 {									\
 	unsigned long flags;						\
 	raw_spinlock_t *lock = lock_addr(v);				\
@@ -83,11 +83,11 @@ void atomic64_##op(long long a, atomic64_t *v) \
 EXPORT_SYMBOL(atomic64_##op);
 
 #define ATOMIC64_OP_RETURN(op, c_op)					\
-long long atomic64_##op##_return(long long a, atomic64_t *v)		\
+s64 atomic64_##op##_return(s64 a, atomic64_t *v)			\
 {									\
 	unsigned long flags;						\
 	raw_spinlock_t *lock = lock_addr(v);				\
-	long long val;							\
+	s64 val;							\
 									\
 	raw_spin_lock_irqsave(lock, flags);				\
 	val = (v->counter c_op a);					\
@@ -97,11 +97,11 @@ long long atomic64_##op##_return(long long a, atomic64_t *v) \
 EXPORT_SYMBOL(atomic64_##op##_return);
 
 #define ATOMIC64_FETCH_OP(op, c_op)					\
-long long atomic64_fetch_##op(long long a, atomic64_t *v)		\
+s64 atomic64_fetch_##op(s64 a, atomic64_t *v)				\
 {									\
 	unsigned long flags;						\
 	raw_spinlock_t *lock = lock_addr(v);				\
-	long long val;							\
+	s64 val;							\
 									\
 	raw_spin_lock_irqsave(lock, flags);				\
 	val = v->counter;						\
@@ -134,11 +134,11 @@ ATOMIC64_OPS(xor, ^=)
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-long long atomic64_dec_if_positive(atomic64_t *v)
+s64 atomic64_dec_if_positive(atomic64_t *v)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
-	long long val;
+	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter - 1;
@@ -149,11 +149,11 @@ long long atomic64_dec_if_positive(atomic64_t *v)
 }
 EXPORT_SYMBOL(atomic64_dec_if_positive);
 
-long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
+s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
-	long long val;
+	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
@@ -164,11 +164,11 @@ long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
 }
 EXPORT_SYMBOL(atomic64_cmpxchg);
 
-long long atomic64_xchg(atomic64_t *v, long long new)
+s64 atomic64_xchg(atomic64_t *v, s64 new)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
-	long long val;
+	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
@@ -178,11 +178,11 @@ long long atomic64_xchg(atomic64_t *v, long long new)
 }
 EXPORT_SYMBOL(atomic64_xchg);
 
-long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u)
+s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
-	long long val;
+	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
--
2.11.0