[PATCH 13/18] locking/atomic: s390: use s64 for atomic64
From: Mark Rutland
Date: Wed May 22 2019 - 09:27:59 EST
As a step towards making the atomic64 API use consistent types treewide,
let's have the s390 atomic64 implementation use s64 as the underlying
type for atomic64_t, rather than long, matching the generated headers.
While atomic64_read() is changed to return s64, it still operates on the
generic definition of atomic64_t, whose ::counter member remains long for
now; the generic definition will be converted in a subsequent patch.
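For context, the generic 64-bit definition in include/linux/types.h has
roughly the following shape (a simplified sketch from memory, prior to the
treewide conversion):

  #ifdef CONFIG_64BIT
  typedef struct {
          long counter;   /* to become s64 once the generic type is converted */
  } atomic64_t;

  #define ATOMIC64_INIT(i)        { (i) }
  #endif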
The s390-internal __atomic64_*() ops are also used by the s390 bitops,
and expect pointers to long. Since atomic64_t::counter will be converted
to s64 in a subsequent patch, pointers to it are explicitly cast to
pointers to long when passed to the __atomic64_*() ops.
Otherwise, there should be no functional change as a result of this
patch.
Signed-off-by: Mark Rutland <mark.rutland@xxxxxxx>
Cc: Heiko Carstens <heiko.carstens@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Will Deacon <will.deacon@xxxxxxx>
---
arch/s390/include/asm/atomic.h | 38 +++++++++++++++++++-------------------
1 file changed, 19 insertions(+), 19 deletions(-)
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index fd20ab5d4cf7..491ad53a0d4e 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -84,9 +84,9 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
#define ATOMIC64_INIT(i) { (i) }
-static inline long atomic64_read(const atomic64_t *v)
+static inline s64 atomic64_read(const atomic64_t *v)
{
- long c;
+ s64 c;
asm volatile(
" lg %0,%1\n"
@@ -94,49 +94,49 @@ static inline long atomic64_read(const atomic64_t *v)
return c;
}
-static inline void atomic64_set(atomic64_t *v, long i)
+static inline void atomic64_set(atomic64_t *v, s64 i)
{
asm volatile(
" stg %1,%0\n"
: "=Q" (v->counter) : "d" (i));
}
-static inline long atomic64_add_return(long i, atomic64_t *v)
+static inline s64 atomic64_add_return(s64 i, atomic64_t *v)
{
- return __atomic64_add_barrier(i, &v->counter) + i;
+ return __atomic64_add_barrier(i, (long *)&v->counter) + i;
}
-static inline long atomic64_fetch_add(long i, atomic64_t *v)
+static inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
{
- return __atomic64_add_barrier(i, &v->counter);
+ return __atomic64_add_barrier(i, (long *)&v->counter);
}
-static inline void atomic64_add(long i, atomic64_t *v)
+static inline void atomic64_add(s64 i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
- __atomic64_add_const(i, &v->counter);
+ __atomic64_add_const(i, (long *)&v->counter);
return;
}
#endif
- __atomic64_add(i, &v->counter);
+ __atomic64_add(i, (long *)&v->counter);
}
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
+static inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
- return __atomic64_cmpxchg(&v->counter, old, new);
+ return __atomic64_cmpxchg((long *)&v->counter, old, new);
}
#define ATOMIC64_OPS(op) \
-static inline void atomic64_##op(long i, atomic64_t *v) \
+static inline void atomic64_##op(s64 i, atomic64_t *v) \
{ \
- __atomic64_##op(i, &v->counter); \
+ __atomic64_##op(i, (long *)&v->counter); \
} \
-static inline long atomic64_fetch_##op(long i, atomic64_t *v) \
+static inline s64 atomic64_fetch_##op(s64 i, atomic64_t *v) \
{ \
- return __atomic64_##op##_barrier(i, &v->counter); \
+ return __atomic64_##op##_barrier(i, (long *)&v->counter); \
}
ATOMIC64_OPS(and)
@@ -145,8 +145,8 @@ ATOMIC64_OPS(xor)
#undef ATOMIC64_OPS
-#define atomic64_sub_return(_i, _v) atomic64_add_return(-(long)(_i), _v)
-#define atomic64_fetch_sub(_i, _v) atomic64_fetch_add(-(long)(_i), _v)
-#define atomic64_sub(_i, _v) atomic64_add(-(long)(_i), _v)
+#define atomic64_sub_return(_i, _v) atomic64_add_return(-(s64)(_i), _v)
+#define atomic64_fetch_sub(_i, _v) atomic64_fetch_add(-(s64)(_i), _v)
+#define atomic64_sub(_i, _v) atomic64_add(-(s64)(_i), _v)
#endif /* __ARCH_S390_ATOMIC__ */
--
2.11.0