[PATCH v2 7/7] asm-generic, x86: add comments for atomic instrumentation

From: Dmitry Vyukov
Date: Fri May 26 2017 - 15:12:15 EST


The comments are factored out from the code changes to make them
easier to read. Add them separately to explain some non-obvious
aspects.

Signed-off-by: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: Mark Rutland <mark.rutland@xxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Will Deacon <will.deacon@xxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Andrey Ryabinin <aryabinin@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: kasan-dev@xxxxxxxxxxxxxxxx
Cc: linux-mm@xxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
Cc: x86@xxxxxxxxxx
---
arch/x86/include/asm/atomic.h | 7 +++++++
include/asm-generic/atomic-instrumented.h | 30 ++++++++++++++++++++++++++++++
2 files changed, 37 insertions(+)

diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index b7900346c77e..8a9e65e585db 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -23,6 +23,13 @@
*/
static __always_inline int arch_atomic_read(const atomic_t *v)
{
+ /*
+ * Note: READ_ONCE() here leads to double instrumentation, as both
+ * READ_ONCE() and atomic_read() contain instrumentation.
+ * This is a deliberate choice: the alternative, READ_ONCE_NOCHECK(),
+ * compiles into a non-inlined function call that considerably
+ * increases binary size and stack usage under KASAN.
+ */
return READ_ONCE((v)->counter);
}

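For reference, the other half of the double instrumentation is the
wrapper generated by atomic-instrumented.h. A rough sketch of its shape
(see the earlier patches in this series for the exact code):

static __always_inline int atomic_read(const atomic_t *v)
{
	kasan_check_read(v, sizeof(*v));	/* check #1: the wrapper */
	return arch_atomic_read(v);		/* check #2: READ_ONCE() inside */
}
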
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
index 7f8eb761f896..1134af090976 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -1,3 +1,15 @@
+/*
+ * This file provides wrappers with KASAN instrumentation for atomic operations.
+ * To use this functionality, an arch's atomic.h file needs to define all
+ * atomic operations with an arch_ prefix (e.g. arch_atomic_read()) and
+ * include this file at the end. This file then provides atomic_read(),
+ * which forwards to arch_atomic_read() for the actual atomic operation.
+ * Note: if an arch atomic operation is implemented in terms of other atomic
+ * operations (e.g. an atomic_read()/atomic_cmpxchg() loop), it needs to use
+ * the arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
+ * double instrumentation.
+ */
+
#ifndef _LINUX_ATOMIC_INSTRUMENTED_H
#define _LINUX_ATOMIC_INSTRUMENTED_H

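To illustrate the convention, a hypothetical arch (the "foo" name and
the bodies below are made up for illustration) would end up with
something like:

/* arch/foo/include/asm/atomic.h */
static __always_inline int arch_atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}

/* Per the Note above: composite ops use arch_ variants internally. */
static __always_inline void arch_atomic_or(int i, atomic_t *v)
{
	int old;

	do {
		old = arch_atomic_read(v);
	} while (arch_atomic_cmpxchg(v, old, old | i) != old);
}

/* ... remaining arch_atomic_*() and arch_cmpxchg*() definitions ... */

#include <asm-generic/atomic-instrumented.h>	/* provides atomic_read() etc. */
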
@@ -339,6 +351,15 @@ static __always_inline bool atomic64_add_negative(long long i, atomic64_t *v)
return arch_atomic64_add_negative(i, v);
}

+/*
+ * In the following macros we need to be careful not to clash with arch_ macros.
+ * arch_xchg() can be defined as a statement expression as well: if we declare
+ * a __ptr variable here, arch_xchg() also declares a __ptr variable, and we
+ * pass __ptr as an argument to arch_xchg(), then arch_xchg() will use its own
+ * __ptr instead of ours. This leads to unpleasant crashes. To avoid the
+ * problem, the following macros declare variables with lots of underscores.
+ */
+
#define cmpxchg(ptr, old, new) \
({ \
__typeof__(ptr) ___ptr = (ptr); \
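To make the hazard concrete, here is a made-up pair of definitions (the
bodies are illustrative, not the actual tree's code) that exhibits the
clash:

#define arch_xchg(ptr, v)					\
({								\
	__typeof__(ptr) __ptr = (ptr);	/* arch's own __ptr */	\
	__typeof__(*(ptr)) __old = *__ptr;			\
	*__ptr = (v);						\
	__old;							\
})

#define xchg(ptr, v)						\
({								\
	__typeof__(ptr) __ptr = (ptr);	/* clashes with above */ \
	kasan_check_write(__ptr, sizeof(*__ptr));		\
	arch_xchg(__ptr, (v));					\
})

Here arch_xchg(__ptr, (v)) expands to "__typeof__(__ptr) __ptr = (__ptr);":
at the point of the initializer the freshly declared inner __ptr already
shadows the outer one, so it is initialized from its own indeterminate
value and the dereferences that follow crash. Hence the ___ptr/____ptr
names in these wrappers.
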
@@ -374,6 +395,15 @@ static __always_inline bool atomic64_add_negative(long long i, atomic64_t *v)
arch_cmpxchg64_local(____ptr, (old), (new)); \
})

+/*
+ * Originally we had the following code here:
+ * __typeof__(p1) ____p1 = (p1);
+ * kasan_check_write(____p1, 2 * sizeof(*____p1));
+ * arch_cmpxchg_double(____p1, (p2), (o1), (o2), (n1), (n2));
+ * But that leads to compilation failures (see gcc bug 72873).
+ * So for now cmpxchg_double() is left non-instrumented.
+ * There are few callers of cmpxchg_double(), so it's not critical.
+ */
#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
({ \
arch_cmpxchg_double((p1), (p2), (o1), (o2), (n1), (n2)); \
--
2.13.0.219.gdb65acc882-goog