[RFC v2 2/6] lockdep: LOCKED_ACCESS: Introduce locked access class and acqchain
From: Boqun Feng
Date: Tue Feb 16 2016 - 00:58:50 EST
Similar to the concept of a lock class, a locked access class is a group
of locks and related data accesses of those locks. A locked access
class also contains the structures for allocation and lookup of
acqchains and accesses.
The address of a locked access class is used as its key. We tag a group
of locks with a locked access class by setting the content of the
group's lock_class_key to be the key (i.e. the address) of the locked
access class; we can do this trick because, AFAIK, lockdep only uses the
address of a lock_class_key. However, although this doesn't change the
size of a lock_class_key, this does change the alignment requirement.
And tagging a lock_class_key with a locked access class is the step #1
to use LOCKED_ACCESS for a locking mechanism.
Similar to the concept of a lock class chain, an acqchain is short for
acquire chain, which is a chain of lock acquisitions. The difference
between a lock class chain and an acqchain is that a lock class chain is
keyed by the hash sum of the class keys, whereas an acqchain is keyed by
the hash sum of the acquire ips. We introduce acqchains here because in
LOCKED_ACCESS, we care more about the locations of critical sections.
This patch defines the structures for those concepts, along with the
manipulation operations. Besides, config option CONFIG_LOCKED_ACCESS
is introduced in this patch, too.
Signed-off-by: Boqun Feng <boqun.feng@xxxxxxxxx>
---
include/linux/lockdep.h | 17 ++++
kernel/locking/lockdep.c | 156 +++++++++++++++++++++++++++++++++++++
kernel/locking/lockdep_internals.h | 74 ++++++++++++++++++
lib/Kconfig.debug | 12 +++
4 files changed, 259 insertions(+)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index c57e424..140c1c3 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -51,9 +51,26 @@ struct lockdep_subclass_key {
char __one_byte;
} __attribute__ ((__packed__));
+#ifdef CONFIG_LOCKED_ACCESS
+struct locked_access_class;
+
+struct lock_class_key {
+ union {
+ struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
+ /*
+ * Use the content of the lock_class_key to store locked access
+ * class, as lockdep only uses the address of a lock_class_key.
+ * However, please note that by doing so, we change the alignment
+ * requirement of lock_class_key.
+ */
+ struct locked_access_class *laclass;
+ };
+};
+#else
struct lock_class_key {
struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
};
+#endif
extern struct lock_class_key __lockdep_no_validate__;
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index f36df37..fdb1b8c 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4310,3 +4310,159 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
dump_stack();
}
EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
+
+#ifdef CONFIG_LOCKED_ACCESS
+static void locked_access_class_init(struct locked_access_class *laclass)
+{
+ int i;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ arch_spin_lock(&laclass->lock);
+
+ if (laclass->initialized) {
+ arch_spin_unlock(&laclass->lock);
+ return;
+ }
+
+ for (i = 0; i < ACQCHAIN_HASH_SIZE; i++)
+ INIT_LIST_HEAD(laclass->acqchain_hashtable + i);
+
+ laclass->nr_acqchains = 0;
+ laclass->nr_acqchain_hlocks = 0;
+ laclass->nr_access_structs = 0;
+
+ /*
+ * Make sure the members of laclass have been initialized before
+ * setting ->initialized.
+ */
+ smp_store_release(&laclass->initialized, 1);
+ arch_spin_unlock(&laclass->lock);
+ local_irq_restore(flags);
+}
+
+static struct acqchain *lookup_acqchain(struct locked_access_class *laclass,
+ struct task_struct *task,
+ u64 acqchain_key)
+{
+ struct list_head *hash_head;
+ struct acqchain *acqchain;
+
+ hash_head = acqchainhashentry(laclass, acqchain_key);
+
+ list_for_each_entry_lockless(acqchain, hash_head, entry) {
+ if (acqchain->chain_key == acqchain_key
+ && is_same_irq_context(acqchain->irq_context,
+ task_irq_context(task)))
+ return acqchain;
+ }
+
+ return NULL;
+}
+
+static struct acqchain *add_acqchain(struct locked_access_class *laclass,
+ struct task_struct *task,
+ u64 acqchain_key)
+{
+ unsigned long flags;
+ struct acqchain *acqchain = NULL;
+ struct held_lock *hlock, *hlock_curr;
+ struct lockdep_map *instance;
+ int i, j, max_depth;
+ struct list_head *hash_head;
+
+ local_irq_save(flags);
+ arch_spin_lock(&laclass->lock);
+
+ /* Lookup again while holding the lock */
+ acqchain = lookup_acqchain(laclass, task, acqchain_key);
+
+ if (acqchain)
+ goto out;
+
+ if (laclass->nr_acqchains >= MAX_ACQCHAINS)
+ goto out;
+
+ hash_head = acqchainhashentry(laclass, acqchain_key);
+ acqchain = laclass->acqchains + laclass->nr_acqchains;
+ acqchain->chain_key = acqchain_key;
+
+ hlock = task->held_locks + task->lockdep_depth - 1;
+ acqchain->irq_context = task_irq_context(task);
+
+ for (i = task->lockdep_depth - 1; i >= 0; i--) {
+ hlock_curr = task->held_locks + i;
+ if (!is_same_irq_context(hlock_curr->irq_context,
+ acqchain->irq_context))
+ break;
+ }
+
+ i++;
+ max_depth = task->lockdep_depth - i;
+
+ j = 0;
+ if (likely(laclass->nr_acqchain_hlocks + max_depth
+ <= MAX_ACQCHAIN_HLOCKS)) {
+ acqchain->base = laclass->nr_acqchain_hlocks;
+ for (; i < task->lockdep_depth; i++) {
+ hlock_curr = task->held_locks + i;
+ instance = hlock_curr->instance;
+
+ /*
+ * A lock instance may use its own address as its ->key,
+ * in which case the lock instance doesn't belong to
+ * a locked access class.
+ */
+ if (instance != (void *)instance->key &&
+ instance->key->laclass == laclass) {
+ laclass->acqchain_hlocks[acqchain->base + j]
+ = hlock_curr->acquire_ip;
+ j++;
+ }
+ }
+ laclass->nr_acqchain_hlocks += j;
+ }
+
+ acqchain->depth = j;
+
+ /*
+ * Make sure stores to the members of the acqchain are observed
+ * before it is published.
+ */
+ smp_store_release(&laclass->nr_acqchains, laclass->nr_acqchains + 1);
+ INIT_LIST_HEAD(&acqchain->accesses);
+
+ /*
+ * Pair with the list_for_each_entry_lockless() in lookup_acqchain()
+ */
+ list_add_tail_rcu(&acqchain->entry, hash_head);
+out:
+ arch_spin_unlock(&laclass->lock);
+ local_irq_restore(flags);
+ return acqchain;
+}
+
+/*
+ * Lookup the acqchain with a key of @acqchain_key in the hash table of
+ * @laclass, if none exists, add a new one.
+ *
+ * Return the acqchain if one is found or if one is added, otherwise return
+ * NULL.
+ *
+ * Must be called after @laclass is initialized.
+ */
+static struct acqchain *
+lookup_or_add_acqchain(struct locked_access_class *laclass,
+ struct task_struct *task,
+ u64 acqchain_key)
+{
+ struct acqchain *acqchain = NULL;
+
+ acqchain = lookup_acqchain(laclass, task, acqchain_key);
+ if (!acqchain)
+ acqchain = add_acqchain(laclass, task, acqchain_key);
+
+ return acqchain;
+}
+
+#endif /* CONFIG_LOCKED_ACCESS */
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
index 51c4b24..5e2e133 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -168,3 +168,77 @@ DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
# define debug_atomic_dec(ptr) do { } while (0)
# define debug_atomic_read(ptr) 0
#endif
+
+#ifdef CONFIG_LOCKED_ACCESS
+/*
+ * A chain of lock acquisitions, keyed by the hash sum of all the
+ * instruction positions of lock acquisitions
+ */
+struct acqchain {
+ u8 irq_context;
+ s8 depth;
+ s16 base;
+ /* Entry in hash table */
+ struct list_head entry;
+ u64 chain_key;
+ /* List of data accesses that happen after this chain */
+ struct list_head accesses;
+};
+
+#define iterate_acqchain_key(key, ip) \
+ (((key) << MAX_LOCKDEP_KEYS_BITS) ^ \
+ ((key) >> (64 - MAX_LOCKDEP_KEYS_BITS)) ^ \
+ (ip))
+
+#define MAX_ACQCHAINS_BITS 16
+#define MAX_ACQCHAINS (1UL << MAX_ACQCHAINS_BITS)
+#define MAX_ACQCHAIN_HLOCKS (MAX_ACQCHAINS * 5)
+
+#define ACQCHAIN_HASH_BITS (MAX_ACQCHAINS_BITS-1)
+#define ACQCHAIN_HASH_SIZE (1UL << ACQCHAIN_HASH_BITS)
+#define __acqchainhashfn(chain) hash_long(chain, ACQCHAIN_HASH_BITS)
+#define acqchainhashentry(lad, chain) \
+ (lad->acqchain_hashtable + __acqchainhashfn((chain)))
+
+#define MAX_LOCKED_ACCESS_STRUCTS (1UL << 16)
+
+/* Records of data accesses in LOCKED_ACCESS */
+struct locked_access_struct {
+ struct list_head list;
+ struct locked_access_location *loc;
+ int type;
+};
+
+/*
+ * locked_access_class represents a group of critical sections and related data
+ * accesses. A locked access class should only be defined statically, and the
+ * address of a locked_access_class is used as the 'key' of a locked access
+ * class.
+ */
+struct locked_access_class {
+ const char *name;
+ /* Hash table of acqchains, for lookup */
+ struct list_head acqchain_hashtable[ACQCHAIN_HASH_SIZE];
+ /* Storage of acqchains, for allocation */
+ struct acqchain acqchains[MAX_ACQCHAINS];
+ long nr_acqchains;
+ /* Storage of acquired IPs of acqchains, for allocation */
+ unsigned long acqchain_hlocks[MAX_ACQCHAIN_HLOCKS];
+ long nr_acqchain_hlocks;
+ /* Storage of data accesses, for allocation */
+ struct locked_access_struct access_structs[MAX_LOCKED_ACCESS_STRUCTS];
+ long nr_access_structs;
+ arch_spinlock_t lock;
+ int initialized;
+};
+
+#define INIT_LOCKED_ACCESS_DATA(_name) \
+ { \
+ .name = #_name, \
+ .lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+ .initialized = 0, \
+ .nr_acqchains = 0, \
+ .nr_acqchain_hlocks = 0,\
+ .nr_access_structs = 0, \
+ }
+#endif /* CONFIG_LOCKED_ACCESS */
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ecb9e75..178f288 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1065,6 +1065,18 @@ config LOCK_STAT
CONFIG_LOCK_STAT defines "contended" and "acquired" lock events.
(CONFIG_LOCKDEP defines "acquire" and "release" events.)
+config LOCKED_ACCESS
+ bool
+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+ select DEBUG_LOCK_ALLOC
+ default n
+ help
+ Provides a way to correlate data accesses with critical sections.
+ LOCKED_ACCESS is designed for the users of locking mechanisms to see
+ which data accesses happen inside which lock critical sections. This
+ could help the users of a locking mechanism to gain more detailed
+ information about their lock usage.
+
config DEBUG_LOCKDEP
bool "Lock dependency engine debugging"
depends on DEBUG_KERNEL && LOCKDEP
--
2.7.1