[RFC v1 3/5] seq_file: Add percpu seq_hlist helpers with locking iterators
From: Daniel Wagner
Date: Fri Feb 20 2015 - 09:40:31 EST
Introduce a variant of the seq_hlist helpers for iterating seq_hlists
that are protected by percpu spinlocks.
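
The cpu "cursor" lives in the caller's iterator state, typically
reachable through seq_file->private. A user with a percpu hlist
protected by a percpu spinlock would wire the helpers up roughly as
follows (the example_* names are illustrative, not part of this patch):

  static DEFINE_PER_CPU(struct hlist_head, example_list);
  /* each percpu lock must be spin_lock_init()ed during setup */
  static DEFINE_PER_CPU(spinlock_t, example_lock);

  struct example_iter {
  	int cpu;
  };

  static void *example_start(struct seq_file *m, loff_t *pos)
  {
  	struct example_iter *iter = m->private;

  	return seq_hlist_start_percpu_locked(&example_list,
  					     &example_lock, &iter->cpu, *pos);
  }

  static void *example_next(struct seq_file *m, void *v, loff_t *pos)
  {
  	struct example_iter *iter = m->private;

  	return seq_hlist_next_percpu_locked(v, &example_list,
  					    &example_lock, &iter->cpu, pos);
  }

  static void example_stop(struct seq_file *m, void *v)
  {
  	struct example_iter *iter = m->private;

  	seq_hlist_stop_percpu_locked(v, &example_lock, &iter->cpu);
  }
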
Signed-off-by: Daniel Wagner <daniel.wagner@xxxxxxxxxxxx>
Cc: Jeff Layton <jlayton@xxxxxxxxxxxxxxx>
Cc: "J. Bruce Fields" <bfields@xxxxxxxxxxxx>
Cc: Alexander Viro <viro@xxxxxxxxxxxxxxxxxx>
---
fs/seq_file.c | 83 ++++++++++++++++++++++++++++++++++++++++++++++++
include/linux/seq_file.h | 13 ++++++++
2 files changed, 96 insertions(+)
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 555f821..56adfdb 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -966,3 +966,86 @@ seq_hlist_next_percpu(void *v, struct hlist_head __percpu *head,
 	return NULL;
 }
 EXPORT_SYMBOL(seq_hlist_next_percpu);
+
+/**
+ * seq_hlist_start_percpu_locked - start an iteration of a percpu hlist array
+ * @head: pointer to percpu array of struct hlist_heads
+ * @lock: pointer to percpu spinlock which protects @head
+ * @cpu: pointer to cpu "cursor"
+ * @pos: start position of sequence
+ *
+ * Called at seq_file->op->start().
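+ *
+ * On success, returns the node at @pos with the spinlock of that node's
+ * cpu held; the lock is handed on to the ->next() and ->stop() helpers.
+ * Returns NULL with no lock held once @pos is past the last entry.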
+ */
+struct hlist_node *
+seq_hlist_start_percpu_locked(struct hlist_head __percpu *head,
+			      spinlock_t __percpu *lock, int *cpu, loff_t pos)
+{
+	struct hlist_node *node;
+
+	for_each_possible_cpu(*cpu) {
+		spin_lock(per_cpu_ptr(lock, *cpu));
+		hlist_for_each(node, per_cpu_ptr(head, *cpu)) {
+			if (pos-- == 0)
+				return node;
+		}
+		spin_unlock(per_cpu_ptr(lock, *cpu));
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(seq_hlist_start_percpu_locked);
+
+/**
+ * seq_hlist_next_percpu_locked - move to the next position of the percpu hlist array
+ * @v: pointer to current hlist_node
+ * @head: pointer to percpu array of struct hlist_heads
+ * @lock: pointer to percpu spinlock which protects @head
+ * @cpu: pointer to cpu "cursor"
+ * @pos: the current position of the sequence
+ *
+ * Called at seq_file->op->next().
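+ *
+ * Expects the lock of the current *cpu to be held. Once the current
+ * percpu list is exhausted the lock is dropped, and the lock of the
+ * next cpu with a non-empty list is taken instead. Returns NULL with
+ * no lock held at the end of the iteration.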
+ */
+struct hlist_node *
+seq_hlist_next_percpu_locked(void *v, struct hlist_head __percpu *head,
+			     spinlock_t __percpu *lock,
+			     int *cpu, loff_t *pos)
+{
+	struct hlist_node *node = v;
+
+	++*pos;
+
+	if (node->next)
+		return node->next;
+
+	spin_unlock(per_cpu_ptr(lock, *cpu));
+
+	for (*cpu = cpumask_next(*cpu, cpu_possible_mask); *cpu < nr_cpu_ids;
+	     *cpu = cpumask_next(*cpu, cpu_possible_mask)) {
+		struct hlist_head *bucket;
+
+		spin_lock(per_cpu_ptr(lock, *cpu));
+		bucket = per_cpu_ptr(head, *cpu);
+
+		if (!hlist_empty(bucket))
+			return bucket->first;
+
+		spin_unlock(per_cpu_ptr(lock, *cpu));
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(seq_hlist_next_percpu_locked);
+
+/**
+ * seq_hlist_stop_percpu_locked - stop iterating over the percpu hlist array
+ * @v: pointer to current hlist_node
+ * @lock: pointer to percpu spinlock which protects the iterated hlists
+ * @cpu: pointer to cpu "cursor"
+ *
+ * Called at seq_file->op->stop().
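+ *
+ * Releases the percpu spinlock still held from ->start() or ->next().
+ * A NULL @v means the iteration already ran past the last list and
+ * dropped the lock, so there is nothing left to release.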
+ */
+void
+seq_hlist_stop_percpu_locked(void *v, spinlock_t __percpu *lock, int *cpu)
+{
+	if (v)
+		spin_unlock(per_cpu_ptr(lock, *cpu));
+}
+EXPORT_SYMBOL(seq_hlist_stop_percpu_locked);
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index afbb1fd..6419ac4 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -184,4 +184,17 @@ extern struct hlist_node *seq_hlist_start_percpu(struct hlist_head __percpu *hea
 extern struct hlist_node *seq_hlist_next_percpu(void *v, struct hlist_head __percpu *head, int *cpu, loff_t *pos);
+extern struct hlist_node *seq_hlist_start_percpu_locked(
+		struct hlist_head __percpu *head,
+		spinlock_t __percpu *lock,
+		int *cpu, loff_t pos);
+
+extern struct hlist_node *seq_hlist_next_percpu_locked(
+		void *v, struct hlist_head __percpu *head,
+		spinlock_t __percpu *lock,
+		int *cpu, loff_t *pos);
+
+extern void seq_hlist_stop_percpu_locked(
+		void *v, spinlock_t __percpu *lock, int *cpu);
+
 #endif
--
2.1.0