[RFC v1 5/5] locks: Use blocked_lock_lock only to protect blocked_hash

From: Daniel Wagner
Date: Fri Feb 20 2015 - 09:40:54 EST


blocked_lock_lock and file_lock_lock are used to protect a file_lock's
fl_link, fl_block and fl_next fields, the blocked_hash and the percpu
file_lock_list.

The plan is to reorganize which lock protects what, so that use of the
global blocked_lock_lock is reduced.

Whenever we insert a new lock, we grab the corresponding percpu
file_lock_lock in addition to the flc_lock. The global
blocked_lock_lock is only taken when the blocked_hash is involved.
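
For illustration, the intended lock nesting when a POSIX lock request
has to block looks roughly like this (a sketch only, mirroring the
__posix_lock_file hunk below; it assumes ctx is the inode's
file_lock_context and omits the conflict scan and error handling):

    /* per-inode lock protecting the inode's lock lists */
    spin_lock(&ctx->flc_lock);

    /* percpu lock covering file_lock_list, fl_block and fl_next */
    spin_lock(per_cpu_ptr(&file_lock_lock, fl->fl_link_cpu));

    /* global lock, now only for the blocked_hash (deadlock detection) */
    spin_lock(&blocked_lock_lock);
    if (likely(!posix_locks_deadlock(request, fl)))
        __locks_insert_posix_block(fl, request);
    spin_unlock(&blocked_lock_lock);

    spin_unlock(per_cpu_ptr(&file_lock_lock, fl->fl_link_cpu));
    spin_unlock(&ctx->flc_lock);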

file_lock_lock now protects the file_lock_list and the fl_link,
fl_block and fl_next fields on its own. That means we need to define
which percpu file_lock_lock is used for all waiters of a given
blocker. Luckily, fl_link_cpu can be reused for fl_block and fl_next.
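
Concretely, a waiter inherits the blocker's fl_link_cpu when it is
queued, so both ends of the wait relationship agree on which percpu
spinlock serializes fl_block and fl_next (this mirrors
__locks_insert_block and locks_delete_block below):

    /* queueing: the waiter is serialized on the blocker's CPU lock */
    waiter->fl_link_cpu = blocker->fl_link_cpu;
    waiter->fl_next = blocker;
    list_add_tail(&waiter->fl_block, &blocker->fl_block);

    /* dequeueing later only needs that same percpu lock */
    spin_lock(per_cpu_ptr(&file_lock_lock, waiter->fl_link_cpu));
    __locks_delete_block(waiter);
    spin_unlock(per_cpu_ptr(&file_lock_lock, waiter->fl_link_cpu));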

Signed-off-by: Daniel Wagner <daniel.wagner@xxxxxxxxxxxx>
Cc: Jeff Layton <jlayton@xxxxxxxxxxxxxxx>
Cc: "J. Bruce Fields" <bfields@xxxxxxxxxxxx>
Cc: Alexander Viro <viro@xxxxxxxxxxxxxxxxxx>
---
fs/locks.c | 78 ++++++++++++++++++++++++++++++++++----------------------------
1 file changed, 43 insertions(+), 35 deletions(-)

diff --git a/fs/locks.c b/fs/locks.c
index 20ed00a..73b99ac 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -161,6 +161,20 @@ int lease_break_time = 45;
* keep a list on each CPU, with each list protected by its own spinlock via
* the file_lock_lock. Note that alterations to the list also require that
* the relevant flc_lock is held.
+ *
+ * In addition, it also protects the fl->fl_block list, and the fl->fl_next
+ * pointer for file_lock structures that are acting as lock requests (in
+ * contrast to those that are acting as records of acquired locks).
+ *
+ * file_lock structures acting as lock requests (waiters) use the same
+ * spinlock as those acting as lock holders (blockers). E.g. if the
+ * blocker was initially added to the file_lock_list living on CPU 0,
+ * all waiters on that blocker are serialized via CPU 0's lock (see
+ * fl_link_cpu usage).
+ *
+ * In particular, adding an entry to the fl_block list requires that you hold
+ * both the flc_lock and the file_lock_lock (acquired in that order).
+ * Deleting an entry from the list however only requires the file_lock_lock.
*/
static DEFINE_PER_CPU(spinlock_t, file_lock_lock);
static DEFINE_PER_CPU(struct hlist_head, file_lock_list);
@@ -182,19 +196,6 @@ static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
/*
* This lock protects the blocked_hash. Generally, if you're accessing it, you
* want to be holding this lock.
- *
- * In addition, it also protects the fl->fl_block list, and the fl->fl_next
- * pointer for file_lock structures that are acting as lock requests (in
- * contrast to those that are acting as records of acquired locks).
- *
- * Note that when we acquire this lock in order to change the above fields,
- * we often hold the flc_lock as well. In certain cases, when reading the fields
- * protected by this lock, we can skip acquiring it iff we already hold the
- * flc_lock.
- *
- * In particular, adding an entry to the fl_block list requires that you hold
- * both the flc_lock and the blocked_lock_lock (acquired in that order).
- * Deleting an entry from the list however only requires the file_lock_lock.
*/
static DEFINE_SPINLOCK(blocked_lock_lock);

@@ -602,7 +603,7 @@ static void locks_delete_global_blocked(struct file_lock *waiter)
/* Remove waiter from blocker's block list.
* When blocker ends up pointing to itself then the list is empty.
*
- * Must be called with blocked_lock_lock held.
+ * Must be called with file_lock_lock held.
*/
static void __locks_delete_block(struct file_lock *waiter)
{
@@ -612,7 +613,7 @@ static void __locks_delete_block(struct file_lock *waiter)

/* Posix block variant of __locks_delete_block.
*
- * Must be called with blocked_lock_lock held.
+ * Must be called with file_lock_lock held.
*/
static void __locks_delete_posix_block(struct file_lock *waiter)
{
@@ -622,16 +623,18 @@ static void __locks_delete_posix_block(struct file_lock *waiter)

static void locks_delete_block(struct file_lock *waiter)
{
- spin_lock(&blocked_lock_lock);
+ spin_lock(per_cpu_ptr(&file_lock_lock, waiter->fl_link_cpu));
__locks_delete_block(waiter);
- spin_unlock(&blocked_lock_lock);
+ spin_unlock(per_cpu_ptr(&file_lock_lock, waiter->fl_link_cpu));
}

static void locks_delete_posix_block(struct file_lock *waiter)
{
+ spin_lock(per_cpu_ptr(&file_lock_lock, waiter->fl_link_cpu));
spin_lock(&blocked_lock_lock);
__locks_delete_posix_block(waiter);
spin_unlock(&blocked_lock_lock);
+ spin_unlock(per_cpu_ptr(&file_lock_lock, waiter->fl_link_cpu));
}

/* Insert waiter into blocker's block list.
@@ -639,22 +642,23 @@ static void locks_delete_posix_block(struct file_lock *waiter)
* the order they blocked. The documentation doesn't require this but
* it seems like the reasonable thing to do.
*
- * Must be called with both the flc_lock and blocked_lock_lock held. The
- * fl_block list itself is protected by the blocked_lock_lock, but by ensuring
+ * Must be called with both the flc_lock and file_lock_lock held. The
+ * fl_block list itself is protected by the file_lock_lock, but by ensuring
* that the flc_lock is also held on insertions we can avoid taking the
- * blocked_lock_lock in some cases when we see that the fl_block list is empty.
+ * file_lock_lock in some cases when we see that the fl_block list is empty.
*/
static void __locks_insert_block(struct file_lock *blocker,
struct file_lock *waiter)
{
BUG_ON(!list_empty(&waiter->fl_block));
+ waiter->fl_link_cpu = blocker->fl_link_cpu;
waiter->fl_next = blocker;
list_add_tail(&waiter->fl_block, &blocker->fl_block);
}

/* Posix block variant of __locks_insert_block.
*
- * Must be called with flc_lock and blocked_lock_lock held.
+ * Must be called with flc_lock and file_lock_lock held.
*/
static void __locks_insert_posix_block(struct file_lock *blocker,
struct file_lock *waiter)
@@ -668,9 +672,9 @@ static void __locks_insert_posix_block(struct file_lock *blocker,
static void locks_insert_block(struct file_lock *blocker,
struct file_lock *waiter)
{
- spin_lock(&blocked_lock_lock);
+ spin_lock(per_cpu_ptr(&file_lock_lock, blocker->fl_link_cpu));
__locks_insert_block(blocker, waiter);
- spin_unlock(&blocked_lock_lock);
+ spin_unlock(per_cpu_ptr(&file_lock_lock, blocker->fl_link_cpu));
}

/*
@@ -681,31 +685,33 @@ static void locks_insert_block(struct file_lock *blocker,
static void locks_wake_up_blocks(struct file_lock *blocker)
{
/*
- * Avoid taking global lock if list is empty. This is safe since new
+ * Avoid taking the lock if the list is empty. This is safe since new
* blocked requests are only added to the list under the flc_lock, and
* the flc_lock is always held here. Note that removal from the fl_block
* list does not require the flc_lock, so we must recheck list_empty()
- * after acquiring the blocked_lock_lock.
+ * after acquiring the file_lock_lock.
*/
if (list_empty(&blocker->fl_block))
return;

- spin_lock(&blocked_lock_lock);
+ spin_lock(per_cpu_ptr(&file_lock_lock, blocker->fl_link_cpu));
while (!list_empty(&blocker->fl_block)) {
struct file_lock *waiter;

waiter = list_first_entry(&blocker->fl_block,
struct file_lock, fl_block);
- if (IS_POSIX(blocker))
+ if (IS_POSIX(blocker)) {
+ spin_lock(&blocked_lock_lock);
__locks_delete_posix_block(waiter);
- else
+ spin_unlock(&blocked_lock_lock);
+ } else
__locks_delete_block(waiter);
if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
waiter->fl_lmops->lm_notify(waiter);
else
wake_up(&waiter->fl_wait);
}
- spin_unlock(&blocked_lock_lock);
+ spin_unlock(per_cpu_ptr(&file_lock_lock, blocker->fl_link_cpu));
}

static void
@@ -732,9 +738,11 @@ static void
locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
{
locks_unlink_lock_ctx(fl);
- if (dispose)
+ if (dispose) {
+ spin_lock(per_cpu_ptr(&file_lock_lock, fl->fl_link_cpu));
list_add(&fl->fl_list, dispose);
- else
+ spin_unlock(per_cpu_ptr(&file_lock_lock, fl->fl_link_cpu));
+ } else
locks_free_lock(fl);
}

@@ -1004,12 +1012,14 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
* locks list must be done while holding the same lock!
*/
error = -EDEADLK;
+ spin_lock(per_cpu_ptr(&file_lock_lock, fl->fl_link_cpu));
spin_lock(&blocked_lock_lock);
if (likely(!posix_locks_deadlock(request, fl))) {
error = FILE_LOCK_DEFERRED;
__locks_insert_posix_block(fl, request);
}
spin_unlock(&blocked_lock_lock);
+ spin_unlock(per_cpu_ptr(&file_lock_lock, fl->fl_link_cpu));
goto out;
}
}
@@ -2490,12 +2500,14 @@ posix_unblock_lock(struct file_lock *waiter)
{
int status = 0;

+ spin_lock(per_cpu_ptr(&file_lock_lock, waiter->fl_link_cpu));
spin_lock(&blocked_lock_lock);
if (waiter->fl_next)
__locks_delete_posix_block(waiter);
else
status = -ENOENT;
spin_unlock(&blocked_lock_lock);
+ spin_unlock(per_cpu_ptr(&file_lock_lock, waiter->fl_link_cpu));
return status;
}
EXPORT_SYMBOL(posix_unblock_lock);
@@ -2622,12 +2634,10 @@ static int locks_show(struct seq_file *f, void *v)
}

static void *locks_start(struct seq_file *f, loff_t *pos)
- __acquires(&blocked_lock_lock)
{
struct locks_iterator *iter = f->private;

iter->li_pos = *pos + 1;
- spin_lock(&blocked_lock_lock);
return seq_hlist_start_percpu_locked(&file_lock_list, &file_lock_lock,
&iter->li_cpu, *pos);
}
@@ -2642,12 +2652,10 @@ static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
}

static void locks_stop(struct seq_file *f, void *v)
- __releases(&blocked_lock_lock)
{
struct locks_iterator *iter = f->private;

seq_hlist_stop_percpu_locked(v, &file_lock_lock, &iter->li_cpu);
- spin_unlock(&blocked_lock_lock);
}

static const struct seq_operations locks_seq_operations = {
--
2.1.0
