On 06/04/2017 10:46, Davidlohr Bueso wrote:
+__range_read_lock_common(struct range_rwlock_tree *tree,
+ struct range_rwlock *lock, long state)
+{
+ struct interval_tree_node *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tree->lock, flags);
+ range_lock_set_reader(lock);
+
+ if (!__range_intersects_intree(tree, lock))
+ goto insert;
+
+ range_interval_tree_foreach(node, &tree->root,
+ lock->node.start, lock->node.last) {
+ struct range_rwlock *blocked_lock;
+ blocked_lock = range_entry(node, struct range_rwlock, node);
+
+ if (!range_lock_is_reader(blocked_lock))
+ lock->blocking_ranges++;
+ }
+insert:
+ __range_tree_insert(tree, lock);
+
+ lock->waiter = current;
Hi Davidlohr,

Setting lock->waiter after calling range_lock_set_reader() resets the
reader flag. Moving the range_lock_set_reader() call here, after
lock->waiter has been assigned, fixes that.
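Something along these lines (just a sketch of the reordering, assuming the
reader flag is encoded in lock->waiter, which the clobbering above suggests):

	lock->waiter = current;
	/* set the reader flag only once lock->waiter holds its final value */
	range_lock_set_reader(lock);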
+ spin_unlock_irqrestore(&tree->lock, flags);
+
+ return wait_for_ranges(tree, lock, state);
+}
+int range_read_trylock(struct range_rwlock_tree *tree, struct range_rwlock *lock)
+{
+ int ret = true;
+ unsigned long flags;
+ struct interval_tree_node *node;
+
+ spin_lock_irqsave(&tree->lock, flags);
+
+ if (!__range_intersects_intree(tree, lock))
+ goto insert;
+
+ /*
+ * We have overlapping ranges in the tree, ensure that we can
+ * in fact share the lock.
+ */
+ range_interval_tree_foreach(node, &tree->root,
+ lock->node.start, lock->node.last) {
+ struct range_rwlock *blocked_lock;
+ blocked_lock = range_entry(node, struct range_rwlock, node);
+
+ if (!range_lock_is_reader(blocked_lock)) {
+ ret = false;
+ goto unlock;
+ }
+ }
+insert:
+ range_lock_set_reader(lock);
Here, the lock->waiter field should be set to current before calling
range_lock_set_reader(), for the same reason as above.
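i.e. something like this (again only a sketch, mirroring the ordering
suggested for __range_read_lock_common()):

insert:
	lock->waiter = current;
	range_lock_set_reader(lock);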