Re: [patch] inotify: use the idr layer

From: Robert Love
Date: Tue Sep 28 2004 - 17:16:49 EST


On Tue, 2004-09-28 at 17:58 -0400, John McCutchan wrote:
> + spin_lock(&dev->lock);
> + ret = idr_get_new(&dev->idr, watcher, &watcher->wd);
> + spin_lock(&dev->lock);
>
> I think you mean spin_unlock on the third line? Other than that I think
> it should work.

Ech, I have an updated patch.
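
For reference, the allocation path in the updated patch follows the usual idr
pattern: idr_pre_get() runs outside dev->lock because it can sleep, the actual
idr_get_new() runs under the lock with a matching spin_unlock(), and -EAGAIN
loops back for another preallocation. A rough sketch of that pattern, using a
hypothetical helper name and the structs declared in the patch below:

static int example_get_wd(struct inotify_device *dev,
			  struct inotify_watcher *watcher)
{
	int ret;

repeat:
	/* may sleep, so preallocate before taking dev->lock */
	if (!idr_pre_get(&dev->idr, GFP_KERNEL))
		return -1;

	spin_lock(&dev->lock);
	ret = idr_get_new(&dev->idr, watcher, &watcher->wd);
	spin_unlock(&dev->lock);	/* unlock, not a second spin_lock() */

	if (ret == -EAGAIN)		/* preallocation was used up, try again */
		goto repeat;
	if (ret == -ENOSPC)		/* the idr is out of watch descriptors */
		return -1;

	return 0;
}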

Robert Love

idr layer support for inotify

Replace the fixed-size WD bitmap in struct inotify_device with an idr, so
watch descriptors are allocated, looked up, and released through the idr
layer instead of by scanning a bitmask.

Signed-Off-By: Robert Love <rml@xxxxxxxxxx>

drivers/char/inotify.c | 93 +++++++++++++++++++++++++++----------------------
1 files changed, 53 insertions(+), 40 deletions(-)
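
The idr also takes over lookup and release of watch descriptors: idr_init()
replaces bitmap_zero() when a device is created, dev_find_wd() becomes a thin
wrapper around idr_find() that must be called under dev->lock, and
inotify_dev_put_wd() drops the WD with idr_remove(). Roughly, with
hypothetical helper names (the real hunks are below):

/* look up a watcher by WD; caller must hold dev->lock */
static inline struct inotify_watcher *example_find_wd(struct inotify_device *dev,
						       int wd)
{
	return idr_find(&dev->idr, wd);
}

/* release a WD so the idr can hand it out again */
static void example_put_wd(struct inotify_device *dev, int wd)
{
	dev->nr_watches--;
	idr_remove(&dev->idr, wd);
}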

diff -urN linux-inotify/drivers/char/inotify.c linux/drivers/char/inotify.c
--- linux-inotify/drivers/char/inotify.c 2004-09-28 15:22:02.152818864 -0400
+++ linux/drivers/char/inotify.c 2004-09-28 15:27:03.994931888 -0400
@@ -17,15 +17,14 @@
/* TODO:
* rename inotify_watcher to inotify_watch
* need a way to connect MOVED_TO/MOVED_FROM events in user space
- * use b-tree so looking up watch by WD is faster
*/

#include <linux/bitops.h>
-#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
+#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/namei.h>
@@ -61,22 +60,19 @@
* For each inotify device, we need to keep track of the events queued on it,
* a list of the inodes that we are watching, and so on.
*
- * 'bitmask' holds one bit for each possible watcher descriptor: a set bit
- * implies that the given WD is valid, unset implies it is not.
- *
* This structure is protected by 'lock'. Lock ordering:
* dev->lock
* dev->wait->lock
* FIXME: Define lock ordering wrt inode and dentry locking!
*/
struct inotify_device {
- DECLARE_BITMAP(bitmask, MAX_INOTIFY_DEV_WATCHERS);
wait_queue_head_t wait;
+ struct idr idr;
struct list_head events;
struct list_head watchers;
spinlock_t lock;
unsigned int event_count;
- int nr_watches;
+ unsigned int nr_watches;
};

struct inotify_watcher {
@@ -179,10 +175,6 @@
return;

event_object_count--;
- INIT_LIST_HEAD(&kevent->list);
- kevent->event.wd = -1;
- kevent->event.mask = 0;
-
iprintk(INOTIFY_DEBUG_ALLOC, "free'd kevent %p\n", kevent);
kmem_cache_free(kevent_cache, kevent);
}
@@ -282,20 +274,30 @@
/*
* inotify_dev_get_wd - returns the next WD for use by the given dev
*
- * Caller must hold dev->lock before calling.
+ * This function sleeps.
*/
-static int inotify_dev_get_wd(struct inotify_device *dev)
+static int inotify_dev_get_wd(struct inotify_device *dev,
+ struct inotify_watcher *watcher)
{
- int wd;
+ int ret;

- if (!dev || dev->nr_watches == MAX_INOTIFY_DEV_WATCHERS)
+ if (dev->nr_watches == MAX_INOTIFY_DEV_WATCHERS)
return -1;

+repeat:
+ if (!idr_pre_get(&dev->idr, GFP_KERNEL))
+ return -1;
+
+ spin_lock(&dev->lock);
+ ret = idr_get_new(&dev->idr, watcher, &watcher->wd);
+ spin_unlock(&dev->lock);
+ if (ret == -EAGAIN) /* more memory is required, try again */
+ goto repeat;
+ if (ret == -ENOSPC) /* the idr is full! */
+ return -1;
dev->nr_watches++;
- wd = find_first_zero_bit(dev->bitmask, MAX_INOTIFY_DEV_WATCHERS);
- set_bit(wd, dev->bitmask);

- return wd;
+ return 0;
}

/*
@@ -309,7 +311,7 @@
return -1;

dev->nr_watches--;
- clear_bit(wd, dev->bitmask);
+ idr_remove(&dev->idr, wd);

return 0;
}
@@ -331,7 +333,6 @@
return NULL;
}

- watcher->wd = -1;
watcher->mask = mask;
watcher->inode = inode;
watcher->dev = dev;
@@ -339,11 +340,7 @@
INIT_LIST_HEAD(&watcher->i_list);
INIT_LIST_HEAD(&watcher->u_list);

- spin_lock(&dev->lock);
- watcher->wd = inotify_dev_get_wd(dev);
- spin_unlock(&dev->lock);
-
- if (watcher->wd < 0) {
+ if (inotify_dev_get_wd(dev, watcher)) {
iprintk(INOTIFY_DEBUG_ERRORS,
"Could not get wd for watcher %p\n", watcher);
iprintk(INOTIFY_DEBUG_ALLOC, "free'd watcher %p\n", watcher);
@@ -387,15 +384,18 @@
return NULL;
}

-static struct inotify_watcher *dev_find_wd(struct inotify_device *dev, int wd)
+/*
+ * dev_find_wd - given a (dev,wd) pair, returns the matching inotify_watcher
+ *
+ * Returns the results of looking up (dev,wd) in the idr layer. NULL is
+ * returned on error.
+ *
+ * The caller must hold dev->lock.
+ */
+static inline struct inotify_watcher *dev_find_wd(struct inotify_device *dev,
+ int wd)
{
- struct inotify_watcher *watcher;
-
- list_for_each_entry(watcher, &dev->watchers, d_list) {
- if (watcher->wd == wd)
- return watcher;
- }
- return NULL;
+ return idr_find(&dev->idr, wd);
}

static int inotify_dev_is_watching_inode(struct inotify_device *dev,
@@ -411,6 +411,11 @@
return 0;
}

+/*
+ * inotify_dev_add_watcher - add the given watcher to the given device instance
+ *
+ * Caller must hold dev->lock.
+ */
static int inotify_dev_add_watcher(struct inotify_device *dev,
struct inotify_watcher *watcher)
{
@@ -570,11 +575,11 @@
struct inotify_device *dev;
struct inode *inode;

- spin_lock(&watcher->dev->lock);
- spin_lock(&watcher->inode->i_lock);
-
- inode = watcher->inode;
dev = watcher->dev;
+ inode = watcher->inode;
+
+ spin_lock(&dev->lock);
+ spin_lock(&inode->i_lock);

if (event)
inotify_dev_queue_event(dev, watcher, event, NULL);
@@ -590,7 +595,8 @@
unref_inode(inode);
}

-static void process_umount_list(struct list_head *umount) {
+static void process_umount_list(struct list_head *umount)
+{
struct inotify_watcher *watcher, *next;

list_for_each_entry_safe(watcher, next, umount, u_list)
@@ -598,7 +604,7 @@
}

static void build_umount_list(struct list_head *head, struct super_block *sb,
- struct list_head *umount)
+ struct list_head *umount)
{
struct inode * inode;

@@ -766,7 +772,7 @@
if (!dev)
return -ENOMEM;

- bitmap_zero(dev->bitmask, MAX_INOTIFY_DEV_WATCHERS);
+ idr_init(&dev->idr);

INIT_LIST_HEAD(&dev->events);
INIT_LIST_HEAD(&dev->watchers);
@@ -898,9 +904,16 @@
{
struct inotify_watcher *watcher;

+ /*
+ * FIXME: Silly to grab dev->lock here and then drop it, when
+ * ignore_helper() grabs it anyway a few lines down.
+ */
+ spin_lock(&dev->lock);
watcher = dev_find_wd(dev, wd);
+ spin_unlock(&dev->lock);
if (!watcher)
return -EINVAL;
+
ignore_helper(watcher, 0);

return 0;