Since rpciod is handling most tcp rpc calls anyway, the impact of this
appears minimal and I've copied a fair chunk of data around this way as
well as built Gnome over it (Linux nfs client <-> Linux unfsd tcp).
diff -u --recursive --new-file --exclude-from ../exclude linux.vanilla/net/sunrpc/auth_unix.c linux/net/sunrpc/auth_unix.c
--- linux.vanilla/net/sunrpc/auth_unix.c Sun Mar 22 23:06:14 1998
+++ linux/net/sunrpc/auth_unix.c Sun Aug 9 20:31:24 1998
@@ -178,6 +178,11 @@
base = p++;
*p++ = htonl(jiffies/HZ);
#ifndef DONT_FILLIN_HOSTNAME
+ /*
+ * Problem: The UTS name could change under us. We can't lock
+ * here to handle this. On the other hand we can't really
+ * go building a bad RPC!
+ */
if ((n = strlen((char *) system_utsname.nodename)) > UNX_MAXNODENAME)
n = UNX_MAXNODENAME;
*p++ = htonl(n);
diff -u --recursive --new-file --exclude-from ../exclude linux.vanilla/net/sunrpc/sched.c linux/net/sunrpc/sched.c
--- linux.vanilla/net/sunrpc/sched.c Tue Jul 21 14:19:32 1998
+++ linux/net/sunrpc/sched.c Sun Aug 9 21:03:09 1998
@@ -176,6 +176,21 @@
task->tk_flags |= RPC_TASK_RUNNING;
}
+
+/*
+ * For other people who may need to wake the I/O daemon
+ * but should (for now) know nothing about its innards
+ */
+
+void rpciod_wake_up(void)
+{
+ if(rpciod_pid==0)
+ {
+ printk(KERN_ERR "rpciod: wot no daemon?\n");
+ }
+ wake_up(&rpciod_idle);
+}
+
/*
* Prepare for sleeping on a wait queue.
* By always appending tasks to the list we ensure FIFO behavior.
@@ -795,6 +810,7 @@
dprintk("RPC: rpciod back to sleep\n");
interruptible_sleep_on(&rpciod_idle);
dprintk("RPC: switch to rpciod\n");
+ rpciod_tcp_dispatcher();
rounds = 0;
}
restore_flags(oldflags);
diff -u --recursive --new-file --exclude-from ../exclude linux.vanilla/net/sunrpc/xprt.c linux/net/sunrpc/xprt.c
--- linux.vanilla/net/sunrpc/xprt.c Tue Aug 4 13:52:20 1998
+++ linux/net/sunrpc/xprt.c Sun Aug 9 21:08:26 1998
@@ -324,6 +324,12 @@
fput(xprt->file);
else
sock_release(xprt->sock);
+ /*
+ * TCP doesn't require the rpciod now - other things may,
+ * but rpciod handles that, not us.
+ */
+ if(xprt->stream)
+ rpciod_down();
}
/*
@@ -700,19 +706,17 @@
static struct rpc_xprt *rpc_xprt_pending = NULL; /* Chain by rx_pending of rpc_xprt's */
-static struct tq_struct rpc_tcp_tqueue = { 0, 0, 0, 0 };
-
-
/*
- * This is protected from tcp_data_ready by the bh atomicity guarantees
+ * This is protected from tcp_data_ready and the stack as it's run
+ * inside the RPC I/O daemon
*/
-static void tcp_rpc_bh_run(void)
+void rpciod_tcp_dispatcher(void)
{
struct rpc_xprt *xprt;
int result;
- dprintk("tcp_rpc_bh_run: Queue Running\n");
+ dprintk("rpciod_tcp_dispatcher: Queue Running\n");
/*
* Empty each pending socket
@@ -725,7 +729,7 @@
rpc_xprt_pending=xprt->rx_pending;
xprt->rx_pending_flag=0;
- dprintk("tcp_rpc_run_bh: Processing %p\n", xprt);
+ dprintk("rpciod_tcp_dispatcher: Processing %p\n", xprt);
do
{
@@ -750,12 +754,9 @@
}
-static void tcp_rpc_bh_queue(void)
+extern inline void tcp_rpciod_queue(void)
{
- rpc_tcp_tqueue.routine=(void *)(void *)tcp_rpc_bh_run;
- queue_task(&rpc_tcp_tqueue, &tq_immediate);
- dprintk("RPC: tcp_rpc_bh_queue: immediate op queued\n");
- mark_bh(IMMEDIATE_BH);
+ rpciod_wake_up();
}
/*
@@ -787,7 +788,7 @@
{
dprintk("RPC: xprt queue\n");
if(rpc_xprt_pending==NULL)
- tcp_rpc_bh_queue();
+ tcp_rpciod_queue();
xprt->rx_pending_flag=1;
xprt->rx_pending=rpc_xprt_pending;
rpc_xprt_pending=xprt;
@@ -1279,6 +1280,12 @@
xprt->free = xprt->slot;
dprintk("RPC: created transport %p\n", xprt);
+
+ /*
+ * TCP requires the rpc I/O daemon is present
+ */
+ if(proto==IPPROTO_TCP)
+ rpciod_up();
return xprt;
}
-
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.rutgers.edu
Please read the FAQ at http://www.altern.org/andrebalsa/doc/lkml-faq.html