diff -u --recursive --new-file linux-2.2.17-nfsv3-0.22.2/include/linux/sunrpc/sched.h linux-2.2.17-nfsv3/include/linux/sunrpc/sched.h
--- linux-2.2.17-nfsv3-0.22.2/include/linux/sunrpc/sched.h	Tue Jul 18 00:37:58 2000
+++ linux-2.2.17-nfsv3/include/linux/sunrpc/sched.h	Mon Jul 17 08:36:46 2000
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include
 #include
 
 /*
@@ -60,6 +61,7 @@
  *	action		next procedure for async tasks
  *	exit		exit async task and report to caller
  */
+	void			(*tk_timeout_fn)(struct rpc_task *);
 	void			(*tk_callback)(struct rpc_task *);
 	void			(*tk_action)(struct rpc_task *);
 	void			(*tk_exit)(struct rpc_task *);
@@ -174,28 +176,30 @@
 void		rpc_show_tasks(void);
 #endif
 
-extern __inline__ void *
+static __inline__ void *
 rpc_malloc(struct rpc_task *task, unsigned int size)
 {
 	return rpc_allocate(task->tk_flags, size);
 }
 
-extern __inline__ void
+static __inline__ void
 rpc_exit(struct rpc_task *task, int status)
 {
 	task->tk_status = status;
 	task->tk_action = NULL;
 }
 
-extern __inline__ void
+static __inline__ void
 rpc_kill(struct rpc_task *task, int status)
 {
 	rpc_exit(task, status);
 	rpc_wake_up_task(task);
 }
 
+extern spinlock_t rpc_queue_lock;
+
 #ifdef RPC_DEBUG
-extern __inline__ char *
+static __inline__ char *
 rpc_qname(struct rpc_wait_queue *q)
 {
 	return q? (q->name? q->name : "unknown") : "none";
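
The sched.c and xprt.c changes below apply one locking convention throughout: every start_bh_atomic()/end_bh_atomic() pair guarding wait-queue state becomes a section under rpc_queue_lock, taken with interrupts disabled because wake-ups may now arrive from sk->write_space() in bottom-half context. Functions with a double-underscore prefix assume the lock is already held; the public entry points take it themselves. A minimal sketch of the shape (rpc_do_wake/__rpc_do_wake are placeholder names, not symbols from the patch; the real pairs are rpc_wake_up_task() and friends below):

	/* Placeholder names for illustration only.  The __helper
	 * assumes rpc_queue_lock is held by the caller. */
	static void __rpc_do_wake(struct rpc_task *task);

	void
	rpc_do_wake(struct rpc_task *task)	/* callable from any context */
	{
		unsigned long oldflags;

		spin_lock_irqsave(&rpc_queue_lock, oldflags);
		__rpc_do_wake(task);
		spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
	}
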
diff -u --recursive --new-file linux-2.2.17-nfsv3-0.22.2/net/sunrpc/sched.c linux-2.2.17-nfsv3/net/sunrpc/sched.c
--- linux-2.2.17-nfsv3-0.22.2/net/sunrpc/sched.c	Tue Jul 18 00:37:58 2000
+++ linux-2.2.17-nfsv3/net/sunrpc/sched.c	Tue Jul 18 00:22:51 2000
@@ -31,7 +31,7 @@
  */
 #define GFP_RPC	GFP_NFS
 
-static void		__rpc_default_timer(struct rpc_task *task);
+static void		rpc_default_timer(struct rpc_task *task);
 static void		rpciod_killall(void);
 
 /*
@@ -68,6 +68,12 @@
 static int		rpc_inhibit = 0;
 
 /*
+ * Spinlock for wait queues. Access to the latter has to be interrupt-safe
+ * since we want to wake up tasks from sk->write_space().
+ */
+spinlock_t rpc_queue_lock = SPIN_LOCK_UNLOCKED;
+
+/*
  * This is the last-ditch buffer for NFS swap requests
  */
 static u32		swap_buffer[PAGE_SIZE >> 2];
@@ -86,6 +92,42 @@
 }
 
 /*
+ * Disable the timer for a given RPC task. Should be called with
+ * rpc_queue_lock and bh_disabled in order to avoid races within
+ * rpc_run_timer().
+ */
+static inline void
+__rpc_disable_timer(struct rpc_task *task)
+{
+	dprintk("RPC: %4d disabling timer\n", task->tk_pid);
+	task->tk_timeout_fn = NULL;
+	task->tk_timeout = 0;
+}
+
+/*
+ * Run a timeout function.
+ * We use the callback in order to allow __rpc_wake_up_task()
+ * and friends to disable the timer synchronously on SMP systems
+ * without calling del_timer_sync(). The latter could cause a
+ * deadlock if called while we're holding spinlocks...
+ */
+static void
+rpc_run_timer(struct rpc_task *task)
+{
+	void (*callback)(struct rpc_task *);
+	unsigned long oldflags;
+
+	spin_lock_irqsave(&rpc_queue_lock, oldflags);
+	callback = task->tk_timeout_fn;
+	task->tk_timeout_fn = NULL;
+	spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
+	if (callback) {
+		dprintk("RPC: %4d running timer\n", task->tk_pid);
+		callback(task);
+	}
+}
+
+/*
  * Set up a timer for the current task.
  */
 static inline void
@@ -97,16 +139,11 @@
 	dprintk("RPC: %4d setting alarm for %lu ms\n",
 			task->tk_pid, task->tk_timeout * 1000 / HZ);
 
-	if (timer_pending(&task->tk_timer)) {
-		printk(KERN_ERR "RPC: Bug! Overwriting active timer\n");
-		del_timer(&task->tk_timer);
-	}
-	if (!timer)
-		timer = __rpc_default_timer;
-	task->tk_timer.expires = jiffies + task->tk_timeout;
-	task->tk_timer.data = (unsigned long) task;
-	task->tk_timer.function = (void (*)(unsigned long)) timer;
-	add_timer(&task->tk_timer);
+	if (timer)
+		task->tk_timeout_fn = timer;
+	else
+		task->tk_timeout_fn = rpc_default_timer;
+	mod_timer(&task->tk_timer, jiffies + task->tk_timeout);
 }
 
 /*
@@ -114,22 +151,24 @@
  */
 void rpc_add_timer(struct rpc_task *task, rpc_action timer)
 {
-	start_bh_atomic();
+	unsigned long oldflags;
+
+	spin_lock_irqsave(&rpc_queue_lock, oldflags);
 	if (!(RPC_IS_RUNNING(task) || task->tk_wakeup))
 		__rpc_add_timer(task, timer);
-	end_bh_atomic();
+	spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
 }
 
 /*
  * Delete any timer for the current task.
  */
 static inline void
-__rpc_del_timer(struct rpc_task *task)
+rpc_delete_timer(struct rpc_task *task)
 {
-	dprintk("RPC: %4d deleting timer\n", task->tk_pid);
-	if (timer_pending(&task->tk_timer))
+	if (timer_pending(&task->tk_timer)) {
+		dprintk("RPC: %4d deleting timer\n", task->tk_pid);
 		del_timer(&task->tk_timer);
-	task->tk_timeout = 0;
+	}
 }
 
 /*
@@ -167,11 +206,12 @@
 int
 rpc_add_wait_queue(struct rpc_wait_queue *q, struct rpc_task *task)
 {
+	unsigned long oldflags;
 	int result;
 
-	start_bh_atomic();
+	spin_lock_irqsave(&rpc_queue_lock, oldflags);
 	result = __rpc_add_wait_queue(q, task);
-	end_bh_atomic();
+	spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
 	return result;
 }
 
@@ -197,21 +237,23 @@
 void
 rpc_remove_wait_queue(struct rpc_task *task)
 {
+	unsigned long oldflags;
+
 	if (!task->tk_rpcwait)
 		return;
-	start_bh_atomic();
+	spin_lock_irqsave(&rpc_queue_lock, oldflags);
 	__rpc_remove_wait_queue(task);
-	end_bh_atomic();
+	spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
 }
 
 /*
  * Make an RPC task runnable.
  *
  * Note: If the task is ASYNC, this must be called with
- * bh_atomic to protect the wait queue operation.
+ * spin lock held in order to protect the wait queue operation.
  */
 static inline void
-rpc_make_runnable(struct rpc_task *task)
+__rpc_make_runnable(struct rpc_task *task)
 {
 	if (task->tk_timeout) {
 		printk(KERN_ERR "RPC: task w/ running timer in rpc_make_runnable!!\n");
@@ -239,14 +281,14 @@
  * Place a newly initialized task on the schedq.
  */
 static inline void
-rpc_schedule_run(struct rpc_task *task)
+__rpc_schedule_run(struct rpc_task *task)
 {
 	/* Don't run a child twice! */
 	if (RPC_IS_ACTIVATED(task))
 		return;
 	task->tk_active = 1;
 	task->tk_sleeping = 1;
-	rpc_make_runnable(task);
+	__rpc_make_runnable(task);
 }
 
 
@@ -305,25 +347,29 @@
 rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 				rpc_action action, rpc_action timer)
 {
+	unsigned long oldflags;
+
 	/*
 	 * Protect the queue operations.
 	 */
-	start_bh_atomic();
+	spin_lock_irqsave(&rpc_queue_lock, oldflags);
 	__rpc_sleep_on(q, task, action, timer);
-	end_bh_atomic();
+	spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
 }
 
 void
 rpc_sleep_locked(struct rpc_wait_queue *q, struct rpc_task *task,
 		 rpc_action action, rpc_action timer)
 {
+	unsigned long oldflags;
+
 	/*
 	 * Protect the queue operations.
 	 */
-	start_bh_atomic();
+	spin_lock_irqsave(&rpc_queue_lock, oldflags);
 	__rpc_sleep_on(q, task, action, timer);
 	__rpc_lock_task(task);
-	end_bh_atomic();
+	spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
 }
 
 /*
@@ -354,7 +400,7 @@
 	if (RPC_IS_RUNNING(task))
 		return;
-	__rpc_del_timer(task);
+	__rpc_disable_timer(task);
 
 	/* If the task has been locked, then set tk_wakeup so that
 	 * rpc_unlock_task() wakes us up...
 	 */
@@ -366,7 +412,7 @@
 	if (task->tk_rpcwait != &schedq)
 		__rpc_remove_wait_queue(task);
 
-	rpc_make_runnable(task);
+	__rpc_make_runnable(task);
 
 	dprintk("RPC: __rpc_wake_up done\n");
 }
@@ -375,12 +421,12 @@
  * Default timeout handler if none specified by user
  */
 static void
-__rpc_default_timer(struct rpc_task *task)
+rpc_default_timer(struct rpc_task *task)
 {
 	dprintk("RPC: %4d timeout (default timer)\n", task->tk_pid);
 	task->tk_status = -ETIMEDOUT;
 	task->tk_timeout = 0;
-	__rpc_wake_up_task(task);
+	rpc_wake_up_task(task);
 }
 
 /*
@@ -389,11 +435,14 @@
 void
 rpc_wake_up_task(struct rpc_task *task)
 {
+	unsigned long oldflags;
+
 	if (RPC_IS_RUNNING(task))
 		return;
-	start_bh_atomic();
+
+	spin_lock_irqsave(&rpc_queue_lock, oldflags);
 	__rpc_wake_up_task(task);
-	end_bh_atomic();
+	spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
 }
 
 /*
@@ -415,10 +464,11 @@
 rpc_wake_up_next(struct rpc_wait_queue *queue)
 {
 	struct rpc_task	*task;
+	unsigned long oldflags;
 
-	start_bh_atomic();
+	spin_lock_irqsave(&rpc_queue_lock, oldflags);
 	task = __rpc_wake_up_next(queue);
-	end_bh_atomic();
+	spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
 
 	return task;
 }
@@ -435,9 +485,11 @@
 void
 rpc_wake_up(struct rpc_wait_queue *queue)
 {
-	start_bh_atomic();
+	unsigned long oldflags;
+
+	spin_lock_irqsave(&rpc_queue_lock, oldflags);
 	__rpc_wake_up(queue);
-	end_bh_atomic();
+	spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
 }
 
 /*
@@ -457,9 +509,11 @@
 void
 rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 {
-	start_bh_atomic();
+	unsigned long oldflags;
+
+	spin_lock_irqsave(&rpc_queue_lock, oldflags);
 	__rpc_wake_up_status(queue, status);
-	end_bh_atomic();
+	spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
 }
 
 /*
@@ -487,28 +541,30 @@
 void
 rpc_unlock_task(struct rpc_task *task)
 {
-	start_bh_atomic();
+	unsigned long oldflags;
+
+	spin_lock_irqsave(&rpc_queue_lock, oldflags);
 	__rpc_unlock_task(task);
-	end_bh_atomic();
+	spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
 }
 
 /*
 * Run a task at a later time
 */
-static void	__rpc_atrun(struct rpc_task *);
+static void	rpc_atrun(struct rpc_task *);
 void
 rpc_delay(struct rpc_task *task, unsigned long delay)
 {
 	task->tk_timeout = delay;
-	rpc_sleep_on(&delay_queue, task, NULL, __rpc_atrun);
+	rpc_sleep_on(&delay_queue, task, NULL, rpc_atrun);
 }
 
 static void
-__rpc_atrun(struct rpc_task *task)
+rpc_atrun(struct rpc_task *task)
 {
 	task->tk_status = 0;
 	task->tk_timeout = 0;
-	__rpc_wake_up_task(task);
+	rpc_wake_up_task(task);
 }
 
 /*
@@ -517,6 +573,7 @@
 static int
 __rpc_execute(struct rpc_task *task)
 {
+	unsigned long oldflags;
 	int		status = 0;
 
 	dprintk("RPC: %4d rpc_execute flgs %x\n",
@@ -554,6 +611,10 @@
 		 * by someone else.
 		 */
 		if (RPC_IS_RUNNING(task)) {
+			/*
+			 * Garbage collection of pending timers...
+			 */
+			rpc_delete_timer(task);
 			if (!task->tk_action)
 				break;
 			task->tk_action(task);
@@ -567,23 +628,25 @@
 		 * 27/9/99: The above has been attempted fixed by
 		 * introduction of task->tk_sleeping.
		 */
-		start_bh_atomic();
+		spin_lock_irqsave(&rpc_queue_lock, oldflags);
 		if (!RPC_IS_RUNNING(task)) {
 			task->tk_sleeping = 1;
 			if (RPC_IS_ASYNC(task)) {
-				end_bh_atomic();
+				spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
 				return 0;
 			}
 		} else
 			task->tk_sleeping = 0;
-		end_bh_atomic();
+		spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
 
 		while (RPC_IS_SLEEPING(task)) {
 			/* sync task: sleep here */
 			dprintk("RPC: %4d sync task going to sleep\n",
							task->tk_pid);
-			if (current->pid == rpciod_pid)
-				printk(KERN_ERR "RPC: rpciod waiting on sync task!\n");
+			if (current->pid == rpciod_pid) {
+				printk(KERN_ERR "RPC: rpciod waiting on sync task %4d!\n", task->tk_pid);
+				rpc_show_tasks();
+			}
 
 			__wait_event(task->tk_wait, !RPC_IS_SLEEPING(task));
 			dprintk("RPC: %4d sync task resuming\n", task->tk_pid);
@@ -660,6 +723,7 @@
 __rpc_schedule(void)
 {
 	struct rpc_task	*task;
+	unsigned long oldflags;
 	int		count = 0;
 
 	dprintk("RPC: rpc_schedule enter\n");
@@ -667,20 +731,20 @@
 		/* Ensure equal rights for tcp tasks... */
 		rpciod_tcp_dispatcher();
 
-		start_bh_atomic();
+		spin_lock_irqsave(&rpc_queue_lock, oldflags);
 		if (!(task = schedq.task)) {
-			end_bh_atomic();
+			spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
 			break;
 		}
 		if (task->tk_lock) {
-			end_bh_atomic();
+			spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
 			printk(KERN_ERR "RPC: Locked task was scheduled !!!!\n");
 			rpc_debug = ~0;
 			rpc_show_tasks();
 			break;
 		}
 		__rpc_remove_wait_queue(task);
-		end_bh_atomic();
+		spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
 
 		__rpc_execute(task);
 
@@ -763,6 +827,9 @@
 {
 	memset(task, 0, sizeof(*task));
 	init_timer(&task->tk_timer);
+	task->tk_timer.data = (unsigned long) task;
+	task->tk_timer.function = (void (*)(unsigned long)) rpc_run_timer;
+
 	task->tk_client = clnt;
 	task->tk_exit = callback;
 	task->tk_flags = flags;
@@ -827,6 +894,7 @@
 rpc_release_task(struct rpc_task *task)
 {
 	struct rpc_task	*next, *prev;
+	unsigned long oldflags;
 
 	dprintk("RPC: %4d release task\n", task->tk_pid);
 
@@ -851,17 +919,20 @@
 	task->tk_next_task = task->tk_prev_task = NULL;
 
 	/* Protect the execution below. */
-	start_bh_atomic();
+	spin_lock_irqsave(&rpc_queue_lock, oldflags);
 
-	/* Delete any running timer */
-	__rpc_del_timer(task);
+	/* Disable timer to prevent zombie wakeup */
+	__rpc_disable_timer(task);
 
 	/* Remove from any wait queue we're still on */
 	__rpc_remove_wait_queue(task);
 
 	task->tk_active = 0;
 
-	end_bh_atomic();
+	spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
+
+	/* Synchronously delete any running timer */
+	rpc_delete_timer(task);
 
 	/* Release resources */
 	if (task->tk_rqstp)
@@ -913,13 +984,14 @@
 rpc_child_exit(struct rpc_task *child)
 {
 	struct rpc_task	*parent;
+	unsigned long oldflags;
 
-	start_bh_atomic();
+	spin_lock_irqsave(&rpc_queue_lock, oldflags);
 	if ((parent = rpc_find_parent(child)) != NULL) {
 		parent->tk_status = child->tk_status;
 		__rpc_wake_up_task(parent);
 	}
-	end_bh_atomic();
+	spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
 }
 
 /*
@@ -945,11 +1017,13 @@
 void
 rpc_run_child(struct rpc_task *task, struct rpc_task *child, rpc_action func)
 {
-	start_bh_atomic();
+	unsigned long oldflags;
+
+	spin_lock_irqsave(&rpc_queue_lock, oldflags);
 	/* N.B. Is it possible for the child to have already finished?
	 */
 	__rpc_sleep_on(&childq, task, func, NULL);
-	rpc_schedule_run(child);
-	end_bh_atomic();
+	__rpc_schedule_run(child);
+	spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
 }
 
 /*
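
The timer handling above is the heart of this patch: every rpc_task's timer_list permanently points at rpc_run_timer(), and the action to run lives in tk_timeout_fn, which rpc_run_timer() claims and clears under rpc_queue_lock before calling it. Disabling a timer is therefore just a locked store of NULL (__rpc_disable_timer()), so rpc_release_task() and __rpc_wake_up_task() never need del_timer_sync() while holding a spinlock. A stand-alone userspace sketch of that claim-under-lock technique (a pthread analogy with made-up names, illustrative only, not kernel code):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
	static void (*timeout_fn)(void);	/* plays the role of tk_timeout_fn */

	static void timer_expired(void) { printf("timeout action ran\n"); }

	/* Plays the role of rpc_run_timer(): claim the callback under the
	 * lock, clear it, then invoke it with the lock dropped. */
	static void run_timer(void)
	{
		void (*callback)(void);

		pthread_mutex_lock(&queue_lock);
		callback = timeout_fn;
		timeout_fn = NULL;
		pthread_mutex_unlock(&queue_lock);
		if (callback)
			callback();
	}

	/* Plays the role of __rpc_disable_timer(): a locked store of NULL
	 * is a complete, synchronous disable; no waiting for the handler. */
	static void disable_timer(void)
	{
		pthread_mutex_lock(&queue_lock);
		timeout_fn = NULL;
		pthread_mutex_unlock(&queue_lock);
	}

	int main(void)
	{
		timeout_fn = timer_expired;
		disable_timer();
		run_timer();		/* prints nothing: the action was vetoed */

		timeout_fn = timer_expired;
		run_timer();		/* prints "timeout action ran" */
		return 0;
	}
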
diff -u --recursive --new-file linux-2.2.17-nfsv3-0.22.2/net/sunrpc/xprt.c linux-2.2.17-nfsv3/net/sunrpc/xprt.c
--- linux-2.2.17-nfsv3-0.22.2/net/sunrpc/xprt.c	Tue Jul 18 00:37:58 2000
+++ linux-2.2.17-nfsv3/net/sunrpc/xprt.c	Tue Jul 18 10:19:52 2000
@@ -68,6 +68,9 @@
  * Local variables
  */
 
+/* Spinlock for critical sections in the code. */
+spinlock_t xprt_sock_lock = SPIN_LOCK_UNLOCKED;
+
 #ifdef RPC_DEBUG
 # undef  RPC_DEBUG_DATA
 # define RPCDBG_FACILITY	RPCDBG_XPRT
@@ -230,7 +233,6 @@
 		break;
 	default:
 		printk(KERN_NOTICE "RPC: sendmsg returned error %d\n", -result);
-		result = 0;
 	}
 	return result;
 }
@@ -343,43 +345,54 @@
 }
 
 static inline void
-__xprt_del_tcp_timer(struct rpc_xprt *xprt)
+__xprt_disable_tcp_timer(struct rpc_xprt *xprt)
+{
+	xprt->tcp_timeout = 0;
+}
+
+static inline void
+__xprt_delete_tcp_timer(struct rpc_xprt *xprt)
 {
 	if (timer_pending(&xprt->tcp_timer))
 		del_timer(&xprt->tcp_timer);
-	xprt->tcp_timeout = 0;
 }
 
+/*
+ * Safe for use outside BH contexts.
+ */
 static inline void
-xprt_del_tcp_timer(struct rpc_xprt *xprt)
+xprt_delete_tcp_timer(struct rpc_xprt *xprt)
 {
 	start_bh_atomic();
-	__xprt_del_tcp_timer(xprt);
+	__xprt_delete_tcp_timer(xprt);
 	end_bh_atomic();
 }
 
 static void
 xprt_tcp_timeout(struct rpc_xprt *xprt)
 {
-	__xprt_del_tcp_timer(xprt);
-	__xprt_disconnect(xprt);
+	if (xprt->tcp_timeout) {
+		__xprt_disable_tcp_timer(xprt);
+		__xprt_disconnect(xprt);
+	}
 }
 
 static inline void
-__xprt_add_tcp_timer(struct rpc_xprt *xprt)
+__xprt_add_tcp_timer(struct rpc_xprt *xprt, long timeout)
 {
-	if (!xprt->tcp_timeout)
+	if ((xprt->tcp_timeout = timeout) == 0)
 		return;
-	xprt->tcp_timer.data = (unsigned long)xprt;
-	xprt->tcp_timer.function = (void(*)(unsigned long)) xprt_tcp_timeout;
 	mod_timer(&xprt->tcp_timer, jiffies + xprt->tcp_timeout);
 }
 
+/*
+ * Safe for use outside BH contexts.
+ */
 static inline void
-xprt_add_tcp_timer(struct rpc_xprt *xprt)
+xprt_add_tcp_timer(struct rpc_xprt *xprt, long timeout)
 {
 	start_bh_atomic();
-	__xprt_add_tcp_timer(xprt);
+	__xprt_add_tcp_timer(xprt, timeout);
 	end_bh_atomic();
 }
@@ -398,10 +411,10 @@
 		return;
 	}
 
-	__xprt_del_tcp_timer(xprt);
+	__xprt_disable_tcp_timer(xprt);
 	xprt->connected = 0;
 	__xprt_remove_pending(xprt);
-	__rpc_wake_up_status(&xprt->pending, -ENOTCONN);
+	rpc_wake_up_status(&xprt->pending, -ENOTCONN);
 
 	sk = xprt->inet;
 	xprt->inet = NULL;
@@ -414,6 +427,8 @@
 
 	end_bh_atomic();
 
+	xprt_delete_tcp_timer(xprt);
+
 	sock_release(sock);
 	/*
 	 * TCP doesnt require the rpciod now - other things may
@@ -432,7 +447,7 @@
 	dprintk("RPC: disconnected transport %p\n", xprt);
 	xprt->connected = 0;
 	__xprt_append_pending(xprt);
-	__rpc_wake_up_status(&xprt->pending, -ENOTCONN);
+	rpc_wake_up_status(&xprt->pending, -ENOTCONN);
 }
 
 /*
@@ -484,8 +499,7 @@
 	status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr,
 				sizeof(xprt->addr), O_NONBLOCK);
 
-	xprt->tcp_timeout = RPCXPRT_TIMEOUT;
-	xprt_add_tcp_timer(xprt);
+	xprt_add_tcp_timer(xprt, RPCXPRT_TIMEOUT);
 
 	if (status < 0) {
 		switch (status) {
@@ -510,7 +524,7 @@
 	start_bh_atomic();
 	if (!xprt->connected) {
 		task->tk_timeout = xprt->timeout.to_maxval;
-		__rpc_sleep_on(&xprt->reconn, task, xprt_reconn_status, NULL);
+		rpc_sleep_on(&xprt->reconn, task, xprt_reconn_status, NULL);
 		end_bh_atomic();
 		return;
 	}
@@ -545,12 +559,14 @@
 * Look up the RPC request corresponding to a reply, and then lock it.
 */
 static struct rpc_rqst *
-__xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid)
+xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid)
 {
 	struct rpc_task	*head, *task;
 	struct rpc_rqst	*req;
+	unsigned long oldflags;
 	int		safe = 0;
 
+	spin_lock_irqsave(&rpc_queue_lock, oldflags);
 	if ((head = xprt->pending.task) != NULL) {
 		task = head;
 		do {
@@ -569,17 +585,7 @@
 out:
 	if (req && !__rpc_lock_task(req->rq_task))
 		req = NULL;
-	return req;
-}
-
-static inline struct rpc_rqst *
-xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid)
-{
-	struct rpc_rqst	*req;
-
-	start_bh_atomic();
-	req = __xprt_lookup_rqst(xprt, xid);
-	end_bh_atomic();
+	spin_unlock_irqrestore(&rpc_queue_lock, oldflags);
 	return req;
 }
@@ -619,10 +625,13 @@
 	req->rq_received = 1;
 
 	/* ... and wake up the process. */
-	__rpc_wake_up_task(task);
+	rpc_wake_up_task(task);
 	return;
 }
 
+/*
+ * Safe for use outside BH contexts.
+ */
 static inline void
 xprt_complete_rqst(struct rpc_xprt *xprt, struct rpc_rqst *req, int copied)
 {
@@ -666,7 +675,7 @@
 	}
 
 	/* Look up and lock the request corresponding to the given XID */
-	rovr = __xprt_lookup_rqst(xprt, *(u32 *) (skb->h.raw + sizeof(struct udphdr)));
+	rovr = xprt_lookup_rqst(xprt, *(u32 *) (skb->h.raw + sizeof(struct udphdr)));
 	if (!rovr)
 		goto dropit;
 	task = rovr->rq_task;
@@ -685,7 +694,7 @@
 
 	__xprt_complete_rqst(xprt, rovr, copied);
 
-	__rpc_unlock_task(task);
+	rpc_unlock_task(task);
 
 dropit:
 	skb_free_datagram(sk, skb);
 wake:
@@ -1010,8 +1019,8 @@
 	case TCP_ESTABLISHED:
 		xprt->connected = 1;
 		if (xprt->snd_task && xprt->snd_task->tk_rpcwait == &xprt->sending)
-			__rpc_wake_up_task(xprt->snd_task);
-		__rpc_wake_up(&xprt->reconn);
+			rpc_wake_up_task(xprt->snd_task);
+		rpc_wake_up(&xprt->reconn);
 		break;
 	case TCP_SYN_SENT:
 	case TCP_SYN_RECV:
@@ -1031,26 +1040,28 @@
 static void
 tcp_write_space(struct sock *sk)
 {
-	struct socket	*sock;
 	struct rpc_xprt	*xprt;
+	unsigned long oldflags;
+
+	if (!(xprt = xprt_from_sock(sk)))
+		return;
+	if (xprt->shutdown)
+		return;
 
 	/* Wait until we have enough socket memory */
 	if (sock_wspace(sk) < min(sk->sndbuf,XPRT_MIN_WRITE_SPACE))
 		return;
 
-	if (!(xprt = xprt_from_sock(sk)) || !(sock = sk->socket))
-		goto wake;
-	if (xprt->shutdown)
-		goto wake;
-
+	spin_lock_irqsave(&xprt_sock_lock, oldflags);
 	if (xprt->write_space)
-		goto wake;
+		goto out_unlock;
 
 	xprt->write_space = 1;
 
 	if (xprt->snd_task && xprt->snd_task->tk_rpcwait == &xprt->sending)
-		__rpc_wake_up_task(xprt->snd_task);
- wake:
+		rpc_wake_up_task(xprt->snd_task);
+ out_unlock:
+	spin_unlock_irqrestore(&xprt_sock_lock, oldflags);
 	wake_up_interruptible(sk->sleep);
 }
@@ -1058,24 +1069,27 @@
 udp_write_space(struct sock *sk)
 {
 	struct rpc_xprt	*xprt;
+	unsigned long oldflags;
+
+	if (!(xprt = xprt_from_sock(sk)))
+		return;
+	if (xprt->shutdown)
+		return;
 
 	/* Wait until we have enough socket memory */
 	if (sock_wspace(sk) < min(sk->sndbuf,XPRT_MIN_WRITE_SPACE))
 		return;
 
-	if (!(xprt = xprt_from_sock(sk)))
-		goto wake;
-	if (xprt->shutdown)
-		goto wake;
-
+	spin_lock_irqsave(&xprt_sock_lock, oldflags);
 	if (xprt->write_space)
-		goto wake;
+		goto out_unlock;
 
 	xprt->write_space = 1;
 
 	if (xprt->snd_task && xprt->snd_task->tk_rpcwait == &xprt->sending)
-		__rpc_wake_up_task(xprt->snd_task);
- wake:
+		rpc_wake_up_task(xprt->snd_task);
+ out_unlock:
+	spin_unlock_irqrestore(&xprt_sock_lock, oldflags);
 	wake_up_interruptible(sk->sleep);
 }
@@ -1095,7 +1109,7 @@
 
 	task->tk_status = -ETIMEDOUT;
 	task->tk_timeout = 0;
-	__rpc_wake_up_task(task);
+	rpc_wake_up_task(task);
 }
 
@@ -1109,7 +1123,7 @@
 	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;
 	struct rpc_rqst	*req = task->tk_rqstp;
 
-	xprt_del_tcp_timer(xprt);
+	xprt_delete_tcp_timer(xprt);
 	if (xprt->snd_task && xprt->snd_task != task) {
 		dprintk("RPC: %4d TCP write queue full (task %d)\n",
 			task->tk_pid, xprt->snd_task->tk_pid);
@@ -1136,10 +1150,8 @@
 
 	if (xprt->snd_task && xprt->snd_task == task) {
 		xprt->snd_task = NULL;
-		if (!rpc_wake_up_next(&xprt->sending) && xprt->stream) {
-			xprt->tcp_timeout = RPCXPRT_TIMEOUT;
-			xprt_add_tcp_timer(xprt);
-		}
+		if (!rpc_wake_up_next(&xprt->sending) && xprt->stream)
+			xprt_add_tcp_timer(xprt, RPCXPRT_TIMEOUT);
 	}
 }
@@ -1165,9 +1177,6 @@
 	if (task->tk_status < 0)
 		return;
 
-	if (task->tk_rpcwait)
-		rpc_remove_wait_queue(task);
-
 	/* set up everything as needed. */
 	/* Write the record marker */
 	if (xprt->stream) {
@@ -1187,6 +1196,7 @@
 {
 	struct rpc_rqst	*req = task->tk_rqstp;
 	struct rpc_xprt	*xprt = req->rq_xprt;
+	unsigned long oldflags;
 	int status, retry = 0;
 
 
@@ -1253,12 +1263,12 @@
 	switch (status) {
 	case -ENOMEM:
 		/* Protect against (udp|tcp)_write_space */
-		start_bh_atomic();
+		spin_lock_irqsave(&xprt_sock_lock, oldflags);
 		if (!xprt->write_space) {
 			task->tk_timeout = req->rq_timeout.to_current;
-			__rpc_sleep_on(&xprt->sending, task, NULL, NULL);
+			rpc_sleep_on(&xprt->sending, task, NULL, NULL);
 		}
-		end_bh_atomic();
+		spin_unlock_irqrestore(&xprt_sock_lock, oldflags);
 		return;
 	case -EAGAIN:
 		/* Keep holding the socket if it is blocked */
@@ -1323,7 +1333,7 @@
 	} else {
 		dprintk("RPC: xprt_reserve waiting on backlog\n");
 		task->tk_status = -EAGAIN;
-		__rpc_sleep_on(&xprt->backlog, task, NULL, NULL);
+		rpc_sleep_on(&xprt->backlog, task, NULL, NULL);
 	}
 	end_bh_atomic();
 	dprintk("RPC: %4d xprt_reserve returns %d\n",
@@ -1406,12 +1416,6 @@
 	dprintk("RPC: %4d release request %p\n",
 			task->tk_pid, req);
 
-	/* remove slot from queue of pending */
-	if (task->tk_rpcwait) {
-		printk("RPC: task of released request still queued!\n");
-		rpc_remove_wait_queue(task);
-	}
-
 	start_bh_atomic();
 	req->rq_next = xprt->free;
 	xprt->free = req;
@@ -1487,6 +1491,8 @@
 		xprt_default_timeout(&xprt->timeout, xprt->prot);
 
 	init_timer(&xprt->tcp_timer);
+	xprt->tcp_timer.data = (unsigned long)xprt;
+	xprt->tcp_timer.function = (void(*)(unsigned long)) xprt_tcp_timeout;
 
 	xprt->pending = RPC_INIT_WAITQ("xprt_pending");
 	xprt->sending = RPC_INIT_WAITQ("xprt_sending");
@@ -1640,7 +1646,7 @@
 {
 	if (RPCXPRT_CONGESTED(xprt))
 		return 0;
-	__rpc_wake_up_next(&xprt->backlog);
+	rpc_wake_up_next(&xprt->backlog);
 	wake_up(&xprt->cong_wait);
 	return 1;
 }
@@ -1654,7 +1660,7 @@
 	dprintk("RPC: destroying transport %p\n", xprt);
 	xprt_shutdown(xprt);
 	xprt_close(xprt);
-	xprt_del_tcp_timer(xprt);
+	xprt_delete_tcp_timer(xprt);
 	xprt_remove_pending(xprt);
 
 	kfree(xprt);
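
One API note for the xprt.c hunks: __xprt_add_tcp_timer() and xprt_add_tcp_timer() now take the timeout as an argument, store it in xprt->tcp_timeout, and treat zero as "record it but don't arm the timer", so callers collapse from two steps to one:

	/* before */
	xprt->tcp_timeout = RPCXPRT_TIMEOUT;
	xprt_add_tcp_timer(xprt);

	/* after: the timeout travels with the call */
	xprt_add_tcp_timer(xprt, RPCXPRT_TIMEOUT);

The timer's data and function fields are set once at xprt_setup() time (mirroring the rpc_task timer in rpc_init_task()), which is what lets the add path be a bare mod_timer().
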