RPC: Shrink struct rpc_task by switching to wait_on_bit()

Signed-off-by: Trond Myklebust
---
 include/linux/sunrpc/sched.h |    1 -
 net/sunrpc/sched.c           |   31 ++++++++++++++++++-------------
 2 files changed, 18 insertions(+), 14 deletions(-)

Index: linux-2.6.12-rc1/net/sunrpc/sched.c
===================================================================
--- linux-2.6.12-rc1.orig/net/sunrpc/sched.c
+++ linux-2.6.12-rc1/net/sunrpc/sched.c
@@ -290,7 +290,7 @@ static void rpc_make_runnable(struct rpc
 			return;
 		}
 	} else
-		wake_up(&task->u.tk_wait.waitq);
+		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
 }
 
 /*
@@ -554,6 +554,14 @@ __rpc_atrun(struct rpc_task *task)
 	rpc_wake_up_task(task);
 }
 
+static int rpc_wait_bit_interruptible(void *word)
+{
+	if (signal_pending(current))
+		return -ERESTARTSYS;
+	schedule();
+	return 0;
+}
+
 /*
  * This is the RPC `scheduler' (or rather, the finite state machine).
  */
@@ -624,22 +632,21 @@ static int __rpc_execute(struct rpc_task
 
 		/* sync task: sleep here */
 		dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid);
-		if (RPC_TASK_UNINTERRUPTIBLE(task)) {
-			__wait_event(task->u.tk_wait.waitq, !RPC_IS_QUEUED(task));
-		} else {
-			__wait_event_interruptible(task->u.tk_wait.waitq, !RPC_IS_QUEUED(task), status);
+		/* Note: Caller should be using rpc_clnt_sigmask() */
+		status = out_of_line_wait_on_bit(&task->tk_runstate,
+				RPC_TASK_QUEUED, rpc_wait_bit_interruptible,
+				TASK_INTERRUPTIBLE);
+		if (status == -ERESTARTSYS) {
 			/*
 			 * When a sync task receives a signal, it exits with
 			 * -ERESTARTSYS. In order to catch any callbacks that
 			 * clean up after sleeping on some queue, we don't
 			 * break the loop here, but go around once more.
 			 */
-			if (status == -ERESTARTSYS) {
-				dprintk("RPC: %4d got signal\n", task->tk_pid);
-				task->tk_flags |= RPC_TASK_KILLED;
-				rpc_exit(task, -ERESTARTSYS);
-				rpc_wake_up_task(task);
-			}
+			dprintk("RPC: %4d got signal\n", task->tk_pid);
+			task->tk_flags |= RPC_TASK_KILLED;
+			rpc_exit(task, -ERESTARTSYS);
+			rpc_wake_up_task(task);
 		}
 		rpc_set_running(task);
 		dprintk("RPC: %4d sync task resuming\n", task->tk_pid);
@@ -759,8 +766,6 @@ void rpc_init_task(struct rpc_task *task
 
 	/* Initialize workqueue for async tasks */
 	task->tk_workqueue = rpciod_workqueue;
-	if (!RPC_IS_ASYNC(task))
-		init_waitqueue_head(&task->u.tk_wait.waitq);
 
 	if (clnt) {
 		atomic_inc(&clnt->cl_users);
Index: linux-2.6.12-rc1/include/linux/sunrpc/sched.h
===================================================================
--- linux-2.6.12-rc1.orig/include/linux/sunrpc/sched.h
+++ linux-2.6.12-rc1/include/linux/sunrpc/sched.h
@@ -31,7 +31,6 @@ struct rpc_wait_queue;
 
 struct rpc_wait {
 	struct list_head	list;		/* wait queue links */
 	struct list_head	links;		/* Links to related tasks */
-	wait_queue_head_t	waitq;		/* sync: sleep on this q */
 	struct rpc_wait_queue *	rpc_waitq;	/* RPC wait queue we're on */
 };
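
For reference, a minimal sketch of the wait_on_bit()/wake_up_bit() pattern the
patch switches to, written against the 2.6.12-era API. It is not part of the
patch, and the names used here (my_object, MY_FLAG_BUSY, my_wait_action,
my_wait_for_completion, my_complete) are made up for illustration. The idea is
that the sleeper waits on a flag bit inside a word the structure already has,
while the waker clears the bit and wakes the shared, hashed bit waitqueue keyed
on that word's address. Since no wait_queue_head_t has to be embedded per
object, the per-task waitqueue can be dropped, which is what shrinks
struct rpc_task.

	#include <linux/sched.h>
	#include <linux/wait.h>
	#include <linux/bitops.h>

	struct my_object {
		unsigned long	flags;		/* bit 0 used as a "busy" flag */
	};
	#define MY_FLAG_BUSY	0

	/* Action callback: how to sleep while the bit is still set. */
	static int my_wait_action(void *word)
	{
		if (signal_pending(current))
			return -ERESTARTSYS;
		schedule();
		return 0;
	}

	/* Sleeper: returns 0 once the bit clears, or -ERESTARTSYS on a signal. */
	static int my_wait_for_completion(struct my_object *obj)
	{
		return out_of_line_wait_on_bit(&obj->flags, MY_FLAG_BUSY,
				my_wait_action, TASK_INTERRUPTIBLE);
	}

	/* Waker: clear the bit, then wake anyone sleeping on that bit. */
	static void my_complete(struct my_object *obj)
	{
		clear_bit(MY_FLAG_BUSY, &obj->flags);
		smp_mb__after_clear_bit();
		wake_up_bit(&obj->flags, MY_FLAG_BUSY);
	}

The memory barrier between clearing the bit and wake_up_bit() mirrors the usual
requirement that the waiter must observe the cleared bit before it is woken; in
the patch itself the equivalent wake-up happens in rpc_make_runnable() on
RPC_TASK_QUEUED in task->tk_runstate.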