author     Trond Myklebust <Trond.Myklebust@netapp.com>  2007-06-14 17:26:17 -0400
committer  Trond Myklebust <Trond.Myklebust@netapp.com>  2007-07-10 23:40:30 -0400
commit     4ada539ed77c7a2bbcb75cafbbd7bd8d2b9bef7b (patch)
tree       cb4ba7df88aee07173dc7e44a3c35249ae4ab539 /net/sunrpc/sched.c
parent     ab418d70e1fceda1e2824c45ba3323a1b1413507 (diff)
download   linux-4ada539ed77c7a2bbcb75cafbbd7bd8d2b9bef7b.tar.gz
SUNRPC: Make create_client() take a reference to the rpciod workqueue
Ensures that an rpc_client is always able to send asynchronous RPC calls.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
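The idea behind the change, in brief: the rpciod workqueue is shared by every RPC client and reference-counted through rpciod_users. Once create_client() takes one of those references for the lifetime of each rpc_clnt, the workqueue cannot be torn down while a client that might still queue asynchronous tasks exists, which is what lets the global rpciod_killall()/client_kill_wait machinery be deleted below. The following is a simplified, illustrative sketch of the rpciod_up()/rpciod_down() pair, not the verbatim sched.c of this era:

#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/errno.h>

/* Illustrative sketch only, not the verbatim kernel code. */
static struct workqueue_struct *rpciod_workqueue;
static atomic_t rpciod_users = ATOMIC_INIT(0);
static DEFINE_MUTEX(rpciod_mutex);

/* Take a reference to rpciod, creating the workqueue on first use. */
static int rpciod_up(void)
{
	int error = 0;

	mutex_lock(&rpciod_mutex);
	if (rpciod_workqueue == NULL) {
		rpciod_workqueue = create_workqueue("rpciod");
		if (rpciod_workqueue == NULL) {
			error = -ENOMEM;
			goto out;
		}
	}
	atomic_inc(&rpciod_users);
out:
	mutex_unlock(&rpciod_mutex);
	return error;
}

/* Drop a reference; the last user destroys the workqueue. */
static void rpciod_down(void)
{
	mutex_lock(&rpciod_mutex);
	if (atomic_dec_and_test(&rpciod_users)) {
		destroy_workqueue(rpciod_workqueue);
		rpciod_workqueue = NULL;
	}
	mutex_unlock(&rpciod_mutex);
}

This mirrors the rpciod_down() hunk at the bottom of the diff: once the reference count is driven by client lifetimes, there is never a moment where live tasks have to be killed globally before destroy_workqueue().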
Diffstat (limited to 'net/sunrpc/sched.c')
-rw-r--r--  net/sunrpc/sched.c | 31
1 file changed, 0 insertions(+), 31 deletions(-)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index f6eed4d4e5dd13..05825154ddd903 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -39,7 +39,6 @@ static mempool_t *rpc_task_mempool __read_mostly;
static mempool_t *rpc_buffer_mempool __read_mostly;
static void __rpc_default_timer(struct rpc_task *task);
-static void rpciod_killall(void);
static void rpc_async_schedule(struct work_struct *);
static void rpc_release_task(struct rpc_task *task);
@@ -52,7 +51,6 @@ static RPC_WAITQ(delay_queue, "delayq");
 * All RPC clients are linked into this list
 */
static LIST_HEAD(all_clients);
-static DECLARE_WAIT_QUEUE_HEAD(client_kill_wait);
/*
 * rpciod-related stuff
@@ -996,32 +994,6 @@ void rpc_killall_tasks(struct rpc_clnt *clnt)
	spin_unlock(&clnt->cl_lock);
}
-static void rpciod_killall(void)
-{
-	struct rpc_clnt *clnt;
-	unsigned long flags;
-
-	for(;;) {
-		clear_thread_flag(TIF_SIGPENDING);
-
-		spin_lock(&rpc_sched_lock);
-		list_for_each_entry(clnt, &all_clients, cl_clients)
-			rpc_killall_tasks(clnt);
-		spin_unlock(&rpc_sched_lock);
-		flush_workqueue(rpciod_workqueue);
-		if (!list_empty(&all_clients))
-			break;
-		dprintk("RPC: rpciod_killall: waiting for tasks "
-				"to exit\n");
-		wait_event_timeout(client_kill_wait,
-				list_empty(&all_clients), 1*HZ);
-	}
-
-	spin_lock_irqsave(&current->sighand->siglock, flags);
-	recalc_sigpending();
-	spin_unlock_irqrestore(&current->sighand->siglock, flags);
-}
-
void rpc_register_client(struct rpc_clnt *clnt)
{
	spin_lock(&rpc_sched_lock);
@@ -1033,8 +1005,6 @@ void rpc_unregister_client(struct rpc_clnt *clnt)
{
	spin_lock(&rpc_sched_lock);
	list_del(&clnt->cl_clients);
-	if (list_empty(&all_clients))
-		wake_up(&client_kill_wait);
	spin_unlock(&rpc_sched_lock);
}
@@ -1083,7 +1053,6 @@ rpciod_down(void)
dprintk("RPC: destroying workqueue rpciod\n");
if (atomic_read(&rpciod_users) == 0 && rpciod_workqueue != NULL) {
- rpciod_killall();
destroy_workqueue(rpciod_workqueue);
rpciod_workqueue = NULL;
}
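For completeness, the caller side of the new reference would pair up roughly as below. This is a hypothetical illustration only: the real create_client()/client-release changes live in net/sunrpc/clnt.c, which this diffstat-limited view does not show, and the *_sketch names are invented for the example. The rpc_register_client()/rpc_unregister_client() helpers are the ones visible in the hunks above.

#include <linux/slab.h>
#include <linux/err.h>

/* Hypothetical pairing, invented names -- the real change is in
 * net/sunrpc/clnt.c. */
static struct rpc_clnt *rpc_new_client_sketch(void)
{
	struct rpc_clnt *clnt;
	int err;

	err = rpciod_up();		/* pin the shared rpciod workqueue */
	if (err)
		return ERR_PTR(err);

	clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
	if (clnt == NULL) {
		rpciod_down();		/* unwind the reference on failure */
		return ERR_PTR(-ENOMEM);
	}
	rpc_register_client(clnt);	/* helper shown in the hunk above */
	return clnt;
}

static void rpc_free_client_sketch(struct rpc_clnt *clnt)
{
	rpc_unregister_client(clnt);
	kfree(clnt);
	rpciod_down();			/* last client may destroy rpciod_workqueue */
}

With every rpc_clnt holding such a reference, rpciod_down() can only reach destroy_workqueue() after the last client has gone away, so there is no window in which rpciod_killall() would still have had work to do.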