author		Trond Myklebust <trondmy@gmail.com>	2019-04-07 13:58:44 -0400
committer	Anna Schumaker <Anna.Schumaker@Netapp.com>	2019-04-25 14:18:12 -0400
commit		ae67bd3821bb0a54d97e7883d211196637d487a9 (patch)
tree		351a54fd10a71f6c76d5e8d8a8ce323c9252192e /net/sunrpc/sched.c
parent		085b7755808aa11f78ab9377257e1dad2e6fa4bb (diff)
download	linux-ae67bd3821bb0a54d97e7883d211196637d487a9.tar.gz
SUNRPC: Fix up task signalling
The RPC_TASK_KILLED flag should really not be set from another context
because it can clobber data in the struct task when task->tk_flags is
changed non-atomically. Let's therefore swap out RPC_TASK_KILLED with
an atomic flag, and add a function to set that flag and safely wake up
the task.

Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
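[Editor's sketch, not part of the patch; the demo_* helpers are hypothetical.
The hazard the patch removes is a plain read-modify-write on tk_flags from
another context; the fix is an atomic bit operation on tk_runstate:]

#include <linux/bitops.h>		/* set_bit() */
#include <linux/sunrpc/sched.h>		/* struct rpc_task, RPC_TASK_* */

/* Old scheme: non-atomic read-modify-write on tk_flags. If the task
 * concurrently executes e.g. task->tk_flags |= RPC_TASK_SENT, one of
 * the two updates can be lost, corrupting the flags word.
 */
static void demo_kill_unsafe(struct rpc_task *task)
{
	task->tk_flags |= RPC_TASK_KILLED;	/* load, OR, store */
}

/* New scheme: a dedicated bit in tk_runstate, set atomically, so a
 * concurrent update can neither be lost nor clobbered.
 */
static void demo_signal_safe(struct rpc_task *task)
{
	set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
	smp_mb__after_atomic();	/* order the bit vs. the wakeup that follows */
}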
Diffstat (limited to 'net/sunrpc/sched.c')
-rw-r--r--	net/sunrpc/sched.c	28
1 file changed, 23 insertions, 5 deletions
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 28956c70100af0..3d6cb91ba598f4 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -759,8 +759,7 @@ static void
 rpc_reset_task_statistics(struct rpc_task *task)
 {
 	task->tk_timeouts = 0;
-	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);
-
+	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT);
 	rpc_init_task_statistics(task);
 }
 
@@ -773,7 +772,6 @@ void rpc_exit_task(struct rpc_task *task)
 	if (task->tk_ops->rpc_call_done != NULL) {
 		task->tk_ops->rpc_call_done(task, task->tk_calldata);
 		if (task->tk_action != NULL) {
-			WARN_ON(RPC_ASSASSINATED(task));
 			/* Always release the RPC slot and buffer memory */
 			xprt_release(task);
 			rpc_reset_task_statistics(task);
@@ -781,6 +779,19 @@ void rpc_exit_task(struct rpc_task *task)
 	}
 }
 
+void rpc_signal_task(struct rpc_task *task)
+{
+	struct rpc_wait_queue *queue;
+
+	if (!RPC_IS_ACTIVATED(task))
+		return;
+	set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
+	smp_mb__after_atomic();
+	queue = READ_ONCE(task->tk_waitqueue);
+	if (queue)
+		rpc_wake_up_queued_task_set_status(queue, task, -ERESTARTSYS);
+}
+
 void rpc_exit(struct rpc_task *task, int status)
 {
 	task->tk_status = status;
@@ -836,6 +847,13 @@ static void __rpc_execute(struct rpc_task *task)
 		 */
 		if (!RPC_IS_QUEUED(task))
 			continue;
+
+		/*
+		 * Signalled tasks should exit rather than sleep.
+		 */
+		if (RPC_SIGNALLED(task))
+			rpc_exit(task, -ERESTARTSYS);
+
 		/*
 		 * The queue->lock protects against races with
 		 * rpc_make_runnable().
@@ -861,7 +879,7 @@ static void __rpc_execute(struct rpc_task *task)
 		status = out_of_line_wait_on_bit(&task->tk_runstate,
 				RPC_TASK_QUEUED, rpc_wait_bit_killable,
 				TASK_KILLABLE);
-		if (status == -ERESTARTSYS) {
+		if (status < 0) {
 			/*
 			 * When a sync task receives a signal, it exits with
 			 * -ERESTARTSYS. In order to catch any callbacks that
@@ -869,7 +887,7 @@ static void __rpc_execute(struct rpc_task *task)
 			 * break the loop here, but go around once more.
 			 */
 			dprintk("RPC: %5u got signal\n", task->tk_pid);
-			task->tk_flags |= RPC_TASK_KILLED;
+			set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
 			rpc_exit(task, -ERESTARTSYS);
 		}
 		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);