author:    Trond Myklebust <Trond.Myklebust@netapp.com>  2012-09-07 11:08:50 -0400
committer: Trond Myklebust <Trond.Myklebust@netapp.com>  2012-09-07 11:43:49 -0400
commit:    f39c1bfb5a03e2d255451bff05be0d7255298fa4 (patch)
tree:      c4f7c970840e7803e22ce10809746272d23f6fde /net/sunrpc/xprt.c
parent:    01913b49cf1dc6409a07dd2a4cc6af2e77f3c410 (diff)
SUNRPC: Fix a UDP transport regression
Commit 43cedbf0e8dfb9c5610eb7985d5f21263e313802 (SUNRPC: Ensure that we grab the XPRT_LOCK before calling xprt_alloc_slot) is causing hangs in the case of NFS over UDP mounts.

Since neither the UDP nor the RDMA transport mechanism uses dynamic slot allocation, we can skip grabbing the socket lock for those transports. Add a new rpc_xprt_op to allow switching between the TCP and the UDP/RDMA cases.

Note that the NFSv4.1 back channel assigns the slot directly through rpc_run_bc_task, so we can ignore that case.

Reported-by: Dick Streefland <dick.streefland@altium.nl>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: stable@vger.kernel.org [>= 3.1]
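The fix turns slot allocation into a per-transport operation. A minimal sketch of how the new op could be wired up on the xprtsock.c side (not part of this diff, so the ops-table names below are assumptions; the alloc_slot field matches the xprt->ops->alloc_slot call site in the last hunk):

/* Sketch only: the transport ops vector grows an alloc_slot hook.
 * UDP and RDMA use a fixed slot table, so they can allocate a slot
 * without contending for the transport write lock; TCP keeps the
 * lock so that dynamic slot allocation is throttled while the
 * connection is congested or reconnecting.
 */
struct rpc_xprt_ops {
	/* ... existing ops ... */
	void	(*alloc_slot)(struct rpc_xprt *xprt, struct rpc_task *task);
	/* ... */
};

static struct rpc_xprt_ops xs_udp_ops = {
	/* ... */
	.alloc_slot	= xprt_alloc_slot,		/* no XPRT_LOCK needed */
	/* ... */
};

static struct rpc_xprt_ops xs_tcp_ops = {
	/* ... */
	.alloc_slot	= xprt_lock_and_alloc_slot,	/* throttle under XPRT_LOCK */
	/* ... */
};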
Diffstat (limited to 'net/sunrpc/xprt.c')
 net/sunrpc/xprt.c | 34 ++++++++++++++++++++--------------
 1 file changed, 20 insertions(+), 14 deletions(-)
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index a5a402a7d21f9e..5d7f61d7559c97 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -969,11 +969,11 @@ static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
 	return false;
 }
 
-static void xprt_alloc_slot(struct rpc_task *task)
+void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
 {
-	struct rpc_xprt *xprt = task->tk_xprt;
 	struct rpc_rqst *req;
 
+	spin_lock(&xprt->reserve_lock);
 	if (!list_empty(&xprt->free)) {
 		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
 		list_del(&req->rq_list);
@@ -994,12 +994,29 @@ static void xprt_alloc_slot(struct rpc_task *task)
 	default:
 		task->tk_status = -EAGAIN;
 	}
+	spin_unlock(&xprt->reserve_lock);
 	return;
 out_init_req:
 	task->tk_status = 0;
 	task->tk_rqstp = req;
 	xprt_request_init(task, xprt);
+	spin_unlock(&xprt->reserve_lock);
+}
+EXPORT_SYMBOL_GPL(xprt_alloc_slot);
+
+void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
+{
+	/* Note: grabbing the xprt_lock_write() ensures that we throttle
+	 * new slot allocation if the transport is congested (i.e. when
+	 * reconnecting a stream transport or when out of socket write
+	 * buffer space).
+	 */
+	if (xprt_lock_write(xprt, task)) {
+		xprt_alloc_slot(xprt, task);
+		xprt_release_write(xprt, task);
+	}
 }
+EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);
 
 static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
 {
@@ -1083,20 +1100,9 @@ void xprt_reserve(struct rpc_task *task)
 	if (task->tk_rqstp != NULL)
 		return;
 
-	/* Note: grabbing the xprt_lock_write() here is not strictly needed,
-	 * but ensures that we throttle new slot allocation if the transport
-	 * is congested (e.g. if reconnecting or if we're out of socket
-	 * write buffer space).
-	 */
 	task->tk_timeout = 0;
 	task->tk_status = -EAGAIN;
-	if (!xprt_lock_write(xprt, task))
-		return;
-
-	spin_lock(&xprt->reserve_lock);
-	xprt_alloc_slot(task);
-	spin_unlock(&xprt->reserve_lock);
-	xprt_release_write(xprt, task);
+	xprt->ops->alloc_slot(xprt, task);
 }
 
 static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
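Since both allocators are exported above, matching declarations would belong in include/linux/sunrpc/xprt.h (not shown in this hunk); a sketch, assuming the signatures from the diff:

void	xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
void	xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);

The net effect is that xprt_reserve() no longer takes xprt_lock_write() unconditionally: each transport's alloc_slot op decides whether throttling is needed, so a UDP request can claim a slot even while another task holds the transport lock, which is what resolves the hangs described above.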