author    NeilBrown <neilb@suse.de>             2023-09-11 10:40:15 -0400
committer Chuck Lever <chuck.lever@oracle.com>  2023-09-17 19:05:46 -0400
commit    a74d70b036068624c8925fc6e1c43bc1a27e7e2d (patch)
tree      dbad14b7012a4166fdd437d7c32b5cf3e675d016
parent    59c25aac42f19d9f7c7ca97251c2698e6c6565ad (diff)
download  linux-a74d70b036068624c8925fc6e1c43bc1a27e7e2d.tar.gz
SUNRPC: discard sp_lock
sp_lock is now only used to protect sp_all_threads. This isn't needed, as
sp_all_threads is only manipulated through svc_set_num_threads(), which is
already serialized. Read access only requires rcu_read_lock(), so no more
locking is needed.

Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
-rw-r--r--  include/linux/sunrpc/svc.h   1
-rw-r--r--  net/sunrpc/svc.c            10
2 files changed, 5 insertions, 6 deletions
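For context, here is a minimal sketch (not part of this patch) of the reader side the commit message relies on: with sp_lock gone, anything walking sp_all_threads only needs rcu_read_lock(), because the update side uses list_add_rcu()/list_del_rcu() and is serialized by the callers of svc_set_num_threads(). The helper name example_scan_pool is hypothetical; the real in-tree readers follow the same pattern.

/* Hypothetical reader: traverse a pool's thread list under RCU only. */
static void example_scan_pool(struct svc_pool *pool)
{
	struct svc_rqst *rqstp;

	rcu_read_lock();
	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
		/* Inspect rqstp here; sleeping is not allowed inside
		 * the RCU read-side critical section.
		 */
	}
	rcu_read_unlock();
}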
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 9d0fcd6148ae9b..8ce1392c1a357a 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -34,7 +34,6 @@
  */
 struct svc_pool {
 	unsigned int		sp_id;		/* pool id; also node id on NUMA */
-	spinlock_t		sp_lock;	/* protects all fields */
 	struct lwq		sp_xprts;	/* pending transports */
 	atomic_t		sp_nrthreads;	/* # of threads in pool */
 	struct list_head	sp_all_threads;	/* all server threads */
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 0928d3f918b0bb..efe7a58ccbdca1 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -511,7 +511,6 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
 		lwq_init(&pool->sp_xprts);
 		INIT_LIST_HEAD(&pool->sp_all_threads);
 		init_llist_head(&pool->sp_idle_threads);
-		spin_lock_init(&pool->sp_lock);
 		percpu_counter_init(&pool->sp_messages_arrived, 0, GFP_KERNEL);
 		percpu_counter_init(&pool->sp_sockets_queued, 0, GFP_KERNEL);
@@ -682,9 +681,12 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
 	spin_unlock_bh(&serv->sv_lock);
 	atomic_inc(&pool->sp_nrthreads);
-	spin_lock_bh(&pool->sp_lock);
+
+	/* Protected by whatever lock the service uses when calling
+	 * svc_set_num_threads()
+	 */
 	list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
-	spin_unlock_bh(&pool->sp_lock);
+
 	return rqstp;
 }
@@ -922,9 +924,7 @@ svc_exit_thread(struct svc_rqst *rqstp)
 	struct svc_serv	*serv = rqstp->rq_server;
 	struct svc_pool	*pool = rqstp->rq_pool;
-	spin_lock_bh(&pool->sp_lock);
 	list_del_rcu(&rqstp->rq_all);
-	spin_unlock_bh(&pool->sp_lock);
 	atomic_dec(&pool->sp_nrthreads);
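On the update side, the justification is that svc_set_num_threads() is the only path that adds or removes entries on sp_all_threads, and its callers already serialize it; nfsd, for instance, holds nfsd_mutex around thread-count changes. A hedged illustration of that assumption follows (example_set_threads is a made-up wrapper, not kernel code):

/* Illustrative only: the service's own mutex serializes thread-count
 * changes, so list_add_rcu()/list_del_rcu() on sp_all_threads never
 * race with each other and need no per-pool spinlock.
 */
static int example_set_threads(struct svc_serv *serv, int nrservs)
{
	int err;

	mutex_lock(&nfsd_mutex);
	err = svc_set_num_threads(serv, NULL, nrservs);
	mutex_unlock(&nfsd_mutex);
	return err;
}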