6.10-stable review patch. If anyone has any objections, please let me know.
------------------
From: NeilBrown <neilb@suse.de>
[ Upstream commit 60749cbe3d8ae572a6c7dda675de3e8b25797a18 ]
sp_nrthreads is only ever accessed under the service mutex
(nlmsvc_mutex, nfs_callback_mutex, or nfsd_mutex), so there is no need
for it to be an atomic_t.
The fact that all code using it is single-threaded means that we can simplify svc_pool_victim and remove the temporary elevation of sp_nrthreads.
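For context, the pattern relied on here is that every access to the counter
already happens while one of those mutexes is held, so plain arithmetic on an
unsigned int is race-free. A minimal sketch of that idea (hypothetical names,
not the kernel code being patched):

	#include <linux/mutex.h>

	/* Stand-ins for nfsd_mutex/nlmsvc_mutex and sp_nrthreads. */
	static DEFINE_MUTEX(example_mutex);
	static unsigned int example_nrthreads;

	static void example_thread_added(void)
	{
		mutex_lock(&example_mutex);
		example_nrthreads += 1;	/* no atomic_inc() needed under the mutex */
		mutex_unlock(&example_mutex);
	}

	static void example_thread_removed(void)
	{
		mutex_lock(&example_mutex);
		example_nrthreads -= 1;
		mutex_unlock(&example_mutex);
	}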
Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Stable-dep-of: aadc3bbea163 ("NFSD: Limit the number of concurrent async COPY operations")
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 fs/nfsd/nfsctl.c           |  2 +-
 fs/nfsd/nfssvc.c           |  2 +-
 include/linux/sunrpc/svc.h |  4 ++--
 net/sunrpc/svc.c           | 31 +++++++++++--------------------
 4 files changed, 15 insertions(+), 24 deletions(-)
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 0f9b4f7b56cd8..37f619ccafce0 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1746,7 +1746,7 @@ int nfsd_nl_threads_get_doit(struct sk_buff *skb, struct genl_info *info)
 			struct svc_pool *sp = &nn->nfsd_serv->sv_pools[i];
 
 			err = nla_put_u32(skb, NFSD_A_SERVER_THREADS,
-					  atomic_read(&sp->sp_nrthreads));
+					  sp->sp_nrthreads);
 			if (err)
 				goto err_unlock;
 		}
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 89d7918de7b1a..877f926356549 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -705,7 +705,7 @@ int nfsd_get_nrthreads(int n, int *nthreads, struct net *net)
 
 	if (serv)
 		for (i = 0; i < serv->sv_nrpools && i < n; i++)
-			nthreads[i] = atomic_read(&serv->sv_pools[i].sp_nrthreads);
+			nthreads[i] = serv->sv_pools[i].sp_nrthreads;
 	return 0;
 }
 
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 23617da0e565e..38a4fdf784e9a 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -33,9 +33,9 @@
  * node traffic on multi-node NUMA NFS servers.
  */
 struct svc_pool {
-	unsigned int		sp_id;	    	/* pool id; also node id on NUMA */
+	unsigned int		sp_id;		/* pool id; also node id on NUMA */
 	struct lwq		sp_xprts;	/* pending transports */
-	atomic_t		sp_nrthreads;	/* # of threads in pool */
+	unsigned int		sp_nrthreads;	/* # of threads in pool */
 	struct list_head	sp_all_threads;	/* all server threads */
 	struct llist_head	sp_idle_threads; /* idle server threads */
 
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index d9cda1e53a017..6a15b831589c0 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -682,7 +682,7 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
 	serv->sv_nrthreads += 1;
 	spin_unlock_bh(&serv->sv_lock);
 
-	atomic_inc(&pool->sp_nrthreads);
+	pool->sp_nrthreads += 1;
 
 	/* Protected by whatever lock the service uses when calling
 	 * svc_set_num_threads()
@@ -737,31 +737,22 @@ svc_pool_victim(struct svc_serv *serv, struct svc_pool *target_pool,
 	struct svc_pool *pool;
 	unsigned int i;
 
-retry:
 	pool = target_pool;
 
-	if (pool != NULL) {
-		if (atomic_inc_not_zero(&pool->sp_nrthreads))
-			goto found_pool;
-		return NULL;
-	} else {
+	if (!pool) {
 		for (i = 0; i < serv->sv_nrpools; i++) {
 			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
-			if (atomic_inc_not_zero(&pool->sp_nrthreads))
-				goto found_pool;
+			if (pool->sp_nrthreads)
+				break;
 		}
-		return NULL;
 	}
 
-found_pool:
-	set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
-	set_bit(SP_NEED_VICTIM, &pool->sp_flags);
-	if (!atomic_dec_and_test(&pool->sp_nrthreads))
+	if (pool && pool->sp_nrthreads) {
+		set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
+		set_bit(SP_NEED_VICTIM, &pool->sp_flags);
 		return pool;
-	/* Nothing left in this pool any more */
-	clear_bit(SP_NEED_VICTIM, &pool->sp_flags);
-	clear_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
-	goto retry;
+	}
+	return NULL;
 }
 
 static int
@@ -840,7 +831,7 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 	if (!pool)
 		nrservs -= serv->sv_nrthreads;
 	else
-		nrservs -= atomic_read(&pool->sp_nrthreads);
+		nrservs -= pool->sp_nrthreads;
 
 	if (nrservs > 0)
 		return svc_start_kthreads(serv, pool, nrservs);
@@ -928,7 +919,7 @@ svc_exit_thread(struct svc_rqst *rqstp)
 
 	list_del_rcu(&rqstp->rq_all);
 
-	atomic_dec(&pool->sp_nrthreads);
+	pool->sp_nrthreads -= 1;
 
 	spin_lock_bh(&serv->sv_lock);
 	serv->sv_nrthreads -= 1;
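
For review convenience, the svc_pool_victim() hunk above leaves the function
reading roughly as follows (reconstructed from the diff; surrounding context
in the tree may differ):

	static struct svc_pool *
	svc_pool_victim(struct svc_serv *serv, struct svc_pool *target_pool,
			unsigned int *state)
	{
		struct svc_pool *pool;
		unsigned int i;

		pool = target_pool;

		if (!pool) {
			/* Pick the next pool that still has threads to stop. */
			for (i = 0; i < serv->sv_nrpools; i++) {
				pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
				if (pool->sp_nrthreads)
					break;
			}
		}

		if (pool && pool->sp_nrthreads) {
			set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
			set_bit(SP_NEED_VICTIM, &pool->sp_flags);
			return pool;
		}
		return NULL;
	}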