author    Sagi Grimberg <sagi.grimberg@vastdata.com>    2024-05-07 09:54:10 +0300
committer Keith Busch <kbusch@kernel.org>    2024-05-07 08:07:05 -0700
commit    34cfb09cdc75457a671279165a88a0739a170f07 (patch)
tree      27c66ee00586139fa3c264949eabf645d0396695
parent    4b9a89be214235acbff003232baba123c868a25c (diff)
download  linux-34cfb09cdc75457a671279165a88a0739a170f07.tar.gz
nvmet: make nvmet_wq unbound
When deleting many controllers one-by-one, it takes a very long time because the work elements may serialize: they are scheduled on the executing CPU instead of being spread across CPUs. In general, nvmet_wq may host long-standing work elements, so it is better to make it unbound regardless.

Signed-off-by: Sagi Grimberg <sagi.grimberg@vastdata.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <kbusch@kernel.org>
-rw-r--r--  drivers/nvme/target/core.c  3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index e06013c5dace94..2fde22323622e4 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1686,7 +1686,8 @@ static int __init nvmet_init(void)
if (!buffered_io_wq)
goto out_free_zbd_work_queue;
- nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
+ nvmet_wq = alloc_workqueue("nvmet-wq",
+ WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
if (!nvmet_wq)
goto out_free_buffered_work_queue;
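
For context, the change uses the standard workqueue API: passing WQ_UNBOUND to alloc_workqueue() lets queued work items run on any CPU rather than being bound to the CPU that queued them, which is what allows many controller teardowns to proceed in parallel. Below is a minimal, hypothetical sketch of that pattern; the names demo_wq, demo_ctrl and demo_release_work are illustrative and not taken from the nvmet code.

/* Sketch: per-object teardown work queued on an unbound workqueue. */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

struct demo_ctrl {
	struct work_struct release_work;
	/* ... controller state ... */
};

static void demo_release_work(struct work_struct *w)
{
	struct demo_ctrl *ctrl = container_of(w, struct demo_ctrl, release_work);

	/*
	 * Long-running teardown runs here; with WQ_UNBOUND the scheduler
	 * may place this work on any CPU instead of pinning it to the
	 * CPU that called queue_work().
	 */
	kfree(ctrl);
}

/* Queue teardown of one controller; many of these can run concurrently. */
static void demo_schedule_delete(struct demo_ctrl *ctrl)
{
	INIT_WORK(&ctrl->release_work, demo_release_work);
	queue_work(demo_wq, &ctrl->release_work);
}

static int __init demo_init(void)
{
	demo_wq = alloc_workqueue("demo-wq", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!demo_wq)
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The design point mirrors the patch: WQ_MEM_RECLAIM is kept because the queue may be needed on the memory-reclaim path, and WQ_UNBOUND is added so long-running items spread out instead of serializing on one CPU.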