author	Sebastian Andrzej Siewior <bigeasy@linutronix.de>	2015-01-29 15:10:08 +0100
committer	Sebastian Andrzej Siewior <bigeasy@linutronix.de>	2016-02-13 00:36:19 +0100
commit	862622197f8c5f5dc178809fb270130af14f3ef7 (patch)
tree	4d08986fe7d46ddcceb034f16b615ab84e106910
parent	9d518f2ce7c5e92384b6ef46559cd7db789c9da7 (diff)
block/mq: don't complete requests via IPI
The IPI callback runs in hardirq context, but the completion path takes locks that are sleeping locks on PREEMPT_RT_FULL. This patch moves the completion into a workqueue, where it runs in process context and may sleep.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
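For illustration, here is a minimal sketch of the same deferral pattern, assuming kernel context; my_req, my_req_init, my_complete_work and my_complete_request are hypothetical names, not part of this patch. Instead of invoking the completion callback from the IPI handler (hardirq context), the request embeds a work_struct and the completion is queued on the submitting CPU's workqueue, where it runs in a worker thread and may take sleeping locks:

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Hypothetical request type, used only to sketch the pattern. */
struct my_req {
	int cpu;			/* CPU that submitted the request */
	struct work_struct work;	/* plays the role of rq->work below */
};

/* Runs in process context (a worker thread), so sleeping locks are allowed. */
static void my_complete_work(struct work_struct *work)
{
	struct my_req *rq = container_of(work, struct my_req, work);

	/* ... call the driver's completion handler for rq here ... */
	(void)rq;
}

static void my_req_init(struct my_req *rq, int cpu)
{
	rq->cpu = cpu;
	INIT_WORK(&rq->work, my_complete_work);	/* mirrors the INIT_WORK() hunks below */
}

/* Called on the completing CPU; stands in for the smp_call_function_single_async() IPI path. */
static void my_complete_request(struct my_req *rq)
{
	schedule_work_on(rq->cpu, &rq->work);
}

The patch below does exactly this under PREEMPT_RT_FULL: struct request gains a work_struct in the union with csd, blk_rq_init()/blk_mq_rq_ctx_init() initialize it with __blk_mq_complete_request_remote_work(), and blk_mq_ipi_complete_request() calls schedule_work_on() instead of sending an IPI; non-RT kernels keep the IPI path unchanged.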
-rw-r--r--	block/blk-core.c        |  3
-rw-r--r--	block/blk-mq.c          | 20
-rw-r--r--	include/linux/blk-mq.h  |  1
-rw-r--r--	include/linux/blkdev.h  |  1
4 files changed, 25 insertions, 0 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 1ed2f55793a0d0..485dea101a3f6d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -125,6 +125,9 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 	INIT_LIST_HEAD(&rq->queuelist);
 	INIT_LIST_HEAD(&rq->timeout_list);
+#ifdef CONFIG_PREEMPT_RT_FULL
+	INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
+#endif
 	rq->cpu = -1;
 	rq->q = q;
 	rq->__sector = (sector_t) -1;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 33ceddfd81a221..798e9d8a3541a8 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -196,6 +196,9 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	rq->resid_len = 0;
 	rq->sense = NULL;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
+#endif
 	INIT_LIST_HEAD(&rq->timeout_list);
 	rq->timeout = 0;
@@ -325,6 +328,17 @@ void blk_mq_end_request(struct request *rq, int error)
 }
 EXPORT_SYMBOL(blk_mq_end_request);
+#ifdef CONFIG_PREEMPT_RT_FULL
+
+void __blk_mq_complete_request_remote_work(struct work_struct *work)
+{
+	struct request *rq = container_of(work, struct request, work);
+
+	rq->q->softirq_done_fn(rq);
+}
+
+#else
+
 static void __blk_mq_complete_request_remote(void *data)
 {
 	struct request *rq = data;
@@ -332,6 +346,8 @@ static void __blk_mq_complete_request_remote(void *data)
 	rq->q->softirq_done_fn(rq);
 }
+#endif
+
 static void blk_mq_ipi_complete_request(struct request *rq)
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
@@ -348,10 +364,14 @@ static void blk_mq_ipi_complete_request(struct request *rq)
 		shared = cpus_share_cache(cpu, ctx->cpu);
 
 	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
+#ifdef CONFIG_PREEMPT_RT_FULL
+		schedule_work_on(ctx->cpu, &rq->work);
+#else
 		rq->csd.func = __blk_mq_complete_request_remote;
 		rq->csd.info = rq;
 		rq->csd.flags = 0;
 		smp_call_function_single_async(ctx->cpu, &rq->csd);
+#endif
 	} else {
 		rq->q->softirq_done_fn(rq);
 	}
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index daf17d70aeca92..463df89542556f 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -212,6 +212,7 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
+void __blk_mq_complete_request_remote_work(struct work_struct *work);
 int blk_mq_request_started(struct request *rq);
 void blk_mq_start_request(struct request *rq);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c70e3588a48c72..460be1e1453514 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -89,6 +89,7 @@ struct request {
 	struct list_head queuelist;
 	union {
 		struct call_single_data csd;
+		struct work_struct work;
 		unsigned long fifo_time;