author	Jens Axboe <axboe@kernel.dk>	2020-08-27 16:46:24 -0600
committer	Jens Axboe <axboe@kernel.dk>	2020-08-27 16:48:34 -0600
commit	fdee946d0925f971f167d2606984426763355e4f (patch)
tree	fa089a3a3f0cfbdb57086596edfe5df66cda9cf4
parent	eefdf30f3dcb5c1d47bee2b3afdb9d4d05343ff3 (diff)
download	linux-visconti-fdee946d0925f971f167d2606984426763355e4f.tar.gz
io_uring: don't bounce block based -EAGAIN retry off task_work
These events happen inline from submission, so there's no need to bounce
them through the original task. Just set them up for retry and issue retry
directly instead of going over task_work.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--	fs/io_uring.c	26
1 file changed, 6 insertions, 20 deletions
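For reference, here is a rough sketch of what io_rw_reissue() ends up looking like after this change, pieced together from the hunks below. The CONFIG_BLOCK guard and the local "int ret" declaration are assumed surrounding context that the hunks don't show:

static bool io_rw_reissue(struct io_kiocb *req, long res)
{
#ifdef CONFIG_BLOCK
	int ret;

	if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker())
		return false;

	/*
	 * Grab the mm inline from the submission path instead of bouncing
	 * the retry through task_work on the original task.
	 */
	ret = io_sq_thread_acquire_mm(req->ctx, req);

	if (io_resubmit_prep(req, ret)) {
		refcount_inc(&req->refs);
		io_queue_async_work(req);
		return true;
	}
#endif
	return false;
}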
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 8c77ad4a65f06..852c2eaf1a9ac 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2295,22 +2295,6 @@ end_req:
 	io_req_complete(req, ret);
 	return false;
 }
-
-static void io_rw_resubmit(struct callback_head *cb)
-{
-	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
-	struct io_ring_ctx *ctx = req->ctx;
-	int err;
-
-	err = io_sq_thread_acquire_mm(ctx, req);
-
-	if (io_resubmit_prep(req, err)) {
-		refcount_inc(&req->refs);
-		io_queue_async_work(req);
-	}
-
-	percpu_ref_put(&ctx->refs);
-}
 #endif
 
 static bool io_rw_reissue(struct io_kiocb *req, long res)
@@ -2321,12 +2305,14 @@ static bool io_rw_reissue(struct io_kiocb *req, long res)
 	if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker())
 		return false;
 
-	init_task_work(&req->task_work, io_rw_resubmit);
-	percpu_ref_get(&req->ctx->refs);
+	ret = io_sq_thread_acquire_mm(req->ctx, req);
 
-	ret = io_req_task_work_add(req, &req->task_work, true);
-	if (!ret)
+	if (io_resubmit_prep(req, ret)) {
+		refcount_inc(&req->refs);
+		io_queue_async_work(req);
 		return true;
+	}
+
 #endif
 	return false;
 }