From: Nick Piggin

Move current_io_context out of the get_request fastpath.  Also try to
streamline a few other things in this area.

Signed-off-by: Nick Piggin
Cc: Jens Axboe
Signed-off-by: Andrew Morton
---

 drivers/block/ll_rw_blk.c |   70 ++++++++++++++++++++++++----------------
 1 files changed, 38 insertions(+), 32 deletions(-)

diff -puN drivers/block/ll_rw_blk.c~blk-fastpath-get_request drivers/block/ll_rw_blk.c
--- 25/drivers/block/ll_rw_blk.c~blk-fastpath-get_request	2005-05-03 20:53:51.000000000 -0700
+++ 25-akpm/drivers/block/ll_rw_blk.c	2005-05-03 20:53:51.000000000 -0700
@@ -1875,54 +1875,54 @@ static void freed_request(request_queue_
  * Returns !NULL on success, with queue_lock *not held*.
  */
 static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
-				   int gfp_mask)
+					int gfp_mask)
 {
+	int may_queue;
 	struct request *rq = NULL;
 	struct request_list *rl = &q->rq;
-	struct io_context *ioc = current_io_context(GFP_ATOMIC);
+	struct io_context *ioc = NULL;
 
 	if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)))
 		goto out;
 
-	if (rl->count[rw]+1 >= q->nr_requests) {
-		/*
-		 * The queue will fill after this allocation, so set it as
-		 * full, and mark this process as "batching". This process
-		 * will be allowed to complete a batch of requests, others
-		 * will be blocked.
-		 */
-		if (!blk_queue_full(q, rw)) {
-			ioc_set_batching(q, ioc);
-			blk_set_queue_full(q, rw);
+	may_queue = elv_may_queue(q, rw, bio);
+	if (may_queue == ELV_MQUEUE_NO)
+		goto rq_starved;
+
+	if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
+		if (rl->count[rw]+1 >= q->nr_requests) {
+			ioc = current_io_context(GFP_ATOMIC);
+			/*
+			 * The queue will fill after this allocation, so set
+			 * it as full, and mark this process as "batching".
+			 * This process will be allowed to complete a batch of
+			 * requests, others will be blocked.
+			 */
+			if (!blk_queue_full(q, rw)) {
+				ioc_set_batching(q, ioc);
+				blk_set_queue_full(q, rw);
+			} else {
+				if (may_queue != ELV_MQUEUE_MUST
+						&& !ioc_batching(q, ioc)) {
+					/*
+					 * The queue is full and the allocating
+					 * process is not a "batcher", and not
+					 * exempted by the IO scheduler
+					 */
+					goto out;
+				}
+			}
 		}
-	}
-
-	switch (elv_may_queue(q, rw, bio)) {
-		case ELV_MQUEUE_NO:
-			goto rq_starved;
-		case ELV_MQUEUE_MAY:
-			break;
-		case ELV_MQUEUE_MUST:
-			goto get_rq;
-	}
-	if (blk_queue_full(q, rw) && !ioc_batching(q, ioc)) {
-		/*
-		 * The queue is full and the allocating process is not a
-		 * "batcher", and not exempted by the IO scheduler
-		 */
-		goto out;
+		set_queue_congested(q, rw);
 	}
 
-get_rq:
 	rl->count[rw]++;
 	rl->starved[rw] = 0;
-	if (rl->count[rw] >= queue_congestion_on_threshold(q))
-		set_queue_congested(q, rw);
+
 	spin_unlock_irq(q->queue_lock);
 
 	rq = blk_alloc_request(q, rw, bio, gfp_mask);
-	if (!rq) {
+	if (unlikely(!rq)) {
 		/*
 		 * Allocation failed presumably due to memory. Undo anything
 		 * we might have messed up.
@@ -1947,6 +1947,12 @@ rq_starved:
 		goto out;
 	}
 
+	/*
+	 * ioc may be NULL here, and ioc_batching will be false. That's
+	 * OK, if the queue is under the request limit then requests need
+	 * not count toward the nr_batch_requests limit. There will always
+	 * be some limit enforced by BLK_BATCH_TIME.
+	 */
 	if (ioc_batching(q, ioc))
 		ioc->nr_batch_requests--;
_
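
For readers skimming the change rather than the diff: the point is that
current_io_context() may have to allocate an io_context (with GFP_ATOMIC),
so it should only be paid for on the slow path (queue at or past the
congestion threshold), and ioc_batching() must then cope with ioc == NULL.
Below is a minimal, self-contained sketch of that pattern in plain userspace
C; the names (fake_queue, lookup_context, is_batching) are invented for
illustration and are not the kernel code.

/*
 * Sketch of the fastpath idea: defer the expensive per-process context
 * lookup to the slow path, and let the batching check tolerate NULL.
 */
#include <stdio.h>
#include <stdlib.h>

struct io_context { int nr_batch_requests; };
struct fake_queue { int count; int nr_requests; int congestion_threshold; };

/* Stand-in for the expensive lookup/allocation we keep off the fast path. */
static struct io_context *lookup_context(void)
{
	return calloc(1, sizeof(struct io_context));
}

/* Like ioc_batching(): must be safe to call with ioc == NULL. */
static int is_batching(struct io_context *ioc)
{
	return ioc && ioc->nr_batch_requests > 0;
}

static int get_request(struct fake_queue *q)
{
	struct io_context *ioc = NULL;	/* not looked up yet */

	if (q->count + 1 >= q->congestion_threshold) {
		if (q->count + 1 >= q->nr_requests) {
			/* Slow path only: now pay for the lookup. */
			ioc = lookup_context();
			if (!is_batching(ioc)) {
				free(ioc);
				return -1;	/* queue full, not a batcher */
			}
		}
	}

	q->count++;	/* fast path never touched ioc */
	free(ioc);
	return 0;
}

int main(void)
{
	struct fake_queue q = { .count = 0, .nr_requests = 128,
				.congestion_threshold = 113 };
	printf("got request: %d\n", get_request(&q));
	return 0;
}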