From: Nick Piggin

Change the locking around a bit, for a result of 1-2 fewer spin
lock/unlock pairs in the request submission paths.

Signed-off-by: Nick Piggin
Cc: Jens Axboe
Signed-off-by: Andrew Morton
---

 drivers/block/ll_rw_blk.c |   25 +++++++++++++++++--------
 1 files changed, 17 insertions(+), 8 deletions(-)

diff -puN drivers/block/ll_rw_blk.c~blk-reduce-locking drivers/block/ll_rw_blk.c
--- 25/drivers/block/ll_rw_blk.c~blk-reduce-locking	2005-05-03 20:53:50.000000000 -0700
+++ 25-akpm/drivers/block/ll_rw_blk.c	2005-05-03 20:53:50.000000000 -0700
@@ -1870,19 +1870,20 @@ static void freed_request(request_queue_
 #define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
 
 /*
- * Get a free request, queue_lock must not be held
+ * Get a free request, queue_lock must be held.
+ * Returns NULL on failure, with queue_lock held.
+ * Returns !NULL on success, with queue_lock *not held*.
  */
 static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 				   int gfp_mask)
 {
 	struct request *rq = NULL;
 	struct request_list *rl = &q->rq;
-	struct io_context *ioc = get_io_context(gfp_mask);
+	struct io_context *ioc = get_io_context(GFP_ATOMIC);
 
 	if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)))
 		goto out;
 
-	spin_lock_irq(q->queue_lock);
 	if (rl->count[rw]+1 >= q->nr_requests) {
 		/*
 		 * The queue will fill after this allocation, so set it as
@@ -1910,7 +1911,6 @@ static struct request *get_request(reque
 		 * The queue is full and the allocating process is not a
 		 * "batcher", and not exempted by the IO scheduler
 		 */
-		spin_unlock_irq(q->queue_lock);
 		goto out;
 	}
 
@@ -1944,7 +1944,6 @@ rq_starved:
 		if (unlikely(rl->count[rw] == 0))
 			rl->starved[rw] = 1;
 
-		spin_unlock_irq(q->queue_lock);
 		goto out;
 	}
 
@@ -1961,6 +1960,8 @@ out:
 /*
  * No available requests for this queue, unplug the device and wait for some
  * requests to become available.
+ *
+ * Called with q->queue_lock held, and returns with it unlocked.
  */
 static struct request *get_request_wait(request_queue_t *q, int rw,
 					struct bio *bio)
@@ -1980,7 +1981,8 @@ static struct request *get_request_wait(
 		if (!rq) {
 			struct io_context *ioc;
 
-			generic_unplug_device(q);
+			__generic_unplug_device(q);
+			spin_unlock_irq(q->queue_lock);
 			io_schedule();
 
 			/*
@@ -1992,6 +1994,8 @@ static struct request *get_request_wait(
 			ioc = get_io_context(GFP_NOIO);
 			ioc_set_batching(q, ioc);
 			put_io_context(ioc);
+
+			spin_lock_irq(q->queue_lock);
 		}
 		finish_wait(&rl->wait[rw], &wait);
 	}
@@ -2007,8 +2011,12 @@ struct request *blk_get_request(request_
 
 	if (gfp_mask & __GFP_WAIT)
 		rq = get_request_wait(q, rw, NULL);
-	else
+	else {
 		rq = get_request(q, rw, NULL, gfp_mask);
+		if (!rq)
+			spin_unlock_irq(q->queue_lock);
+	}
+	/* q->queue_lock is unlocked at this point */
 
 	return rq;
 }
@@ -2647,9 +2655,10 @@ static int __make_request(request_queue_
 get_rq:
 	/*
 	 * Grab a free request. This is might sleep but can not fail.
+	 * Returns with the queue unlocked.
 	 */
-	spin_unlock_irq(q->queue_lock);
 	req = get_request_wait(q, rw, bio);
+
 	/*
 	 * After dropping the lock and possibly sleeping here, our request
 	 * may now be mergeable after it had proven unmergeable (above).
_
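
For readers new to this lock-handoff convention, here is a minimal
userspace sketch of the asymmetric contract the patch gives
get_request()/get_request_wait(). It is an illustration only: struct
pool, pool_get() and pool_get_wait() are names invented for this
example, and a pthread mutex/condvar stands in for q->queue_lock and
the request_list waitqueue. The point it demonstrates is that the
allocator is entered with the lock held and returns unlocked on
success but still locked on failure, so the fast path pays exactly
one lock/unlock pair and the failure path can sleep without having to
re-take the lock first.

/* build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

struct pool {
	pthread_mutex_t lock;	/* stands in for q->queue_lock */
	pthread_cond_t wait;	/* stands in for rl->wait[rw] */
	int free;		/* free items, guarded by lock */
};

/*
 * Entered with pool->lock held, like the new get_request():
 * returns 1 on success with the lock *released*,
 * returns 0 on failure with the lock still held.
 */
static int pool_get(struct pool *p)
{
	if (p->free == 0)
		return 0;			/* failure: caller still owns the lock */
	p->free--;
	pthread_mutex_unlock(&p->lock);		/* success: drop it on the way out */
	return 1;
}

/*
 * Entered with pool->lock held, like the new get_request_wait():
 * loops until an item is available, returns with the lock released.
 * pthread_cond_wait() drops and re-takes the mutex around the sleep,
 * mirroring the explicit unlock/io_schedule()/lock in the patch.
 */
static void pool_get_wait(struct pool *p)
{
	while (!pool_get(p))			/* still holding the lock on failure */
		pthread_cond_wait(&p->wait, &p->lock);
}

int main(void)
{
	struct pool p = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wait = PTHREAD_COND_INITIALIZER,
		.free = 2,
	};

	pthread_mutex_lock(&p.lock);		/* caller takes the lock once */
	if (pool_get(&p))
		printf("fast path: item taken, lock already dropped\n");
	else
		pthread_mutex_unlock(&p.lock);	/* failure path: we still own it */

	pthread_mutex_lock(&p.lock);
	pool_get_wait(&p);			/* returns with the lock released */
	printf("slow path: item taken\n");
	return 0;
}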
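
A side effect worth noting, as I read the patch: because get_request()
is now entered with queue_lock held (and interrupts disabled), it can
no longer perform a sleeping allocation, which would explain why the
get_io_context(gfp_mask) call inside it becomes
get_io_context(GFP_ATOMIC). The sleeping GFP_NOIO io_context
allocation remains in get_request_wait(), where the lock has already
been dropped around io_schedule().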