From: Andrew Morton

- Coding style fixes

- 80-col display fixes

- Remove some inlines (actually they probably make sense given that they're
  just wrappers)

Cc: Christoph Lameter
Signed-off-by: Andrew Morton
---

 drivers/block/as-iosched.c       |    7 ++++---
 drivers/block/deadline-iosched.c |    6 ++++--
 drivers/block/genhd.c            |    6 +++---
 drivers/block/ll_rw_blk.c        |   25 +++++++++++--------------
 drivers/ide/ide-disk.c           |    3 ++-
 drivers/ide/ide-probe.c          |    6 ++++--
 include/linux/blkdev.h           |    5 +++--
 include/linux/mempool.h          |   12 +++++++-----
 mm/mempool.c                     |   14 ++++++--------
 linux/genhd.h                    |    0 
 linux/ide.h                      |    0 
 11 files changed, 44 insertions(+), 40 deletions(-)

diff -puN drivers/block/as-iosched.c~numa-aware-block-device-control-structure-allocation-tidy drivers/block/as-iosched.c
--- 25/drivers/block/as-iosched.c~numa-aware-block-device-control-structure-allocation-tidy	Fri May 6 15:45:56 2005
+++ 25-akpm/drivers/block/as-iosched.c	Fri May 6 15:45:56 2005
@@ -1878,14 +1878,15 @@ static int as_init_queue(request_queue_t
 
 	ad->q = q; /* Identify what queue the data belongs to */
 
-	ad->hash = kmalloc_node(sizeof(struct list_head)*AS_HASH_ENTRIES, GFP_KERNEL, q->node);
+	ad->hash = kmalloc_node(sizeof(struct list_head)*AS_HASH_ENTRIES,
+				GFP_KERNEL, q->node);
 	if (!ad->hash) {
 		kfree(ad);
 		return -ENOMEM;
 	}
 
-	ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ,mempool_alloc_slab, mempool_free_slab,
-					arq_pool, q->node);
+	ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
+					mempool_free_slab, arq_pool, q->node);
 	if (!ad->arq_pool) {
 		kfree(ad->hash);
 		kfree(ad);
diff -puN drivers/block/deadline-iosched.c~numa-aware-block-device-control-structure-allocation-tidy drivers/block/deadline-iosched.c
--- 25/drivers/block/deadline-iosched.c~numa-aware-block-device-control-structure-allocation-tidy	Fri May 6 15:45:56 2005
+++ 25-akpm/drivers/block/deadline-iosched.c	Fri May 6 15:45:56 2005
@@ -716,13 +716,15 @@ static int deadline_init_queue(request_q
 		return -ENOMEM;
 	memset(dd, 0, sizeof(*dd));
 
-	dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES, GFP_KERNEL, q->node);
+	dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES,
+				GFP_KERNEL, q->node);
 	if (!dd->hash) {
 		kfree(dd);
 		return -ENOMEM;
 	}
 
-	dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, drq_pool, q->node);
+	dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
+					mempool_free_slab, drq_pool, q->node);
 	if (!dd->drq_pool) {
 		kfree(dd->hash);
 		kfree(dd);
diff -puN drivers/block/genhd.c~numa-aware-block-device-control-structure-allocation-tidy drivers/block/genhd.c
--- 25/drivers/block/genhd.c~numa-aware-block-device-control-structure-allocation-tidy	Fri May 6 15:45:56 2005
+++ 25-akpm/drivers/block/genhd.c	Fri May 6 15:45:56 2005
@@ -582,8 +582,6 @@ struct seq_operations diskstats_op = {
 	.show	= diskstats_show
 };
 
-
-inline
 struct gendisk *alloc_disk(int minors)
 {
 	return alloc_disk_node(minors, -1);
@@ -591,7 +589,9 @@ struct gendisk *alloc_disk(int minors)
 
 struct gendisk *alloc_disk_node(int minors, int node_id)
 {
-	struct gendisk *disk = kmalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
+	struct gendisk *disk;
+
+	disk = kmalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
 	if (disk) {
 		memset(disk, 0, sizeof(struct gendisk));
 		if (!init_disk_stats(disk)) {
diff -puN drivers/block/ll_rw_blk.c~numa-aware-block-device-control-structure-allocation-tidy drivers/block/ll_rw_blk.c
--- 25/drivers/block/ll_rw_blk.c~numa-aware-block-device-control-structure-allocation-tidy	Fri May 6 15:45:56 2005
+++ 25-akpm/drivers/block/ll_rw_blk.c	Fri May 6 15:45:56 2005
@@ -28,6 +28,7 @@
 #include
 #include
 #include
+#include
 
 /*
  * for max sense size
@@ -1645,8 +1646,8 @@ static int blk_init_free_list(request_qu
 	init_waitqueue_head(&rl->wait[WRITE]);
 	init_waitqueue_head(&rl->drain);
 
-	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab,
-				request_cachep, q->node);
+	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
+				mempool_free_slab, request_cachep, q->node);
 
 	if (!rl->rq_pool)
 		return -ENOMEM;
@@ -1656,19 +1657,17 @@ static int blk_init_free_list(request_qu
 
 static int __make_request(request_queue_t *, struct bio *);
 
-request_queue_t *blk_alloc_queue_node(int gfp_mask, int node_id);
-
-inline request_queue_t *blk_alloc_queue(int gfp_mask)
+request_queue_t *blk_alloc_queue(int gfp_mask)
 {
 	return blk_alloc_queue_node(gfp_mask, -1);
 }
-
-
+EXPORT_SYMBOL(blk_alloc_queue);
 
 request_queue_t *blk_alloc_queue_node(int gfp_mask, int node_id)
 {
-	request_queue_t *q = kmem_cache_alloc_node(requestq_cachep, gfp_mask, node_id);
+	request_queue_t *q;
 
+	q = kmem_cache_alloc_node(requestq_cachep, gfp_mask, node_id);
 	if (!q)
 		return NULL;
 
@@ -1681,8 +1680,6 @@ request_queue_t *blk_alloc_queue_node(in
 
 	return q;
 }
-
-EXPORT_SYMBOL(blk_alloc_queue);
 EXPORT_SYMBOL(blk_alloc_queue_node);
 
 /**
@@ -1716,13 +1713,15 @@ EXPORT_SYMBOL(blk_alloc_queue_node);
  * blk_init_queue() must be paired with a blk_cleanup_queue() call
  * when the block device is deactivated (such as at module unload).
  **/
-inline
+
 request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
 {
 	return blk_init_queue_node(rfn, lock, -1);
 }
+EXPORT_SYMBOL(blk_init_queue);
 
-request_queue_t *blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
+request_queue_t *
+blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 {
 	request_queue_t *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
 
@@ -1772,8 +1771,6 @@ out_init:
 	kmem_cache_free(requestq_cachep, q);
 	return NULL;
 }
-
-EXPORT_SYMBOL(blk_init_queue);
 EXPORT_SYMBOL(blk_init_queue_node);
 
 int blk_get_queue(request_queue_t *q)
diff -puN drivers/ide/ide-disk.c~numa-aware-block-device-control-structure-allocation-tidy drivers/ide/ide-disk.c
--- 25/drivers/ide/ide-disk.c~numa-aware-block-device-control-structure-allocation-tidy	Fri May 6 15:45:56 2005
+++ 25-akpm/drivers/ide/ide-disk.c	Fri May 6 15:45:56 2005
@@ -1216,7 +1216,8 @@ static int idedisk_attach(ide_drive_t *d
 	if (!idkp)
 		goto failed;
 
-	g = alloc_disk_node(1 << PARTN_BITS, pcibus_to_node(drive->hwif->pci_dev->bus));
+	g = alloc_disk_node(1 << PARTN_BITS,
+			pcibus_to_node(drive->hwif->pci_dev->bus));
 	if (!g)
 		goto out_free_idkp;
 
diff -puN drivers/ide/ide-probe.c~numa-aware-block-device-control-structure-allocation-tidy drivers/ide/ide-probe.c
--- 25/drivers/ide/ide-probe.c~numa-aware-block-device-control-structure-allocation-tidy	Fri May 6 15:45:56 2005
+++ 25-akpm/drivers/ide/ide-probe.c	Fri May 6 15:45:56 2005
@@ -977,7 +977,8 @@ static int ide_init_queue(ide_drive_t *d
 	 *	do not.
 	 */
 
-	q = blk_init_queue_node(do_ide_request, &ide_lock, pcibus_to_node(drive->hwif->pci_dev->bus));
+	q = blk_init_queue_node(do_ide_request, &ide_lock,
+			pcibus_to_node(drive->hwif->pci_dev->bus));
 	if (!q)
 		return 1;
 
@@ -1094,7 +1095,8 @@ static int init_irq (ide_hwif_t *hwif)
 		hwgroup->hwif->next = hwif;
 		spin_unlock_irq(&ide_lock);
 	} else {
-		hwgroup = kmalloc_node(sizeof(ide_hwgroup_t), GFP_KERNEL, pcibus_to_node(hwif->drives[0].hwif->pci_dev->bus));
+		hwgroup = kmalloc_node(sizeof(ide_hwgroup_t), GFP_KERNEL,
+			pcibus_to_node(hwif->drives[0].hwif->pci_dev->bus));
 		if (!hwgroup)
 			goto out_up;
 
diff -puN include/linux/blkdev.h~numa-aware-block-device-control-structure-allocation-tidy include/linux/blkdev.h
--- 25/include/linux/blkdev.h~numa-aware-block-device-control-structure-allocation-tidy	Fri May 6 15:45:56 2005
+++ 25-akpm/include/linux/blkdev.h	Fri May 6 15:45:56 2005
@@ -616,7 +616,8 @@ static inline void blkdev_dequeue_reques
 /*
  * Access functions for manipulating queue properties
  */
-extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int);
+extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn,
+					spinlock_t *lock, int node_id);
 extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *);
 extern void blk_cleanup_queue(request_queue_t *);
 extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
@@ -648,7 +649,7 @@ extern void blk_wait_queue_drained(reque
 extern void blk_finish_queue_drain(request_queue_t *);
 
 int blk_get_queue(request_queue_t *);
-request_queue_t *blk_alloc_queue(int);
+request_queue_t *blk_alloc_queue(int gfp_mask);
 request_queue_t *blk_alloc_queue_node(int,int);
 #define blk_put_queue(q) blk_cleanup_queue((q))
 
diff -puN include/linux/genhd.h~numa-aware-block-device-control-structure-allocation-tidy include/linux/genhd.h
diff -puN include/linux/ide.h~numa-aware-block-device-control-structure-allocation-tidy include/linux/ide.h
diff -puN include/linux/mempool.h~numa-aware-block-device-control-structure-allocation-tidy include/linux/mempool.h
--- 25/include/linux/mempool.h~numa-aware-block-device-control-structure-allocation-tidy	Fri May 6 15:45:56 2005
+++ 25-akpm/include/linux/mempool.h	Fri May 6 15:45:56 2005
@@ -20,12 +20,14 @@ typedef struct mempool_s {
 	mempool_free_t *free;
 	wait_queue_head_t wait;
 } mempool_t;
-extern mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
-			mempool_free_t *free_fn, void *pool_data);
-extern mempool_t * mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
-			mempool_free_t *free_fn, void *pool_data, int nid);
-extern int mempool_resize(mempool_t *pool, int new_min_nr, unsigned int __nocast gfp_mask);
+extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
+			mempool_free_t *free_fn, void *pool_data);
+extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
+			mempool_free_t *free_fn, void *pool_data, int nid);
+
+extern int mempool_resize(mempool_t *pool, int new_min_nr,
+			unsigned int __nocast gfp_mask);
 extern void mempool_destroy(mempool_t *pool);
 extern void * mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask);
 extern void mempool_free(void *element, mempool_t *pool);
 
diff -puN mm/mempool.c~numa-aware-block-device-control-structure-allocation-tidy mm/mempool.c
--- 25/mm/mempool.c~numa-aware-block-device-control-structure-allocation-tidy	Fri May 6 15:45:56 2005
+++ 25-akpm/mm/mempool.c	Fri May 6 15:45:56 2005
@@ -51,23 +51,23 @@ static void free_pool(mempool_t *pool)
  * functions might sleep - as long as the mempool_alloc function is not called
  * from IRQ contexts.
  */
-inline
-mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
+mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
 				mempool_free_t *free_fn, void *pool_data)
 {
 	return mempool_create_node(min_nr,alloc_fn,free_fn, pool_data,-1);
 }
+EXPORT_SYMBOL(mempool_create);
 
-
-mempool_t * mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
-	mempool_free_t *free_fn, void *pool_data, int node_id)
+mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
+			mempool_free_t *free_fn, void *pool_data, int node_id)
 {
 	mempool_t *pool;
 	pool = kmalloc_node(sizeof(*pool), GFP_KERNEL, node_id);
 	if (!pool)
 		return NULL;
 	memset(pool, 0, sizeof(*pool));
-	pool->elements = kmalloc_node(min_nr * sizeof(void *), GFP_KERNEL, node_id);
+	pool->elements = kmalloc_node(min_nr * sizeof(void *),
+					GFP_KERNEL, node_id);
 	if (!pool->elements) {
 		kfree(pool);
 		return NULL;
@@ -94,8 +94,6 @@ mempool_t * mempool_create_node(int min_
 	}
 	return pool;
 }
-
-EXPORT_SYMBOL(mempool_create);
 EXPORT_SYMBOL(mempool_create_node);
 
 /**
_
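For readers unfamiliar with the node-aware allocators being tidied above, here is a
minimal sketch of how a driver might use them.  It is illustrative only and not part
of the patch: foo_cachep, foo_lock, do_foo_request() and foo_init_queue() are invented
names, and the include list is a guess for a 2.6.12-era tree; the *_node calls and
their argument lists are the ones visible in the diff (blk_init_queue_node(),
mempool_create_node(), pcibus_to_node()).

/*
 * Illustrative sketch only -- not part of the patch.  foo_cachep, foo_lock,
 * do_foo_request() and foo_init_queue() are invented for the example; the
 * node-aware calls mirror the usage shown in the patch above.
 */
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/topology.h>

static kmem_cache_t *foo_cachep;	/* slab cache, created at module init */
static spinlock_t foo_lock = SPIN_LOCK_UNLOCKED;

static void do_foo_request(request_queue_t *q);	/* request handler, defined elsewhere */

static int foo_init_queue(struct pci_dev *pdev, request_queue_t **qp,
			  mempool_t **poolp)
{
	int node = pcibus_to_node(pdev->bus);
	request_queue_t *q;
	mempool_t *pool;

	/* queue control structure allocated on the device's home node */
	q = blk_init_queue_node(do_foo_request, &foo_lock, node);
	if (!q)
		return -ENOMEM;

	/* request pool backed by node-local slab objects */
	pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
				   mempool_free_slab, foo_cachep, node);
	if (!pool) {
		blk_cleanup_queue(q);
		return -ENOMEM;
	}

	*qp = q;
	*poolp = pool;
	return 0;
}

The only point being illustrated is that the *_node variants take an explicit node id
(here derived from the device's PCI bus) and pass it down to kmalloc_node() and
kmem_cache_alloc_node(), so the queue and its request pool land on the device's node
rather than on whichever node happened to load the driver.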