author    davem <davem>  2001-12-13 10:36:01 +0000
committer davem <davem>  2001-12-13 10:36:01 +0000
commit    07a8bd20cd42a0d3c9ed6854de8f1f59eacc95ff (patch)
tree      90665c5fa3f44d614fb266d753dc8bbd7decb5b3
parent    83e53f8f9d1edf52b48412b6bd48b709df81b66f (diff)
download  netdev-vger-cvs-07a8bd20cd42a0d3c9ed6854de8f1f59eacc95ff.tar.gz
Reimplement IOMMU bio layer support so that
it works. Patch sent to Jens.
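The core of the rework is that a bio now carries two segment counts: bi_phys_segments (after physical-address coalescing) and bi_hw_segments (after the IOMMU has also been allowed to virtually merge segments). Whether two bio_vecs may be virtually merged is decided by the new BIOVEC_VIRT_MERGEABLE() test against a platform-defined BIO_VMERGE_BOUNDARY (8192 on sparc64). The stand-alone C sketch below is not part of the patch; it only mirrors that test with made-up physical addresses:

/* Illustrative only -- mirrors BIOVEC_VIRT_MERGEABLE() from the
 * include/linux/bio.h hunk below, using plain integers instead of
 * struct bio_vec.  Addresses are made up; 8192 is the sparc64
 * BIO_VMERGE_BOUNDARY this patch introduces.
 */
#include <stdio.h>

#define BIO_VMERGE_BOUNDARY	8192UL

/* Two physical segments can be folded into one hw (IOMMU) segment when
 * the first ends on a vmerge boundary and the second starts on one; the
 * IOMMU can then remap them to a contiguous bus address range.
 */
static int virt_mergeable(unsigned long phys1, unsigned long len1,
			  unsigned long phys2)
{
	return (((phys1 + len1) | phys2) & (BIO_VMERGE_BOUNDARY - 1)) == 0;
}

int main(void)
{
	/* join falls on an 8K boundary on both sides -> one hw segment */
	printf("%d\n", virt_mergeable(0x10000UL, 8192UL, 0x40000UL));
	/* second buffer starts mid-page -> needs its own hw segment */
	printf("%d\n", virt_mergeable(0x10000UL, 8192UL, 0x40200UL));
	return 0;
}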
-rw-r--r--  drivers/block/DAC960.c               |   7
-rw-r--r--  drivers/block/cciss.c                |   8
-rw-r--r--  drivers/block/cpqarray.c             |   8
-rw-r--r--  drivers/block/ll_rw_blk.c            | 208
-rw-r--r--  drivers/block/paride/pf.c            |   3
-rw-r--r--  drivers/ide/ide-probe.c              |   7
-rw-r--r--  drivers/message/i2o/i2o_block.c      |   9
-rw-r--r--  drivers/scsi/ncr53c8xx.c             |   9
-rw-r--r--  drivers/scsi/scsi.c                  |  10
-rw-r--r--  drivers/scsi/scsi.h                  |   3
-rw-r--r--  drivers/scsi/scsi_dma.c              |   7
-rw-r--r--  drivers/scsi/scsi_lib.c              |   9
-rw-r--r--  drivers/scsi/scsi_merge.c            | 319
-rw-r--r--  drivers/scsi/scsi_scan.c             |   4
-rw-r--r--  drivers/scsi/sd.c                    |   2
-rw-r--r--  drivers/scsi/sr.c                    |   4
-rw-r--r--  drivers/scsi/sym53c8xx.c             |  13
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.c  |   7
-rw-r--r--  fs/bio.c                             |  19
-rw-r--r--  include/asm-sparc64/io.h             |   4
-rw-r--r--  include/linux/bio.h                  |  26
-rw-r--r--  include/linux/blkdev.h               |  12
-rw-r--r--  kernel/ksyms.c                       |   2
23 files changed, 302 insertions, 398 deletions
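For driver authors the visible change is that the old single max_segments limit splits in two: blk_queue_max_hw_segments() caps the scatterlist the adapter hardware can take in one command, while blk_queue_max_phys_segments() caps what the driver itself can build before IOMMU merging. A conversion sketch follows (not an extra hunk; FOO_SG_LIMIT and do_foo_request are placeholder names, and the queue is assumed to be set up as in the DAC960 and cciss changes below):

	blk_init_queue(q, do_foo_request);

	/* Was: blk_queue_max_segments(q, FOO_SG_LIMIT); */

	/* Hard limit: address/length pairs the adapter accepts per command. */
	blk_queue_max_hw_segments(q, FOO_SG_LIMIT);

	/* Software limit on the scatterlist the driver builds; with an IOMMU
	 * merging entries this may exceed the hw limit (DAC960 below passes
	 * ~0 here).
	 */
	blk_queue_max_phys_segments(q, FOO_SG_LIMIT);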
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index eed7b446c..0f0060dbb 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -1948,8 +1948,11 @@ static boolean DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
RequestQueue = BLK_DEFAULT_QUEUE(MajorNumber);
blk_init_queue(RequestQueue, DAC960_RequestFunction);
RequestQueue->queuedata = Controller;
- RequestQueue->max_segments = Controller->DriverScatterGatherLimit;
- RequestQueue->max_sectors = Controller->MaxBlocksPerCommand;
+ blk_queue_max_hw_segments(RequestQueue,
+ Controller->DriverScatterGatherLimit);
+ blk_queue_max_phys_segments(RequestQueue, ~0);
+ blk_queue_max_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
+
Controller->RequestQueue = RequestQueue;
/*
Initialize the Disk Partitions array, Partition Sizes array, Block Sizes
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 208a785c6..e3aeae4a6 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1868,7 +1868,13 @@ static int __init cciss_init_one(struct pci_dev *pdev,
q->queuedata = hba[i];
blk_init_queue(q, do_cciss_request);
blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
- blk_queue_max_segments(q, MAXSGENTRIES);
+
+ /* This is a hardware imposed limit. */
+ blk_queue_max_hw_segments(q, MAXSGENTRIES);
+
+ /* This is a limit in the driver and could be eliminated. */
+ blk_queue_max_phys_segments(q, MAXSGENTRIES);
+
blk_queue_max_sectors(q, 512);
/* fill in the other Kernel structs */
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index e34f7b47e..4062aea0b 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -469,7 +469,13 @@ int __init cpqarray_init(void)
q->queuedata = hba[i];
blk_init_queue(q, do_ida_request);
blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
- blk_queue_max_segments(q, SG_MAX);
+
+ /* This is a hardware imposed limit. */
+ blk_queue_max_hw_segments(q, SG_MAX);
+
+ /* This is a driver limit and could be eliminated. */
+ blk_queue_max_phys_segments(q, SG_MAX);
+
blksize_size[MAJOR_NR+i] = ida_blocksizes + (i*256);
read_ahead[MAJOR_NR+i] = READ_AHEAD;
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index dce810ac4..86fe87ce6 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -144,7 +144,8 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
/*
* set defaults
*/
- q->max_segments = MAX_SEGMENTS;
+ q->max_phys_segments = MAX_PHYS_SEGMENTS;
+ q->max_hw_segments = MAX_HW_SEGMENTS;
q->make_request_fn = mfn;
blk_queue_max_sectors(q, MAX_SECTORS);
blk_queue_hardsect_size(q, 512);
@@ -201,17 +202,34 @@ void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors)
}
/**
- * blk_queue_max_segments - set max segments for a request for this queue
+ * blk_queue_max_phys_segments - set max phys segments for a request for this queue
* @q: the request queue for the device
* @max_segments: max number of segments
*
* Description:
* Enables a low level driver to set an upper limit on the number of
- * data segments in a request
+ * physical data segments in a request. This would be the largest sized
+ * scatter list the driver could handle.
**/
-void blk_queue_max_segments(request_queue_t *q, unsigned short max_segments)
+void blk_queue_max_phys_segments(request_queue_t *q, unsigned short max_segments)
{
- q->max_segments = max_segments;
+ q->max_phys_segments = max_segments;
+}
+
+/**
+ * blk_queue_max_hw_segments - set max hw segments for a request for this queue
+ * @q: the request queue for the device
+ * @max_segments: max number of segments
+ *
+ * Description:
+ * Enables a low level driver to set an upper limit on the number of
+ * hw data segments in a request. This would be the largest number of
+ * address/length pairs the host adapter can actually give at once
+ * to the device.
+ **/
+void blk_queue_max_hw_segments(request_queue_t *q, unsigned short max_segments)
+{
+ q->max_hw_segments = max_segments;
}
/**
@@ -319,44 +337,78 @@ static int ll_10byte_cmd_build(request_queue_t *q, struct request *rq)
void blk_recount_segments(request_queue_t *q, struct bio *bio)
{
struct bio_vec *bv, *bvprv = NULL;
- int i, nr_segs, seg_size, cluster;
+ int i, nr_phys_segs, nr_hw_segs, seg_size, cluster;
if (unlikely(!bio->bi_io_vec))
return;
cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
- seg_size = nr_segs = 0;
+ seg_size = nr_phys_segs = nr_hw_segs = 0;
bio_for_each_segment(bv, bio, i) {
if (bvprv && cluster) {
- if (seg_size + bv->bv_len > q->max_segment_size)
+ int phys, seg;
+
+ if (seg_size + bv->bv_len > q->max_segment_size) {
+ nr_phys_segs++;
goto new_segment;
- if (!BIOVEC_MERGEABLE(bvprv, bv))
+ }
+
+ phys = BIOVEC_PHYS_MERGEABLE(bvprv, bv);
+ seg = BIOVEC_SEG_BOUNDARY(q, bvprv, bv);
+ if (!phys || !seg)
+ nr_phys_segs++;
+ if (!seg)
goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+
+ if (!BIOVEC_VIRT_MERGEABLE(bvprv, bv))
goto new_segment;
seg_size += bv->bv_len;
bvprv = bv;
continue;
+ } else {
+ nr_phys_segs++;
}
new_segment:
- nr_segs++;
+ nr_hw_segs++;
bvprv = bv;
- seg_size = 0;
+ seg_size = bv->bv_len;
}
- bio->bi_hw_seg = nr_segs;
+ bio->bi_phys_segments = nr_phys_segs;
+ bio->bi_hw_segments = nr_hw_segs;
bio->bi_flags |= (1 << BIO_SEG_VALID);
}
-inline int blk_contig_segment(request_queue_t *q, struct bio *bio,
- struct bio *nxt)
+inline int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
+ struct bio *nxt)
{
if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
return 0;
- if (!BIO_CONTIG(bio, nxt))
+ if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
+ return 0;
+ if (bio->bi_size + nxt->bi_size > q->max_segment_size)
+ return 0;
+
+ /*
+ * bio and nxt are contiguous in memory, check if the queue allows
+ * these two to be merged into one
+ */
+ if (BIO_SEG_BOUNDARY(q, bio, nxt))
+ return 1;
+
+ return 0;
+}
+
+inline int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
+ struct bio *nxt)
+{
+ if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
+ return 0;
+
+ if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
return 0;
if (bio->bi_size + nxt->bi_size > q->max_segment_size)
return 0;
@@ -399,7 +451,7 @@ int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg
if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
goto new_segment;
- if (!BIOVEC_MERGEABLE(bvprv, bvec))
+ if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
goto new_segment;
if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
goto new_segment;
@@ -407,11 +459,6 @@ int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg
sg[nsegs - 1].length += nbytes;
} else {
new_segment:
- if (nsegs >= q->max_segments) {
- printk("map: %d >= %d, i %d, phys_segs %d, size %ld\n", nsegs, q->max_segments, i, rq->nr_phys_segments, rq->nr_sectors);
- BUG();
- }
-
sg[nsegs].address = NULL;
sg[nsegs].page = bvec->bv_page;
sg[nsegs].length = nbytes;
@@ -430,18 +477,44 @@ new_segment:
* the standard queue merge functions, can be overridden with device
* specific ones if so desired
*/
-static inline int ll_new_segment(request_queue_t *q, struct request *req,
- struct bio *bio)
+
+static inline int ll_new_mergeable(request_queue_t *q,
+ struct request *req,
+ struct bio *bio)
{
- int nr_segs = bio_hw_segments(q, bio);
+ int nr_phys_segs = bio_phys_segments(q, bio);
- if (req->nr_phys_segments + nr_segs <= q->max_segments) {
- req->nr_phys_segments += nr_segs;
- return 1;
+ if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
+ req->flags |= REQ_NOMERGE;
+ return 0;
}
- req->flags |= REQ_NOMERGE;
- return 0;
+ /*
+ * A hw segment is just getting larger, bump just the phys
+ * counter.
+ */
+ req->nr_phys_segments += nr_phys_segs;
+ return 1;
+}
+
+static inline int ll_new_hw_segment(request_queue_t *q,
+ struct request *req,
+ struct bio *bio)
+{
+ int nr_hw_segs = bio_hw_segments(q, bio);
+
+ if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments) {
+ req->flags |= REQ_NOMERGE;
+ return 0;
+ }
+
+ /*
+ * This will form the start of a new hw segment. Bump both
+ * counters.
+ */
+ req->nr_hw_segments += nr_hw_segs;
+ req->nr_phys_segments += bio_phys_segments(q, bio);
+ return 1;
}
static int ll_back_merge_fn(request_queue_t *q, struct request *req,
@@ -452,7 +525,11 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
return 0;
}
- return ll_new_segment(q, req, bio);
+ if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail),
+ __BVEC_START(bio)))
+ return ll_new_mergeable(q, req, bio);
+
+ return ll_new_hw_segment(q, req, bio);
}
static int ll_front_merge_fn(request_queue_t *q, struct request *req,
@@ -463,21 +540,49 @@ static int ll_front_merge_fn(request_queue_t *q, struct request *req,
return 0;
}
- return ll_new_segment(q, req, bio);
+ if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio),
+ __BVEC_START(req->bio)))
+ return ll_new_mergeable(q, req, bio);
+
+ return ll_new_hw_segment(q, req, bio);
}
static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
struct request *next)
{
- int total_segments = req->nr_phys_segments + next->nr_phys_segments;
+ int total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
+ int total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
- if (blk_contig_segment(q, req->biotail, next->bio))
- total_segments--;
+ /*
+ * First check if either of the requests are re-queued
+ * requests. Can't merge them if they are.
+ */
+ if (req->special || next->special)
+ return 0;
+
+ /*
+ * Will it become too large?
+ */
+ if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
+ return 0;
+
+ total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
+ if (blk_phys_contig_segment(q, req->biotail, next->bio))
+ total_phys_segments--;
+
+ if (total_phys_segments > q->max_phys_segments)
+ return 0;
+
+ total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
+ if (blk_hw_contig_segment(q, req->biotail, next->bio))
+ total_hw_segments--;
- if (total_segments > q->max_segments)
+ if (total_hw_segments > q->max_hw_segments)
return 0;
- req->nr_phys_segments = total_segments;
+ /* Merge is OK... */
+ req->nr_phys_segments = total_phys_segments;
+ req->nr_hw_segments = total_hw_segments;
return 1;
}
@@ -1101,7 +1206,7 @@ get_rq:
req->hard_sector = req->sector = sector;
req->hard_nr_sectors = req->nr_sectors = nr_sectors;
req->current_nr_sectors = req->hard_cur_sectors = cur_nr_sectors;
- req->nr_phys_segments = bio->bi_vcnt;
+ req->nr_phys_segments = bio_phys_segments(q, bio);
req->nr_hw_segments = bio_hw_segments(q, bio);
req->buffer = bio_data(bio); /* see ->buffer comment above */
req->waiting = NULL;
@@ -1429,6 +1534,9 @@ extern int stram_device_init (void);
inline void blk_recalc_request(struct request *rq, int nsect)
{
+ struct bio *bio;
+ int nr_phys_segs, nr_hw_segs;
+
rq->hard_sector += nsect;
rq->hard_nr_sectors -= nsect;
rq->sector = rq->hard_sector;
@@ -1446,7 +1554,25 @@ inline void blk_recalc_request(struct request *rq, int nsect)
rq->nr_sectors = rq->current_nr_sectors;
}
+ /* XXX Who updates the bio page/offset values to reflect
+ * XXX the hard sector number advance above??? -DaveM
+ */
rq->buffer = bio_data(rq->bio);
+
+ /* XXX If nobody is updating the BIO the following is a
+ * XXX nop. But once that is fixed, this is needed. -DaveM
+ */
+ nr_phys_segs = nr_hw_segs = 0;
+ rq_for_each_bio(bio, rq) {
+ /* Force bio hw/phys segs to be recalculated. */
+ bio->bi_flags &= ~(1 << BIO_SEG_VALID);
+
+ nr_phys_segs += bio_phys_segments(rq->q, bio);
+ nr_hw_segs += bio_hw_segments(rq->q, bio);
+ }
+
+ rq->nr_phys_segments = nr_phys_segs;
+ rq->nr_hw_segments = nr_hw_segs;
}
/**
@@ -1599,7 +1725,8 @@ EXPORT_SYMBOL(generic_unplug_device);
EXPORT_SYMBOL(blk_attempt_remerge);
EXPORT_SYMBOL(blk_max_low_pfn);
EXPORT_SYMBOL(blk_queue_max_sectors);
-EXPORT_SYMBOL(blk_queue_max_segments);
+EXPORT_SYMBOL(blk_queue_max_phys_segments);
+EXPORT_SYMBOL(blk_queue_max_hw_segments);
EXPORT_SYMBOL(blk_queue_max_segment_size);
EXPORT_SYMBOL(blk_queue_hardsect_size);
EXPORT_SYMBOL(blk_queue_segment_boundary);
@@ -1607,4 +1734,5 @@ EXPORT_SYMBOL(blk_rq_map_sg);
EXPORT_SYMBOL(blk_nohighio);
EXPORT_SYMBOL(blk_dump_rq_flags);
EXPORT_SYMBOL(submit_bio);
-EXPORT_SYMBOL(blk_contig_segment);
+EXPORT_SYMBOL(blk_phys_contig_segment);
+EXPORT_SYMBOL(blk_hw_contig_segment);
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 3feca299a..a86ec90bd 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -359,7 +359,8 @@ int pf_init (void) /* preliminary initialisation */
}
q = BLK_DEFAULT_QUEUE(MAJOR_NR);
blk_init_queue(q, DEVICE_REQUEST);
- blk_queue_max_segments(q, cluster);
+ blk_queue_max_phys_segments(q, cluster);
+ blk_queue_max_hw_segments(q, cluster);
read_ahead[MAJOR_NR] = 8; /* 8 sector (4kB) read ahead */
for (i=0;i<PF_UNITS;i++) pf_blocksizes[i] = 1024;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 82123b2e0..f076200dd 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -608,8 +608,11 @@ static void ide_init_queue(ide_drive_t *drive)
#endif
blk_queue_max_sectors(q, max_sectors);
- /* IDE DMA can do PRD_ENTRIES number of segments */
- q->max_segments = PRD_ENTRIES;
+ /* IDE DMA can do PRD_ENTRIES number of segments. */
+ blk_queue_max_hw_segments(q, PRD_ENTRIES);
+
+ /* This is a driver limit and could be eliminated. */
+ blk_queue_max_phys_segments(q, PRD_ENTRIES);
}
/*
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index bdf52592b..c64b7393b 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -1301,7 +1301,8 @@ static int i2ob_install_device(struct i2o_controller *c, struct i2o_device *d, i
request_queue_t *q = i2ob_dev[unit].req_queue;
blk_queue_max_sectors(q, 256);
- blk_queue_max_segments(q, (d->controller->status_block->inbound_frame_size - 8)/2);
+ blk_queue_max_phys_segments(q, (d->controller->status_block->inbound_frame_size - 8)/2);
+ blk_queue_max_hw_segments(q, (d->controller->status_block->inbound_frame_size - 8)/2);
if(d->controller->type == I2O_TYPE_PCI && d->controller->bus.pci.queue_buggy == 2)
i2ob_dev[i].depth = 32;
@@ -1309,14 +1310,16 @@ static int i2ob_install_device(struct i2o_controller *c, struct i2o_device *d, i
if(d->controller->type == I2O_TYPE_PCI && d->controller->bus.pci.queue_buggy == 1)
{
blk_queue_max_sectors(q, 32);
- blk_queue_max_segments(q, 8);
+ blk_queue_max_phys_segments(q, 8);
+ blk_queue_max_hw_segments(q, 8);
i2ob_dev[i].depth = 4;
}
if(d->controller->type == I2O_TYPE_PCI && d->controller->bus.pci.short_req)
{
blk_queue_max_sectors(q, 8);
- blk_queue_max_segments(q, 8);
+ blk_queue_max_phys_segments(q, 8);
+ blk_queue_max_hw_segments(q, 8);
}
}
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index f8688af4f..4d4ac2e12 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -8127,10 +8127,14 @@ static int ncr_scatter(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd)
segment = 1;
}
}
- else if (use_sg <= MAX_SCATTER) {
+ else {
struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
use_sg = map_scsi_sg_data(np, cmd);
+ if (use_sg > MAX_SCATTER) {
+ unmap_scsi_sg_data(np, cmd);
+ return -1;
+ }
data = &data[MAX_SCATTER - use_sg];
while (segment < use_sg) {
@@ -8143,9 +8147,6 @@ static int ncr_scatter(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd)
++segment;
}
}
- else {
- return -1;
- }
return segment;
}
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 6faaad0b7..35d336de5 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -181,12 +181,18 @@ void scsi_build_commandblocks(Scsi_Device * SDpnt);
void scsi_initialize_queue(Scsi_Device * SDpnt, struct Scsi_Host * SHpnt)
{
request_queue_t *q = &SDpnt->request_queue;
- int max_segments = SHpnt->sg_tablesize;
blk_init_queue(q, scsi_request_fn);
q->queuedata = (void *) SDpnt;
- blk_queue_max_segments(q, max_segments);
+ /* Hardware imposed limit. */
+ blk_queue_max_hw_segments(q, SHpnt->sg_tablesize);
+
+ /* If we stopped using scsi_malloc for the scatterlists
+ * we could eliminate this limit too.
+ */
+ blk_queue_max_phys_segments(q, PAGE_SIZE / sizeof(struct scatterlist));
+
blk_queue_max_sectors(q, SHpnt->max_sectors);
if (!SHpnt->use_clustering)
diff --git a/drivers/scsi/scsi.h b/drivers/scsi/scsi.h
index 3e6b1c3b3..efa95d8a0 100644
--- a/drivers/scsi/scsi.h
+++ b/drivers/scsi/scsi.h
@@ -449,8 +449,7 @@ int scsi_free(void *, unsigned int);
/*
* Prototypes for functions in scsi_merge.c
*/
-extern void recount_segments(Scsi_Cmnd * SCpnt);
-extern void initialize_merge_fn(Scsi_Device * SDpnt);
+extern void scsi_initialize_merge_fn(Scsi_Device * SDpnt);
/*
* Prototypes for functions in scsi_queue.c
diff --git a/drivers/scsi/scsi_dma.c b/drivers/scsi/scsi_dma.c
index 76fbc3aaf..0d5ad403b 100644
--- a/drivers/scsi/scsi_dma.c
+++ b/drivers/scsi/scsi_dma.c
@@ -246,8 +246,11 @@ void scsi_resize_dma_pool(void)
*/
if (SDpnt->type == TYPE_WORM || SDpnt->type == TYPE_ROM ||
SDpnt->type == TYPE_DISK || SDpnt->type == TYPE_MOD) {
- /* XXX This needs fixing -DaveM */
- int nents = host->sg_tablesize;
+ /* XXX We should not be using scsi_malloc() for
+ * XXX scatterlists. MUST FIXME -DaveM
+ */
+ int nents = PAGE_SIZE / sizeof(struct scatterlist);
+
new_dma_sectors += ((nents *
sizeof(struct scatterlist) + 511) >> 9) *
SDpnt->queue_depth;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 6e5cae494..00e931304 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -924,15 +924,6 @@ void scsi_request_fn(request_queue_t * q)
*/
if (req->special) {
SCpnt = (Scsi_Cmnd *) req->special;
- /*
- * We need to recount the number of
- * scatter-gather segments here - the
- * normal case code assumes this to be
- * correct, as it would be a performance
- * loss to always recount. Handling
- * errors is always unusual, of course.
- */
- recount_segments(SCpnt);
} else {
SCpnt = scsi_allocate_device(SDpnt, FALSE, FALSE);
}
diff --git a/drivers/scsi/scsi_merge.c b/drivers/scsi/scsi_merge.c
index 23995ab3f..285f4c218 100644
--- a/drivers/scsi/scsi_merge.c
+++ b/drivers/scsi/scsi_merge.c
@@ -11,26 +11,8 @@
/*
* This file contains queue management functions that are used by SCSI.
- * Typically this is used for several purposes. First, we need to ensure
- * that commands do not grow so large that they cannot be handled all at
- * once by a host adapter. The various flavors of merge functions included
- * here serve this purpose.
- *
- * Note that it would be quite trivial to allow the low-level driver the
- * flexibility to define it's own queue handling functions. For the time
- * being, the hooks are not present. Right now we are just using the
- * data in the host template as an indicator of how we should be handling
- * queues, and we select routines that are optimized for that purpose.
- *
- * Some hosts do not impose any restrictions on the size of a request.
- * In such cases none of the merge functions in this file are called,
- * and we allow ll_rw_blk to merge requests in the default manner.
- * This isn't guaranteed to be optimal, but it should be pretty darned
- * good. If someone comes up with ideas of better ways of managing queues
- * to improve on the default behavior, then certainly fit it into this
- * scheme in whatever manner makes the most sense. Please note that
- * since each device has it's own queue, we have considerable flexibility
- * in queue management.
+ * We need to ensure that commands do not grow so large that they cannot
+ * be handled all at once by a host adapter.
*/
#define __NO_VERSION__
@@ -64,15 +46,7 @@
#include "constants.h"
#include <scsi/scsi_ioctl.h>
-/*
- * This means that bounce buffers cannot be allocated in chunks > PAGE_SIZE.
- * Ultimately we should get away from using a dedicated DMA bounce buffer
- * pool, and we should instead try and use kmalloc() instead. If we can
- * eliminate this pool, then this restriction would no longer be needed.
- */
-#define DMA_SEGMENT_SIZE_LIMITED
-
-static void dma_exhausted(Scsi_Cmnd * SCpnt, int i)
+static void dma_exhausted(Scsi_Cmnd *SCpnt, int i)
{
int jj;
struct scatterlist *sgpnt;
@@ -92,8 +66,7 @@ static void dma_exhausted(Scsi_Cmnd * SCpnt, int i)
/*
* Now dump the scatter-gather table, up to the point of failure.
*/
- for(jj=0; jj < SCpnt->use_sg; jj++)
- {
+ for (jj = 0; jj < SCpnt->use_sg; jj++) {
printk("[%d]\tlen:%d\taddr:%p\tbounce:%p\n",
jj,
sgpnt[jj].length,
@@ -107,260 +80,11 @@ static void dma_exhausted(Scsi_Cmnd * SCpnt, int i)
}
/*
- * This entire source file deals with the new queueing code.
- */
-
-/*
- * Function: __count_segments()
- *
- * Purpose: Prototype for queue merge function.
- *
- * Arguments: q - Queue for which we are merging request.
- * req - request into which we wish to merge.
- * dma_host - 1 if this host has ISA DMA issues (bus doesn't
- * expose all of the address lines, so that DMA cannot
- * be done from an arbitrary address).
- * remainder - used to track the residual size of the last
- * segment. Comes in handy when we want to limit the
- * size of bounce buffer segments to PAGE_SIZE.
- *
- * Returns: Count of the number of SG segments for the request.
- *
- * Lock status:
- *
- * Notes: This is only used for diagnostic purposes.
- */
-__inline static int __count_segments(struct request *req,
- int dma_host,
- int * remainder)
-{
- int ret = 1;
- int reqsize = 0;
- int i;
- struct bio *bio;
- struct bio_vec *bvec;
-
- if (remainder)
- reqsize = *remainder;
-
- /*
- * Add in the size increment for the first buffer.
- */
- bio = req->bio;
-#ifdef DMA_SEGMENT_SIZE_LIMITED
- if (reqsize + bio->bi_size > PAGE_SIZE)
- ret++;
-#endif
-
- rq_for_each_bio(bio, req) {
- bio_for_each_segment(bvec, bio, i)
- ret++;
-
- reqsize += bio->bi_size;
- }
-
- if (remainder)
- *remainder = reqsize;
-
- return ret;
-}
-
-/*
- * Function: recount_segments()
- *
- * Purpose: Recount the number of scatter-gather segments for this request.
- *
- * Arguments: req - request that needs recounting.
- *
- * Returns: Count of the number of SG segments for the request.
- *
- * Lock status: Irrelevant.
- *
- * Notes: This is only used when we have partially completed requests
- * and the bit that is leftover is of an indeterminate size.
- * This can come up if you get a MEDIUM_ERROR, for example,
- * as we will have "completed" all of the sectors up to and
- * including the bad sector, and the leftover bit is what
- * we have to do now. This tends to be a rare occurrence, so
- * we aren't busting our butts to instantiate separate versions
- * of this function for the 4 different flag values. We
- * probably should, however.
- */
-void
-recount_segments(Scsi_Cmnd * SCpnt)
-{
- struct request *req = &SCpnt->request;
- struct Scsi_Host *SHpnt = SCpnt->host;
-
- req->nr_phys_segments = __count_segments(req, SHpnt->unchecked_isa_dma,NULL);
-}
-
-static inline int scsi_new_segment(request_queue_t * q,
- struct request * req,
- struct bio *bio)
-{
- int nr_segs = bio_hw_segments(q, bio);
-
- if (req->nr_phys_segments + nr_segs > q->max_segments) {
- req->flags |= REQ_NOMERGE;
- return 0;
- }
-
- /*
- * This will form the start of a new segment. Bump the
- * counter.
- */
- req->nr_phys_segments += nr_segs;
- return 1;
-}
-
-/*
- * Function: __scsi_merge_fn()
- *
- * Purpose: Prototype for queue merge function.
- *
- * Arguments: q - Queue for which we are merging request.
- * req - request into which we wish to merge.
- * bio - Block which we may wish to merge into request
- * dma_host - 1 if this host has ISA DMA issues (bus doesn't
- * expose all of the address lines, so that DMA cannot
- * be done from an arbitrary address).
- *
- * Returns: 1 if it is OK to merge the block into the request. 0
- * if it is not OK.
- *
- * Lock status: queue lock is assumed to be held here.
- *
- * Notes: Some drivers have limited scatter-gather table sizes, and
- * thus they cannot queue an infinitely large command. This
- * function is called from ll_rw_blk before it attempts to merge
- * a new block into a request to make sure that the request will
- * not become too large.
- *
- * This function is not designed to be directly called. Instead
- * it should be referenced from other functions where the
- * dma_host parameter should be an integer constant. The
- * compiler should thus be able to properly optimize the code,
- * eliminating stuff that is irrelevant.
- * It is more maintainable to do this way with a single function
- * than to have 4 separate functions all doing roughly the
- * same thing.
- */
-__inline static int __scsi_back_merge_fn(request_queue_t * q,
- struct request *req,
- struct bio *bio)
-{
- if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
- req->flags |= REQ_NOMERGE;
- return 0;
- }
-
- return scsi_new_segment(q, req, bio);
-}
-
-__inline static int __scsi_front_merge_fn(request_queue_t * q,
- struct request *req,
- struct bio *bio)
-{
- if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
- req->flags |= REQ_NOMERGE;
- return 0;
- }
-
- return scsi_new_segment(q, req, bio);
-}
-
-/*
- * Function: scsi_merge_fn_()
- *
- * Purpose: queue merge function.
- *
- * Arguments: q - Queue for which we are merging request.
- * req - request into which we wish to merge.
- * bio - Block which we may wish to merge into request
- *
- * Returns: 1 if it is OK to merge the block into the request. 0
- * if it is not OK.
- *
- * Lock status: queue lock is assumed to be held here.
- *
- * Notes: Optimized for different cases depending upon whether
- * ISA DMA is in use and whether clustering should be used.
- */
-#define MERGEFCT(_FUNCTION, _BACK_FRONT) \
-static int _FUNCTION(request_queue_t * q, \
- struct request * req, \
- struct bio *bio) \
-{ \
- int ret; \
- ret = __scsi_ ## _BACK_FRONT ## _merge_fn(q, \
- req, \
- bio); \
- return ret; \
-}
-
-MERGEFCT(scsi_back_merge_fn, back)
-MERGEFCT(scsi_front_merge_fn, front)
-
-/*
- * Function: scsi_merge_requests_fn_()
- *
- * Purpose: queue merge function.
- *
- * Arguments: q - Queue for which we are merging request.
- * req - request into which we wish to merge.
- * next - Block which we may wish to merge into request
- *
- * Returns: 1 if it is OK to merge the block into the request. 0
- * if it is not OK.
- *
- * Lock status: queue lock is assumed to be held here.
- *
- */
-inline static int scsi_merge_requests_fn(request_queue_t * q,
- struct request *req,
- struct request *next)
-{
- int bio_segs;
-
- /*
- * First check if the either of the requests are re-queued
- * requests. Can't merge them if they are.
- */
- if (req->special || next->special)
- return 0;
-
- /*
- * will become to large?
- */
- if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
- return 0;
-
- bio_segs = req->nr_phys_segments + next->nr_phys_segments;
- if (blk_contig_segment(q, req->biotail, next->bio))
- bio_segs--;
-
- /*
- * exceeds our max allowed segments?
- */
- if (bio_segs > q->max_segments)
- return 0;
-
- /*
- * This will form the start of a new segment. Bump the
- * counter.
- */
- req->nr_phys_segments = bio_segs;
- return 1;
-}
-
-/*
* Function: __init_io()
*
* Purpose: Prototype for io initialize function.
*
* Arguments: SCpnt - Command descriptor we wish to initialize
- * sg_count_valid - 1 if the sg count in the req is valid.
* dma_host - 1 if this host has ISA DMA issues (bus doesn't
* expose all of the address lines, so that DMA cannot
* be done from an arbitrary address).
@@ -386,9 +110,7 @@ inline static int scsi_merge_requests_fn(request_queue_t * q,
* (mainly because we don't need queue management functions
* which keep the tally uptodate.
*/
-__inline static int __init_io(Scsi_Cmnd * SCpnt,
- int sg_count_valid,
- int dma_host)
+__inline static int __init_io(Scsi_Cmnd *SCpnt, int dma_host)
{
struct bio * bio;
char * buff;
@@ -405,11 +127,7 @@ __inline static int __init_io(Scsi_Cmnd * SCpnt,
/*
* First we need to know how many scatter gather segments are needed.
*/
- if (!sg_count_valid) {
- count = __count_segments(req, dma_host, NULL);
- } else {
- count = req->nr_phys_segments;
- }
+ count = req->nr_phys_segments;
/*
* If the dma pool is nearly empty, then queue a minimal request
@@ -646,10 +364,10 @@ __inline static int __init_io(Scsi_Cmnd * SCpnt,
return 1;
}
-#define INITIO(_FUNCTION, _VALID, _DMA) \
+#define INITIO(_FUNCTION, _DMA) \
static int _FUNCTION(Scsi_Cmnd * SCpnt) \
{ \
- return __init_io(SCpnt, _VALID, _DMA); \
+ return __init_io(SCpnt, _DMA); \
}
/*
@@ -658,11 +376,11 @@ static int _FUNCTION(Scsi_Cmnd * SCpnt) \
* We always force "_VALID" to 1. Eventually clean this up
* and get rid of the extra argument.
*/
-INITIO(scsi_init_io_v, 1, 0)
-INITIO(scsi_init_io_vd, 1, 1)
+INITIO(scsi_init_io_v, 0)
+INITIO(scsi_init_io_vd, 1)
/*
- * Function: initialize_merge_fn()
+ * Function: scsi_initialize_merge_fn()
*
* Purpose: Initialize merge function for a host
*
@@ -674,26 +392,15 @@ INITIO(scsi_init_io_vd, 1, 1)
*
* Notes:
*/
-void initialize_merge_fn(Scsi_Device * SDpnt)
+void scsi_initialize_merge_fn(Scsi_Device * SDpnt)
{
struct Scsi_Host *SHpnt = SDpnt->host;
request_queue_t *q = &SDpnt->request_queue;
dma64_addr_t bounce_limit;
/*
- * If this host has an unlimited tablesize, then don't bother with a
- * merge manager. The whole point of the operation is to make sure
- * that requests don't grow too large, and this host isn't picky.
- *
- * Note that ll_rw_blk.c is effectively maintaining a segment
- * count which is only valid if clustering is used, and it obviously
- * doesn't handle the DMA case. In the end, it
- * is simply easier to do it ourselves with our own functions
- * rather than rely upon the default behavior of ll_rw_blk.
+ * The generic merging functions work just fine for us.
*/
- q->back_merge_fn = scsi_back_merge_fn;
- q->front_merge_fn = scsi_front_merge_fn;
- q->merge_requests_fn = scsi_merge_requests_fn;
if (SHpnt->unchecked_isa_dma == 0) {
SDpnt->scsi_init_io_fn = scsi_init_io_v;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index ad5a6ba2e..a9a836da5 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -323,7 +323,7 @@ void scan_scsis(struct Scsi_Host *shpnt,
SDpnt->host = shpnt;
SDpnt->online = TRUE;
- initialize_merge_fn(SDpnt);
+ scsi_initialize_merge_fn(SDpnt);
/*
* Initialize the object that we will use to wait for command blocks.
@@ -765,7 +765,7 @@ static int scan_scsis_single(unsigned int channel, unsigned int dev,
*/
scsi_initialize_queue(SDpnt, shpnt);
SDpnt->host = shpnt;
- initialize_merge_fn(SDpnt);
+ scsi_initialize_merge_fn(SDpnt);
/*
* Mark this device as online, or otherwise we won't be able to do much with it.
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index eb93a833a..8c7c31f31 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1111,7 +1111,7 @@ static int sd_init()
* commands if they know what they're doing and they ask for it
* explicitly via the SHpnt->max_sectors API.
*/
- sd_max_sectors[i] = MAX_SEGMENTS*8;
+ sd_max_sectors[i] = MAX_PHYS_SEGMENTS*8;
}
for (i = 0; i < N_USED_SD_MAJORS; i++) {
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 1d1c27141..89b10acb4 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -428,6 +428,10 @@ static int sr_init_command(Scsi_Cmnd * SCpnt)
/*
* request doesn't start on hw block boundary, add scatter pads
+ *
+ * XXX Jens can this ever happen? The SCSI disk support code
+ * XXX marks this as a hard error. It would be nice to kill
+ * XXX off crap like sr_scatter_pad() -DaveM
*/
if ((SCpnt->request.sector % (s_size >> 9)) || (SCpnt->request_bufflen % s_size))
if (sr_scatter_pad(SCpnt, s_size))
diff --git a/drivers/scsi/sym53c8xx.c b/drivers/scsi/sym53c8xx.c
index 97ed8bb36..826e32ab3 100644
--- a/drivers/scsi/sym53c8xx.c
+++ b/drivers/scsi/sym53c8xx.c
@@ -12126,13 +12126,16 @@ static int ncr_scatter_896R1(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd)
if (!use_sg)
segn = ncr_scatter_no_sglist(np, cp, cmd);
- else if (use_sg > MAX_SCATTER)
- segn = -1;
else {
struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
struct scr_tblmove *data;
use_sg = map_scsi_sg_data(np, cmd);
+ if (use_sg > MAX_SCATTER) {
+ unmap_scsi_data(np, cmd);
+ return -1;
+ }
+
data = &cp->phys.data[MAX_SCATTER - use_sg];
for (segn = 0; segn < use_sg; segn++) {
@@ -12165,13 +12168,15 @@ static int ncr_scatter(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd)
if (!use_sg)
segment = ncr_scatter_no_sglist(np, cp, cmd);
- else if (use_sg > MAX_SCATTER)
- segment = -1;
else {
struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
struct scr_tblmove *data;
use_sg = map_scsi_sg_data(np, cmd);
+ if (use_sg > MAX_SCATTER) {
+ unmap_scsi_sg_data(np, cmd);
+ return -1;
+ }
data = &cp->phys.data[MAX_SCATTER - use_sg];
for (segment = 0; segment < use_sg; segment++) {
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 68157d736..d8c43a9d5 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -647,12 +647,15 @@ static int sym_scatter(hcb_p np, ccb_p cp, Scsi_Cmnd *cmd)
if (!use_sg)
segment = sym_scatter_no_sglist(np, cp, cmd);
- else if (use_sg > SYM_CONF_MAX_SG)
- segment = -1;
else if ((use_sg = map_scsi_sg_data(np, cmd)) > 0) {
struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
struct sym_tblmove *data;
+ if (use_sg > SYM_CONF_MAX_SG) {
+ unmap_scsi_data(np, cmd);
+ return -1;
+ }
+
data = &cp->phys.data[SYM_CONF_MAX_SG - use_sg];
for (segment = 0; segment < use_sg; segment++) {
diff --git a/fs/bio.c b/fs/bio.c
index 82136b2f4..085247a16 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -111,7 +111,8 @@ inline void bio_init(struct bio *bio)
bio->bi_rw = 0;
bio->bi_vcnt = 0;
bio->bi_idx = 0;
- bio->bi_hw_seg = 0;
+ bio->bi_phys_segments = 0;
+ bio->bi_hw_segments = 0;
bio->bi_size = 0;
bio->bi_end_io = NULL;
atomic_set(&bio->bi_cnt, 1);
@@ -166,12 +167,20 @@ void bio_put(struct bio *bio)
}
}
+inline int bio_phys_segments(request_queue_t *q, struct bio *bio)
+{
+ if (unlikely(!(bio->bi_flags & (1 << BIO_SEG_VALID))))
+ blk_recount_segments(q, bio);
+
+ return bio->bi_phys_segments;
+}
+
inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
{
- if (unlikely(!(bio->bi_flags & BIO_SEG_VALID)))
+ if (unlikely(!(bio->bi_flags & (1 << BIO_SEG_VALID))))
blk_recount_segments(q, bio);
- return bio->bi_hw_seg;
+ return bio->bi_hw_segments;
}
/**
@@ -199,7 +208,8 @@ inline void __bio_clone(struct bio *bio, struct bio *bio_src)
bio->bi_vcnt = bio_src->bi_vcnt;
bio->bi_idx = bio_src->bi_idx;
if (bio_src->bi_flags & (1 << BIO_SEG_VALID)) {
- bio->bi_hw_seg = bio_src->bi_hw_seg;
+ bio->bi_phys_segments = bio_src->bi_phys_segments;
+ bio->bi_hw_segments = bio_src->bi_hw_segments;
bio->bi_flags |= (1 << BIO_SEG_VALID);
}
bio->bi_size = bio_src->bi_size;
@@ -513,4 +523,5 @@ EXPORT_SYMBOL(bio_init);
EXPORT_SYMBOL(bio_copy);
EXPORT_SYMBOL(__bio_clone);
EXPORT_SYMBOL(bio_clone);
+EXPORT_SYMBOL(bio_phys_segments);
EXPORT_SYMBOL(bio_hw_segments);
diff --git a/include/asm-sparc64/io.h b/include/asm-sparc64/io.h
index be6c5546b..ec3a75c92 100644
--- a/include/asm-sparc64/io.h
+++ b/include/asm-sparc64/io.h
@@ -1,4 +1,4 @@
-/* $Id: io.h,v 1.46 2001-12-13 04:16:52 davem Exp $ */
+/* $Id: io.h,v 1.47 2001-12-13 10:36:02 davem Exp $ */
#ifndef __SPARC64_IO_H
#define __SPARC64_IO_H
@@ -18,8 +18,10 @@ extern unsigned long virt_to_bus_not_defined_use_pci_map(volatile void *addr);
extern unsigned long bus_to_virt_not_defined_use_pci_map(volatile void *addr);
#define bus_to_virt bus_to_virt_not_defined_use_pci_map
+/* BIO layer definitions. */
extern unsigned long phys_base;
#define page_to_phys(page) ((((page) - mem_map) << PAGE_SHIFT)+phys_base)
+#define BIO_VMERGE_BOUNDARY 8192
/* Different PCI controllers we support have their PCI MEM space
* mapped to an either 2GB (Psycho) or 4GB (Sabre) aligned area,
diff --git a/include/linux/bio.h b/include/linux/bio.h
index dca5debcf..5b33fa7e6 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -61,7 +61,17 @@ struct bio {
unsigned short bi_vcnt; /* how many bio_vec's */
unsigned short bi_idx; /* current index into bvl_vec */
- unsigned short bi_hw_seg; /* actual mapped segments */
+
+ /* Number of segments in this BIO after
+ * physical address coalescing is performed.
+ */
+ unsigned short bi_phys_segments;
+
+ /* Number of segments after physical and DMA remapping
+ * hardware coalescing is performed.
+ */
+ unsigned short bi_hw_segments;
+
unsigned int bi_size; /* residual I/O count */
unsigned int bi_max; /* max bvl_vecs we can hold,
used as index into pool */
@@ -128,12 +138,19 @@ struct bio {
/*
* merge helpers etc
*/
+
+/* Platforms may set this to teach the BIO layer about IOMMU hardware. */
+#include <asm/io.h>
+#ifndef BIO_VMERGE_BOUNDARY
+#define BIO_VMERGE_BOUNDARY 0
+#endif
+
#define __BVEC_END(bio) bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
#define __BVEC_START(bio) bio_iovec_idx((bio), 0)
-#define BIOVEC_MERGEABLE(vec1, vec2) \
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
-#define BIO_CONTIG(bio, nxt) \
- BIOVEC_MERGEABLE(__BVEC_END((bio)), __BVEC_START((nxt)))
+#define BIOVEC_VIRT_MERGEABLE(vec1, vec2) \
+ ((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (BIO_VMERGE_BOUNDARY - 1)) == 0)
#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
@@ -176,6 +193,7 @@ extern void bio_put(struct bio *);
extern int bio_endio(struct bio *, int, int);
struct request_queue;
+extern inline int bio_phys_segments(struct request_queue *, struct bio *);
extern inline int bio_hw_segments(struct request_queue *, struct bio *);
extern inline void __bio_clone(struct bio *, struct bio *);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 658f94e7e..8f82353c7 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -177,7 +177,8 @@ struct request_queue
* queue settings
*/
unsigned short max_sectors;
- unsigned short max_segments;
+ unsigned short max_phys_segments;
+ unsigned short max_hw_segments;
unsigned short hardsect_size;
unsigned int max_segment_size;
@@ -268,7 +269,8 @@ extern struct request *blk_get_request(request_queue_t *, int, int);
extern void blk_put_request(struct request *);
extern void blk_plug_device(request_queue_t *);
extern void blk_recount_segments(request_queue_t *, struct bio *);
-extern inline int blk_contig_segment(request_queue_t *q, struct bio *, struct bio *);
+extern inline int blk_phys_contig_segment(request_queue_t *q, struct bio *, struct bio *);
+extern inline int blk_hw_contig_segment(request_queue_t *q, struct bio *, struct bio *);
extern int block_ioctl(kdev_t, unsigned int, unsigned long);
@@ -280,7 +282,8 @@ extern void blk_cleanup_queue(request_queue_t *);
extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
extern void blk_queue_bounce_limit(request_queue_t *, u64);
extern void blk_queue_max_sectors(request_queue_t *q, unsigned short);
-extern void blk_queue_max_segments(request_queue_t *q, unsigned short);
+extern void blk_queue_max_phys_segments(request_queue_t *q, unsigned short);
+extern void blk_queue_max_hw_segments(request_queue_t *q, unsigned short);
extern void blk_queue_max_segment_size(request_queue_t *q, unsigned int);
extern void blk_queue_hardsect_size(request_queue_t *q, unsigned short);
extern void blk_queue_segment_boundary(request_queue_t *q, unsigned long);
@@ -294,7 +297,8 @@ extern int * blksize_size[MAX_BLKDEV];
extern int * max_readahead[MAX_BLKDEV];
-#define MAX_SEGMENTS 128
+#define MAX_PHYS_SEGMENTS 128
+#define MAX_HW_SEGMENTS 128
#define MAX_SECTORS 255
#define MAX_SEGMENT_SIZE 65536
diff --git a/kernel/ksyms.c b/kernel/ksyms.c
index 7ed05fafe..8fab28b20 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -61,7 +61,7 @@ extern void set_device_ro(kdev_t dev,int flag);
extern void *sys_call_table;
extern struct timezone sys_tz;
-extern int request_dma(unsigned int dmanr, char * deviceID);
+extern int request_dma(unsigned int dmanr, const char * deviceID);
extern void free_dma(unsigned int dmanr);
extern spinlock_t dma_spin_lock;