about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author: davem <davem> 2001-12-13 11:37:41 +0000
committer: davem <davem> 2001-12-13 11:37:41 +0000
commit 6cd01a554443baeb90df209d3e279142aa1709da (patch)
tree 1e842a3828fbe3ca5d94edb148c1ecc30582f372
parent 52c281714563d37cb08de560eef8d1a6c298fb18 (diff)
download netdev-vger-cvs-6cd01a554443baeb90df209d3e279142aa1709da.tar.gz
Purge ISA DMA bouncing garbage from scsi_merge.c
Handle unchecked_isa_dma in the scsi layer by setting the bounce_limit for the queue, not the segment boundary mask :-) Patch sent to Jens.
-rw-r--r-- drivers/scsi/scsi.c        2
-rw-r--r-- drivers/scsi/scsi.h        5
-rw-r--r-- drivers/scsi/scsi_lib.c    2
-rw-r--r-- drivers/scsi/scsi_merge.c  256
4 files changed, 13 insertions, 252 deletions
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 35d336de5..01fcfd78b 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -197,8 +197,6 @@ void scsi_initialize_queue(Scsi_Device * SDpnt, struct Scsi_Host * SHpnt)
if (!SHpnt->use_clustering)
clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
- if (SHpnt->unchecked_isa_dma)
- blk_queue_segment_boundary(q, ISA_DMA_THRESHOLD);
}
#ifdef MODULE
diff --git a/drivers/scsi/scsi.h b/drivers/scsi/scsi.h
index efa95d8a0..74ce5398b 100644
--- a/drivers/scsi/scsi.h
+++ b/drivers/scsi/scsi.h
@@ -449,7 +449,8 @@ int scsi_free(void *, unsigned int);
/*
* Prototypes for functions in scsi_merge.c
*/
-extern void scsi_initialize_merge_fn(Scsi_Device * SDpnt);
+extern void scsi_initialize_merge_fn(Scsi_Device *SDpnt);
+extern int scsi_init_io(Scsi_Cmnd *SCpnt);
/*
* Prototypes for functions in scsi_queue.c
@@ -554,8 +555,6 @@ struct scsi_device {
request_queue_t request_queue;
atomic_t device_active; /* commands checked out for device */
volatile unsigned short device_busy; /* commands actually active on low-level */
- int (*scsi_init_io_fn) (Scsi_Cmnd *); /* Used to initialize
- new request */
Scsi_Cmnd *device_queue; /* queue of SCSI Command structures */
/* public: */
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 00e931304..7ce480796 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -996,7 +996,7 @@ void scsi_request_fn(request_queue_t * q)
* required). Hosts that need bounce buffers will also
* get those allocated here.
*/
- if (!SDpnt->scsi_init_io_fn(SCpnt)) {
+ if (!scsi_init_io(SCpnt)) {
SCpnt = __scsi_end_request(SCpnt, 0,
SCpnt->request.nr_sectors, 0, 0);
if( SCpnt != NULL )
diff --git a/drivers/scsi/scsi_merge.c b/drivers/scsi/scsi_merge.c
index 285f4c218..e37500f56 100644
--- a/drivers/scsi/scsi_merge.c
+++ b/drivers/scsi/scsi_merge.c
@@ -46,81 +46,28 @@
#include "constants.h"
#include <scsi/scsi_ioctl.h>
-static void dma_exhausted(Scsi_Cmnd *SCpnt, int i)
-{
- int jj;
- struct scatterlist *sgpnt;
- void **bbpnt;
- int consumed = 0;
-
- sgpnt = (struct scatterlist *) SCpnt->request_buffer;
- bbpnt = SCpnt->bounce_buffers;
-
- /*
- * Now print out a bunch of stats. First, start with the request
- * size.
- */
- printk("dma_free_sectors:%d\n", scsi_dma_free_sectors);
- printk("use_sg:%d\ti:%d\n", SCpnt->use_sg, i);
- printk("request_bufflen:%d\n", SCpnt->request_bufflen);
- /*
- * Now dump the scatter-gather table, up to the point of failure.
- */
- for (jj = 0; jj < SCpnt->use_sg; jj++) {
- printk("[%d]\tlen:%d\taddr:%p\tbounce:%p\n",
- jj,
- sgpnt[jj].length,
- sgpnt[jj].address,
- (bbpnt ? bbpnt[jj] : NULL));
- if (bbpnt && bbpnt[jj])
- consumed += sgpnt[jj].length;
- }
- printk("Total %d sectors consumed\n", consumed);
- panic("DMA pool exhausted");
-}
-
/*
- * Function: __init_io()
+ * Function: scsi_init_io()
*
- * Purpose: Prototype for io initialize function.
+ * Purpose: SCSI I/O initialize function.
*
* Arguments: SCpnt - Command descriptor we wish to initialize
- * dma_host - 1 if this host has ISA DMA issues (bus doesn't
- * expose all of the address lines, so that DMA cannot
- * be done from an arbitrary address).
*
* Returns: 1 on success.
*
* Lock status:
*
- * Notes: Only the SCpnt argument should be a non-constant variable.
- * This function is designed in such a way that it will be
- * invoked from a series of small stubs, each of which would
- * be optimized for specific circumstances.
- *
- * The advantage of this is that hosts that don't do DMA
- * get versions of the function that essentially don't have
- * any of the DMA code. Same goes for clustering - in the
- * case of hosts with no need for clustering, there is no point
- * in a whole bunch of overhead.
- *
- * Finally, in the event that a host has set can_queue to SG_ALL
- * implying that there is no limit to the length of a scatter
- * gather list, the sg count in the request won't be valid
- * (mainly because we don't need queue management functions
- * which keep the tally uptodate.
+ * Notes: The generic block layer takes care of ISA bounce
+ * buffering issues for us.
*/
-__inline static int __init_io(Scsi_Cmnd *SCpnt, int dma_host)
+int scsi_init_io(Scsi_Cmnd *SCpnt)
{
struct bio * bio;
char * buff;
int count;
- int i;
struct request * req;
- int sectors;
struct scatterlist * sgpnt;
int this_count;
- void ** bbpnt;
req = &SCpnt->request;
@@ -130,16 +77,6 @@ __inline static int __init_io(Scsi_Cmnd *SCpnt, int dma_host)
count = req->nr_phys_segments;
/*
- * If the dma pool is nearly empty, then queue a minimal request
- * with a single segment. Typically this will satisfy a single
- * buffer.
- */
- if (dma_host && scsi_dma_free_sectors <= 10) {
- this_count = req->current_nr_sectors;
- goto single_segment;
- }
-
- /*
* we used to not use scatter-gather for single segment request,
* but now we do (it makes highmem I/O easier to support without
* kmapping pages)
@@ -151,12 +88,6 @@ __inline static int __init_io(Scsi_Cmnd *SCpnt, int dma_host)
*/
SCpnt->sglist_len = (SCpnt->use_sg * sizeof(struct scatterlist));
- /* If we could potentially require ISA bounce buffers, allocate
- * space for this array here.
- */
- if (dma_host)
- SCpnt->sglist_len += (SCpnt->use_sg * sizeof(void *));
-
/* scsi_malloc can only allocate in chunks of 512 bytes so
* round it up.
*/
@@ -182,14 +113,6 @@ __inline static int __init_io(Scsi_Cmnd *SCpnt, int dma_host)
SCpnt->request_bufflen = 0;
req->buffer = NULL;
- if (dma_host)
- bbpnt = (void **) ((char *)sgpnt +
- (SCpnt->use_sg * sizeof(struct scatterlist)));
- else
- bbpnt = NULL;
-
- SCpnt->bounce_buffers = bbpnt;
-
/*
* Next, walk the list, and fill in the addresses and sizes of
* each segment.
@@ -211,174 +134,22 @@ __inline static int __init_io(Scsi_Cmnd *SCpnt, int dma_host)
SCpnt->use_sg = count;
- if (!dma_host)
- return 1;
-
- /*
- * Now allocate bounce buffers, if needed.
- */
- SCpnt->request_bufflen = 0;
- for (i = 0; i < count; i++) {
- sectors = (sgpnt[i].length >> 9);
- SCpnt->request_bufflen += sgpnt[i].length;
- if (virt_to_phys(sgpnt[i].address) + sgpnt[i].length - 1 >
- ISA_DMA_THRESHOLD) {
- if( scsi_dma_free_sectors - sectors <= 10 ) {
- /*
- * If this would nearly drain the DMA
- * pool empty, then let's stop here.
- * Don't make this request any larger.
- * This is kind of a safety valve that
- * we use - we could get screwed later
- * on if we run out completely.
- */
- SCpnt->request_bufflen -= sgpnt[i].length;
- SCpnt->use_sg = i;
- if (i == 0) {
- goto big_trouble;
- }
- break;
- }
-
- /*
- * this is not a dma host, so it will never
- * be a highmem page
- */
- bbpnt[i] = page_address(sgpnt[i].page) +sgpnt[i].offset;
- sgpnt[i].address = (char *)scsi_malloc(sgpnt[i].length);
- /*
- * If we cannot allocate memory for this DMA bounce
- * buffer, then queue just what we have done so far.
- */
- if (sgpnt[i].address == NULL) {
- printk("Warning - running low on DMA memory\n");
- SCpnt->request_bufflen -= sgpnt[i].length;
- SCpnt->use_sg = i;
- if (i == 0) {
- goto big_trouble;
- }
- break;
- }
- if (rq_data_dir(req) == WRITE)
- memcpy(sgpnt[i].address, bbpnt[i],
- sgpnt[i].length);
- }
- }
return 1;
- big_trouble:
- /*
- * We come here in the event that we get one humongous
- * request, where we need a bounce buffer, and the buffer is
- * more than we can allocate in a single call to
- * scsi_malloc(). In addition, we only come here when it is
- * the 0th element of the scatter-gather table that gets us
- * into this trouble. As a fallback, we fall back to
- * non-scatter-gather, and ask for a single segment. We make
- * a half-hearted attempt to pick a reasonably large request
- * size mainly so that we don't thrash the thing with
- * iddy-biddy requests.
- */
+single_segment:
/*
- * The original number of sectors in the 0th element of the
- * scatter-gather table.
- */
- sectors = sgpnt[0].length >> 9;
-
- /*
- * Free up the original scatter-gather table. Note that since
- * it was the 0th element that got us here, we don't have to
- * go in and free up memory from the other slots.
- */
- SCpnt->request_bufflen = 0;
- SCpnt->use_sg = 0;
- scsi_free(SCpnt->request_buffer, SCpnt->sglist_len);
-
- /*
- * Make an attempt to pick up as much as we reasonably can.
- * Just keep adding sectors until the pool starts running kind of
- * low. The limit of 30 is somewhat arbitrary - the point is that
- * it would kind of suck if we dropped down and limited ourselves to
- * single-block requests if we had hundreds of free sectors.
- */
- if( scsi_dma_free_sectors > 30 ) {
- for (this_count = 0, bio = req->bio; bio; bio = bio->bi_next) {
- if( scsi_dma_free_sectors - this_count < 30
- || this_count == sectors )
- {
- break;
- }
- this_count += bio_sectors(bio);
- }
-
- } else {
- /*
- * Yow! Take the absolute minimum here.
- */
- this_count = req->current_nr_sectors;
- }
-
- /*
- * Now drop through into the single-segment case.
- */
-
- single_segment:
- /*
* Come here if for any reason we choose to do this as a single
- * segment. Possibly the entire request, or possibly a small
- * chunk of the entire request.
+ * segment.
*/
-
bio = req->bio;
buff = req->buffer = bio_data(bio);
-
- if (dma_host || PageHighMem(bio_page(bio))) {
- /*
- * Allocate a DMA bounce buffer. If the allocation fails, fall
- * back and allocate a really small one - enough to satisfy
- * the first buffer.
- */
- if (bio_to_phys(bio) + bio->bi_size - 1 > ISA_DMA_THRESHOLD) {
- buff = (char *) scsi_malloc(this_count << 9);
- if (!buff) {
- printk("Warning - running low on DMA memory\n");
- this_count = req->current_nr_sectors;
- buff = (char *) scsi_malloc(this_count << 9);
- if (!buff) {
- dma_exhausted(SCpnt, 0);
- return 0;
- }
- }
- if (rq_data_dir(req) == WRITE) {
- unsigned long flags;
- char *buf = bio_kmap_irq(bio, &flags);
- memcpy(buff, buf, this_count << 9);
- bio_kunmap_irq(buf, &flags);
- }
- }
- }
SCpnt->request_bufflen = this_count << 9;
SCpnt->request_buffer = buff;
SCpnt->use_sg = 0;
return 1;
}
-#define INITIO(_FUNCTION, _DMA) \
-static int _FUNCTION(Scsi_Cmnd * SCpnt) \
-{ \
- return __init_io(SCpnt, _DMA); \
-}
-
-/*
- * ll_rw_blk.c now keeps track of the number of segments in
- * a request. Thus we don't have to do it any more here.
- * We always force "_VALID" to 1. Eventually clean this up
- * and get rid of the extra argument.
- */
-INITIO(scsi_init_io_v, 0)
-INITIO(scsi_init_io_vd, 1)
-
/*
* Function: scsi_initialize_merge_fn()
*
@@ -400,16 +171,7 @@ void scsi_initialize_merge_fn(Scsi_Device * SDpnt)
/*
* The generic merging functions work just fine for us.
- */
-
- if (SHpnt->unchecked_isa_dma == 0) {
- SDpnt->scsi_init_io_fn = scsi_init_io_v;
- } else {
- SDpnt->scsi_init_io_fn = scsi_init_io_vd;
- }
-
- /*
- * now enable highmem I/O, if appropriate
+ * Enable highmem I/O, if appropriate.
*/
bounce_limit = BLK_BOUNCE_HIGH;
if (SHpnt->highmem_io && (SDpnt->type == TYPE_DISK)) {
@@ -421,6 +183,8 @@ void scsi_initialize_merge_fn(Scsi_Device * SDpnt)
else
bounce_limit = SHpnt->pci_dev->dma_mask;
}
+ if (SHpnt->unchecked_isa_dma)
+ bounce_limit = ISA_DMA_THRESHOLD;
blk_queue_bounce_limit(q, bounce_limit);
}