aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authordavem <davem>2001-12-11 06:11:51 +0000
committerdavem <davem>2001-12-11 06:11:51 +0000
commit414ce907724d3059d11d18410b6b64d56866c95a (patch)
tree471e3336a50a03f0df9bf55c09bcd23af17d4a0a
parent470e747d5619a63fadeda686b0a73cde195c787c (diff)
downloadnetdev-vger-cvs-414ce907724d3059d11d18410b6b64d56866c95a.tar.gz
Quick unchecked merge to 2.5.1-pre9
-rw-r--r--Documentation/DocBook/via-audio.tmpl7
-rw-r--r--Documentation/filesystems/ntfs.txt10
-rw-r--r--MAINTAINERS4
-rw-r--r--Makefile2
-rw-r--r--arch/alpha/kernel/irq_i8259.c2
-rw-r--r--arch/ia64/kernel/efivars.c2
-rw-r--r--arch/ia64/kernel/pci.c2
-rw-r--r--arch/ia64/sn/io/hubspc.c2
-rw-r--r--arch/ppc/kernel/i8259.c2
-rw-r--r--arch/ppc/kernel/pmac_pic.c2
-rw-r--r--arch/ppc/kernel/prom.c2
-rw-r--r--arch/sparc64/defconfig22
-rw-r--r--arch/sparc64/solaris/timod.c4
-rw-r--r--drivers/block/ll_rw_blk.c133
-rw-r--r--drivers/block/ps2esdi.c5
-rw-r--r--drivers/char/sysrq.c2
-rw-r--r--drivers/ide/ide-dma.c5
-rw-r--r--drivers/ide/ide-tape.c6
-rw-r--r--drivers/ide/ide.c2
-rw-r--r--drivers/net/Config.in3
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/de2104x.c2141
-rw-r--r--drivers/scsi/cpqfc.Readme5
-rw-r--r--drivers/scsi/cpqfcTScontrol.c26
-rw-r--r--drivers/scsi/cpqfcTSinit.c28
-rw-r--r--drivers/scsi/cpqfcTSstructs.h4
-rw-r--r--drivers/scsi/cpqfcTSworker.c9
-rw-r--r--drivers/scsi/ide-scsi.c2
-rw-r--r--drivers/scsi/scsi.c11
-rw-r--r--drivers/scsi/scsi_merge.c179
-rw-r--r--drivers/sound/via82cxxx_audio.c7
-rw-r--r--fs/bio.c37
-rw-r--r--fs/buffer.c7
-rw-r--r--fs/dcache.c2
-rw-r--r--fs/dquot.c17
-rw-r--r--fs/inode.c12
-rw-r--r--fs/ntfs/Makefile2
-rw-r--r--fs/ntfs/attr.c4
-rw-r--r--fs/ntfs/fs.c6
-rw-r--r--fs/ntfs/inode.c106
-rw-r--r--fs/ntfs/support.c2
-rw-r--r--include/asm-i386/io.h7
-rw-r--r--include/asm-sparc64/io.h5
-rw-r--r--include/linux/bio.h22
-rw-r--r--include/linux/blk.h3
-rw-r--r--include/linux/blkdev.h2
-rw-r--r--init/do_mounts.c34
-rw-r--r--kernel/sysctl.c3
-rw-r--r--net/ipv4/tcp_input.c3
49 files changed, 2629 insertions, 277 deletions
diff --git a/Documentation/DocBook/via-audio.tmpl b/Documentation/DocBook/via-audio.tmpl
index 22588c519..f06641a62 100644
--- a/Documentation/DocBook/via-audio.tmpl
+++ b/Documentation/DocBook/via-audio.tmpl
@@ -8,11 +8,6 @@
<author>
<firstname>Jeff</firstname>
<surname>Garzik</surname>
- <affiliation>
- <address>
- <email>jgarzik@mandrakesoft.com</email>
- </address>
- </affiliation>
</author>
</authorgroup>
@@ -115,7 +110,7 @@
<sect1 id="bugrepdiag"><title>Diagnostic output</title>
<para>
Obtain the via-audio-diag diagnostics program from
- http://gtf.org/garzik/drivers/via82cxxx/ and provide a dump of the
+ http://sf.net/projects/gkernel/ and provide a dump of the
audio chip's registers while the problem is occurring. Sample command line:
</para>
<programlisting>
diff --git a/Documentation/filesystems/ntfs.txt b/Documentation/filesystems/ntfs.txt
index ad4a9f553..0624f1dd2 100644
--- a/Documentation/filesystems/ntfs.txt
+++ b/Documentation/filesystems/ntfs.txt
@@ -98,6 +98,16 @@ list at sourceforge: linux-ntfs-dev@lists.sourceforge.net
ChangeLog
=========
+NTFS 1.1.21:
+ - Fixed bug with reading $MFT where we try to read higher mft records
+ before having read the $DATA attribute of $MFT. (Note this is only a
+ partial solution which will only work in the case that the attribute
+ list is resident or non-resident but $DATA is in the first 1024
+ bytes. But this should be enough in the majority of cases. I am not
+ going to bother fixing the general case until someone finds this to
+ be a problem for them, which I doubt very much will ever happen...)
+ - Fixed bogus BUG() call in readdir().
+
NTFS 1.1.20:
- Fixed two bugs in ntfs_readwrite_attr(). Thanks to Jan Kara for
spotting the out of bounds one.
diff --git a/MAINTAINERS b/MAINTAINERS
index 49d63240b..47cabeef9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1704,10 +1704,8 @@ S: Maintained
VIA 82Cxxx AUDIO DRIVER
P: Jeff Garzik
-M: jgarzik@mandrakesoft.com
L: linux-via@gtf.org
-W: http://sourceforge.net/projects/gkernel/
-S: Maintained
+S: Odd fixes
USB DIAMOND RIO500 DRIVER
P: Cesar Miquel
diff --git a/Makefile b/Makefile
index 8fe142a3a..a859f46fd 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 5
SUBLEVEL = 1
-EXTRAVERSION =-pre8
+EXTRAVERSION =-pre9
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
diff --git a/arch/alpha/kernel/irq_i8259.c b/arch/alpha/kernel/irq_i8259.c
index 42029b33f..4635ae7ac 100644
--- a/arch/alpha/kernel/irq_i8259.c
+++ b/arch/alpha/kernel/irq_i8259.c
@@ -22,7 +22,7 @@
/* Note mask bit is true for DISABLED irqs. */
static unsigned int cached_irq_mask = 0xffff;
-spinlock_t i8259_irq_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t i8259_irq_lock = SPIN_LOCK_UNLOCKED;
static inline void
i8259_update_irq_hw(unsigned int irq, unsigned long mask)
diff --git a/arch/ia64/kernel/efivars.c b/arch/ia64/kernel/efivars.c
index 369a3fd7f..5a5cf77ea 100644
--- a/arch/ia64/kernel/efivars.c
+++ b/arch/ia64/kernel/efivars.c
@@ -100,7 +100,7 @@ typedef struct _efivar_entry_t {
struct list_head list;
} efivar_entry_t;
-spinlock_t efivars_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t efivars_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(efivar_list);
static struct proc_dir_entry *efi_vars_dir = NULL;
diff --git a/arch/ia64/kernel/pci.c b/arch/ia64/kernel/pci.c
index 1448db834..fb2434023 100644
--- a/arch/ia64/kernel/pci.c
+++ b/arch/ia64/kernel/pci.c
@@ -46,7 +46,7 @@ extern void ia64_mca_check_errors( void );
* This interrupt-safe spinlock protects all accesses to PCI
* configuration space.
*/
-spinlock_t pci_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t pci_lock = SPIN_LOCK_UNLOCKED;
struct pci_fixup pcibios_fixups[] = {
{ 0 }
diff --git a/arch/ia64/sn/io/hubspc.c b/arch/ia64/sn/io/hubspc.c
index 6dae96e14..ed1f550db 100644
--- a/arch/ia64/sn/io/hubspc.c
+++ b/arch/ia64/sn/io/hubspc.c
@@ -61,7 +61,7 @@ typedef struct cpuprom_info {
}cpuprom_info_t;
static cpuprom_info_t *cpuprom_head;
-spinlock_t cpuprom_spinlock;
+static spinlock_t cpuprom_spinlock;
#define PROM_LOCK() mutex_spinlock(&cpuprom_spinlock)
#define PROM_UNLOCK(s) mutex_spinunlock(&cpuprom_spinlock, (s))
diff --git a/arch/ppc/kernel/i8259.c b/arch/ppc/kernel/i8259.c
index 621899873..3f5bed25e 100644
--- a/arch/ppc/kernel/i8259.c
+++ b/arch/ppc/kernel/i8259.c
@@ -13,7 +13,7 @@ unsigned char cached_8259[2] = { 0xff, 0xff };
#define cached_A1 (cached_8259[0])
#define cached_21 (cached_8259[1])
-spinlock_t i8259_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t i8259_lock = SPIN_LOCK_UNLOCKED;
int i8259_pic_irq_offset;
diff --git a/arch/ppc/kernel/pmac_pic.c b/arch/ppc/kernel/pmac_pic.c
index 24115a495..295e4a44b 100644
--- a/arch/ppc/kernel/pmac_pic.c
+++ b/arch/ppc/kernel/pmac_pic.c
@@ -36,7 +36,7 @@ static volatile struct pmac_irq_hw *pmac_irq_hw[4] = {
static int max_irqs;
static int max_real_irqs;
-spinlock_t pmac_pic_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t pmac_pic_lock = SPIN_LOCK_UNLOCKED;
#define GATWICK_IRQ_POOL_SIZE 10
diff --git a/arch/ppc/kernel/prom.c b/arch/ppc/kernel/prom.c
index 7b9f64562..f0daf9d5f 100644
--- a/arch/ppc/kernel/prom.c
+++ b/arch/ppc/kernel/prom.c
@@ -1928,7 +1928,7 @@ print_properties(struct device_node *np)
}
#endif
-spinlock_t rtas_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t rtas_lock = SPIN_LOCK_UNLOCKED;
/* this can be called after setup -- Cort */
int __openfirmware
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index 193e914a9..6f06227d5 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -162,12 +162,11 @@ CONFIG_MD=y
CONFIG_BLK_DEV_MD=m
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID5=m
-CONFIG_MD_MULTIPATH=m
-CONFIG_BLK_DEV_LVM=m
-CONFIG_BLK_DEV_RAM=m
-CONFIG_BLK_DEV_RAM_SIZE=4096
+# CONFIG_MD_RAID1 is not set
+# CONFIG_MD_RAID5 is not set
+# CONFIG_MD_MULTIPATH is not set
+# CONFIG_BLK_DEV_LVM is not set
+# CONFIG_BLK_DEV_RAM is not set
# CONFIG_BLK_DEV_INITRD is not set
#
@@ -265,8 +264,8 @@ CONFIG_BLK_DEV_IDEDISK=y
# CONFIG_BLK_DEV_TIVO is not set
# CONFIG_BLK_DEV_IDECS is not set
CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_IDETAPE=m
-CONFIG_BLK_DEV_IDEFLOPPY=m
+# CONFIG_BLK_DEV_IDETAPE is not set
+# CONFIG_BLK_DEV_IDEFLOPPY is not set
# CONFIG_BLK_DEV_IDESCSI is not set
#
@@ -312,9 +311,9 @@ CONFIG_IDEDMA_AUTO=y
# CONFIG_IDEDMA_IVB is not set
# CONFIG_DMA_NONPCI is not set
CONFIG_BLK_DEV_IDE_MODES=y
-CONFIG_BLK_DEV_ATARAID=m
-CONFIG_BLK_DEV_ATARAID_PDC=m
-CONFIG_BLK_DEV_ATARAID_HPT=m
+# CONFIG_BLK_DEV_ATARAID is not set
+# CONFIG_BLK_DEV_ATARAID_PDC is not set
+# CONFIG_BLK_DEV_ATARAID_HPT is not set
#
# SCSI support
@@ -465,6 +464,7 @@ CONFIG_PCNET32=m
CONFIG_ADAPTEC_STARFIRE=m
# CONFIG_APRICOT is not set
# CONFIG_CS89x0 is not set
+CONFIG_DE2104X=m
CONFIG_TULIP=m
# CONFIG_TULIP_MWI is not set
# CONFIG_TULIP_MMIO is not set
diff --git a/arch/sparc64/solaris/timod.c b/arch/sparc64/solaris/timod.c
index a8a053bbb..2b113bb71 100644
--- a/arch/sparc64/solaris/timod.c
+++ b/arch/sparc64/solaris/timod.c
@@ -1,4 +1,4 @@
-/* $Id: timod.c,v 1.16 2001-09-18 22:29:06 davem Exp $
+/* $Id: timod.c,v 1.17 2001-12-11 06:11:52 davem Exp $
* timod.c: timod emulation.
*
* Copyright (C) 1998 Patrik Rak (prak3264@ss1000.ms.mff.cuni.cz)
@@ -33,7 +33,7 @@ extern asmlinkage int sys32_ioctl(unsigned int fd, unsigned int cmd,
u32 arg);
asmlinkage int solaris_ioctl(unsigned int fd, unsigned int cmd, u32 arg);
-spinlock_t timod_pagelock = SPIN_LOCK_UNLOCKED;
+static spinlock_t timod_pagelock = SPIN_LOCK_UNLOCKED;
static char * page = NULL ;
#ifndef DEBUG_SOLARIS_KMALLOC
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 0f7a51edc..942d9f7a1 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -257,7 +257,8 @@ void blk_queue_segment_boundary(request_queue_t *q, unsigned long mask)
static char *rq_flags[] = { "REQ_RW", "REQ_RW_AHEAD", "REQ_BARRIER",
"REQ_CMD", "REQ_NOMERGE", "REQ_STARTED",
"REQ_DONTPREP", "REQ_DRIVE_CMD", "REQ_DRIVE_TASK",
- "REQ_PC", "REQ_SENSE", "REQ_SPECIAL" };
+ "REQ_PC", "REQ_BLOCK_PC", "REQ_SENSE",
+ "REQ_SPECIAL" };
void blk_dump_rq_flags(struct request *rq, char *msg)
{
@@ -315,15 +316,46 @@ static int ll_10byte_cmd_build(request_queue_t *q, struct request *rq)
return 0;
}
-/*
- * can we merge the two segments, or do we need to start a new one?
- */
-inline int blk_same_segment(request_queue_t *q, struct bio *bio,
+void blk_recount_segments(request_queue_t *q, struct bio *bio)
+{
+ struct bio_vec *bv, *bvprv = NULL;
+ int i, nr_segs, seg_size, cluster;
+
+ if (unlikely(!bio->bi_io_vec))
+ return;
+
+ cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+ seg_size = nr_segs = 0;
+ bio_for_each_segment(bv, bio, i) {
+ if (bvprv && cluster) {
+ if (seg_size + bv->bv_len > q->max_segment_size)
+ goto new_segment;
+ if (!BIOVEC_MERGEABLE(bvprv, bv))
+ goto new_segment;
+ if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+ goto new_segment;
+
+ seg_size += bv->bv_len;
+ bvprv = bv;
+ continue;
+ }
+new_segment:
+ nr_segs++;
+ bvprv = bv;
+ seg_size = 0;
+ }
+
+ bio->bi_hw_seg = nr_segs;
+ bio->bi_flags |= (1 << BIO_SEG_VALID);
+}
+
+
+inline int blk_contig_segment(request_queue_t *q, struct bio *bio,
struct bio *nxt)
{
- /*
- * not contigous, just forget it
- */
+ if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
+ return 0;
+
if (!BIO_CONTIG(bio, nxt))
return 0;
@@ -343,19 +375,17 @@ inline int blk_same_segment(request_queue_t *q, struct bio *bio,
*/
int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg)
{
- unsigned long long lastend;
- struct bio_vec *bvec;
+ struct bio_vec *bvec, *bvprv;
struct bio *bio;
int nsegs, i, cluster;
nsegs = 0;
- bio = rq->bio;
- lastend = ~0ULL;
cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
/*
* for each bio in rq
*/
+ bvprv = NULL;
rq_for_each_bio(bio, rq) {
/*
* for each segment in bio
@@ -363,26 +393,20 @@ int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg
bio_for_each_segment(bvec, bio, i) {
int nbytes = bvec->bv_len;
- BIO_BUG_ON(i > bio->bi_vcnt);
-
- if (cluster && bvec_to_phys(bvec) == lastend) {
+ if (bvprv && cluster) {
if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
goto new_segment;
- /*
- * make sure to not map a segment across a
- * boundary that the queue doesn't want
- */
- if (!__BIO_SEG_BOUNDARY(lastend, lastend + nbytes, q->seg_boundary_mask))
- lastend = ~0ULL;
- else
- lastend += nbytes;
+ if (!BIOVEC_MERGEABLE(bvprv, bvec))
+ goto new_segment;
+ if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
+ goto new_segment;
sg[nsegs - 1].length += nbytes;
} else {
new_segment:
- if (nsegs > q->max_segments) {
- printk("map: %d >= %d\n", nsegs, q->max_segments);
+ if (nsegs >= q->max_segments) {
+ printk("map: %d >= %d, i %d, segs %d, size %ld\n", nsegs, q->max_segments, i, rq->nr_segments, rq->nr_sectors);
BUG();
}
@@ -391,9 +415,9 @@ new_segment:
sg[nsegs].length = nbytes;
sg[nsegs].offset = bvec->bv_offset;
- lastend = bvec_to_phys(bvec) + nbytes;
nsegs++;
}
+ bvprv = bvec;
} /* segments in bio */
} /* bios in rq */
@@ -405,35 +429,55 @@ new_segment:
* specific ones if so desired
*/
static inline int ll_new_segment(request_queue_t *q, struct request *req,
- struct bio *bio)
+ struct bio *bio, int nr_segs)
{
- if (req->nr_segments + bio->bi_vcnt <= q->max_segments) {
- req->nr_segments += bio->bi_vcnt;
+ if (req->nr_segments + nr_segs <= q->max_segments) {
+ req->nr_segments += nr_segs;
return 1;
}
+
+ req->flags |= REQ_NOMERGE;
return 0;
}
static int ll_back_merge_fn(request_queue_t *q, struct request *req,
struct bio *bio)
{
- if (req->nr_sectors + bio_sectors(bio) > q->max_sectors)
+ int bio_segs;
+
+ if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
+ req->flags |= REQ_NOMERGE;
return 0;
- if (blk_same_segment(q, req->biotail, bio))
+ }
+
+ bio_segs = bio_hw_segments(q, bio);
+ if (blk_contig_segment(q, req->biotail, bio))
+ bio_segs--;
+
+ if (!bio_segs)
return 1;
- return ll_new_segment(q, req, bio);
+ return ll_new_segment(q, req, bio, bio_segs);
}
static int ll_front_merge_fn(request_queue_t *q, struct request *req,
struct bio *bio)
{
- if (req->nr_sectors + bio_sectors(bio) > q->max_sectors)
+ int bio_segs;
+
+ if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
+ req->flags |= REQ_NOMERGE;
return 0;
- if (blk_same_segment(q, bio, req->bio))
+ }
+
+ bio_segs = bio_hw_segments(q, bio);
+ if (blk_contig_segment(q, bio, req->bio))
+ bio_segs--;
+
+ if (!bio_segs)
return 1;
- return ll_new_segment(q, req, bio);
+ return ll_new_segment(q, req, bio, bio_segs);
}
static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
@@ -441,7 +485,7 @@ static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
{
int total_segments = req->nr_segments + next->nr_segments;
- if (blk_same_segment(q, req->biotail, next->bio))
+ if (blk_contig_segment(q, req->biotail, next->bio))
total_segments--;
if (total_segments > q->max_segments)
@@ -643,7 +687,7 @@ int blk_init_queue(request_queue_t *q, request_fn_proc *rfn)
return ret;
}
- q->request_fn = rfn;
+ q->request_fn = rfn;
q->back_merge_fn = ll_back_merge_fn;
q->front_merge_fn = ll_front_merge_fn;
q->merge_requests_fn = ll_merge_requests_fn;
@@ -937,8 +981,8 @@ static int __make_request(request_queue_t *q, struct bio *bio)
cur_nr_sectors = bio_iovec(bio)->bv_len >> 9;
rw = bio_data_dir(bio);
-#error ONLY RUN 2.5.1-PRE KERNELS IF LOSING YOUR FILESYSTEMS IS OK
-#if 0
+#error THIS KERNEL EATS DISKS FOR LUNCH
+#if 1
if (rw & WRITE) {
set_bit(BIO_UPTODATE, &bio->bi_flags);
bio->bi_end_io(bio, nr_sectors);
@@ -981,6 +1025,7 @@ again:
BUG_ON(req->flags & REQ_NOMERGE);
if (!q->back_merge_fn(q, req, bio))
break;
+
elevator->elevator_merge_cleanup_fn(q, req, nr_sectors);
req->biotail->bi_next = bio;
@@ -995,6 +1040,7 @@ again:
BUG_ON(req->flags & REQ_NOMERGE);
if (!q->front_merge_fn(q, req, bio))
break;
+
elevator->elevator_merge_cleanup_fn(q, req, nr_sectors);
bio->bi_next = req->bio;
@@ -1078,7 +1124,7 @@ get_rq:
req->hard_nr_sectors = req->nr_sectors = nr_sectors;
req->current_nr_sectors = req->hard_cur_sectors = cur_nr_sectors;
req->nr_segments = bio->bi_vcnt;
- req->nr_hw_segments = req->nr_segments;
+ req->nr_hw_segments = bio_hw_segments(q, bio);
req->buffer = bio_data(bio); /* see ->buffer comment above */
req->waiting = NULL;
req->bio = req->biotail = bio;
@@ -1459,7 +1505,6 @@ int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
while ((bio = req->bio)) {
nsect = bio_iovec(bio)->bv_len >> 9;
- bio->bi_size -= bio_iovec(bio)->bv_len;
/*
* not a complete bvec done
@@ -1467,12 +1512,18 @@ int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
if (unlikely(nsect > nr_sectors)) {
int residual = (nsect - nr_sectors) << 9;
+ bio->bi_size -= residual;
bio_iovec(bio)->bv_offset += residual;
bio_iovec(bio)->bv_len -= residual;
blk_recalc_request(req, nr_sectors);
return 1;
}
+ /*
+ * account transfer
+ */
+ bio->bi_size -= bio_iovec(bio)->bv_len;
+
nr_sectors -= nsect;
total_nsect += nsect;
diff --git a/drivers/block/ps2esdi.c b/drivers/block/ps2esdi.c
index 40ef290e7..01c8805b8 100644
--- a/drivers/block/ps2esdi.c
+++ b/drivers/block/ps2esdi.c
@@ -483,7 +483,8 @@ static void do_ps2esdi_request(request_queue_t * q)
} /* check for above 16Mb dmas */
else if ((CURRENT_DEV < ps2esdi_drives) &&
(CURRENT->sector + CURRENT->current_nr_sectors <=
- ps2esdi[MINOR(CURRENT->rq_dev)].nr_sects)) {
+ ps2esdi[MINOR(CURRENT->rq_dev)].nr_sects) &&
+ CURRENT->flags & REQ_CMD) {
#if 0
printk("%s:got request. device : %d minor : %d command : %d sector : %ld count : %ld\n",
DEVICE_NAME,
@@ -495,7 +496,7 @@ static void do_ps2esdi_request(request_queue_t * q)
block = CURRENT->sector;
count = CURRENT->current_nr_sectors;
- switch (CURRENT->cmd) {
+ switch (rq_data_dir(CURRENT)) {
case READ:
ps2esdi_readwrite(READ, CURRENT_DEV, block, count);
break;
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 07e235f16..5ba8a313a 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -336,7 +336,7 @@ static struct sysrq_key_op sysrq_killall_op = {
/* Key Operations table and lock */
-spinlock_t sysrq_key_table_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t sysrq_key_table_lock = SPIN_LOCK_UNLOCKED;
#define SYSRQ_KEY_TABLE_LENGTH 36
static struct sysrq_key_op *sysrq_key_table[SYSRQ_KEY_TABLE_LENGTH] = {
/* 0 */ &sysrq_loglevel_op,
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index cc0d6b992..ecdcd85d5 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -226,12 +226,13 @@ ide_startstop_t ide_dma_intr (ide_drive_t *drive)
static int ide_build_sglist (ide_hwif_t *hwif, struct request *rq)
{
+ request_queue_t *q = &hwif->drives[DEVICE_NR(rq->rq_dev) & 1].queue;
struct scatterlist *sg = hwif->sg_table;
int nents;
- nents = blk_rq_map_sg(rq->q, rq, hwif->sg_table);
+ nents = blk_rq_map_sg(q, rq, hwif->sg_table);
- if (nents > rq->nr_segments)
+ if (rq->q && nents > rq->nr_segments)
printk("ide-dma: received %d segments, build %d\n", rq->nr_segments, nents);
if (rq_data_dir(rq) == READ)
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 2faa1bab9..6460e3387 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -1887,7 +1887,8 @@ static void idetape_end_request (byte uptodate, ide_hwgroup_t *hwgroup)
printk("ide-tape: %s: skipping over config parition..\n", tape->name);
#endif
tape->onstream_write_error = OS_PART_ERROR;
- complete(tape->waiting);
+ if (tape->waiting)
+ complete(tape->waiting);
}
}
remove_stage = 1;
@@ -1903,7 +1904,8 @@ static void idetape_end_request (byte uptodate, ide_hwgroup_t *hwgroup)
tape->nr_pending_stages++;
tape->next_stage = tape->first_stage;
rq->current_nr_sectors = rq->nr_sectors;
- complete(tape->waiting);
+ if (tape->waiting)
+ complete(tape->waiting);
}
}
} else if (rq->cmd == IDETAPE_READ_RQ) {
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index a9ff55e85..8a941308a 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -180,7 +180,7 @@ static int initializing; /* set while initializing built-in drivers */
*
* anti-deadlock ordering: ide_lock -> DRIVE_LOCK
*/
-spinlock_t ide_lock = SPIN_LOCK_UNLOCKED;
+spinlock_t ide_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;
#ifdef CONFIG_BLK_DEV_IDEPCI
static int ide_scan_direction; /* THIS was formerly 2.2.x pci=reverse */
diff --git a/drivers/net/Config.in b/drivers/net/Config.in
index 1ffc9fe2e..e78f8ad62 100644
--- a/drivers/net/Config.in
+++ b/drivers/net/Config.in
@@ -157,7 +157,8 @@ if [ "$CONFIG_NET_ETHERNET" = "y" ]; then
dep_tristate ' Apricot Xen-II on board Ethernet' CONFIG_APRICOT $CONFIG_ISA
dep_tristate ' CS89x0 support' CONFIG_CS89x0 $CONFIG_ISA
- dep_tristate ' DECchip Tulip (dc21x4x) PCI support' CONFIG_TULIP $CONFIG_PCI
+ dep_tristate ' Early DECchip Tulip (dc2104x) PCI support (EXPERIMENTAL)' CONFIG_DE2104X $CONFIG_PCI $CONFIG_EXPERIMENTAL
+ dep_tristate ' DECchip Tulip (dc2114x) PCI support' CONFIG_TULIP $CONFIG_PCI
if [ "$CONFIG_TULIP" = "y" -o "$CONFIG_TULIP" = "m" ]; then
dep_bool ' New bus configuration (EXPERIMENTAL)' CONFIG_TULIP_MWI $CONFIG_EXPERIMENTAL
bool ' Use PCI shared mem for NIC registers' CONFIG_TULIP_MMIO
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index f8cacc935..caea6b027 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -175,6 +175,7 @@ obj-$(CONFIG_DEPCA) += depca.o
obj-$(CONFIG_EWRK3) += ewrk3.o
obj-$(CONFIG_ATP) += atp.o
obj-$(CONFIG_DE4X5) += de4x5.o
+obj-$(CONFIG_DE2104X) += de2104x.o
obj-$(CONFIG_NI5010) += ni5010.o
obj-$(CONFIG_NI52) += ni52.o
obj-$(CONFIG_NI65) += ni65.o
diff --git a/drivers/net/de2104x.c b/drivers/net/de2104x.c
new file mode 100644
index 000000000..15a0ea68c
--- /dev/null
+++ b/drivers/net/de2104x.c
@@ -0,0 +1,2141 @@
+/* de2104x.c: A Linux PCI Ethernet driver for Intel/Digital 21040/1 chips. */
+/*
+ Copyright 2001 Jeff Garzik <jgarzik@mandrakesoft.com>
+
+ Copyright 1994, 1995 Digital Equipment Corporation. [de4x5.c]
+ Written/copyright 1994-2001 by Donald Becker. [tulip.c]
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ See the file COPYING in this distribution for more information.
+
+ TODO, in rough priority order:
+ * Support forcing media type with a module parameter,
+ like dl2k.c/sundance.c
+ * Constants (module parms?) for Rx work limit
+ * Complete reset on PciErr
+ * Jumbo frames / dev->change_mtu
+ * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
+ * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
+ * Implement Tx software interrupt mitigation via
+ Tx descriptor bit
+
+ */
+
+#define DRV_NAME "de2104x"
+#define DRV_VERSION "0.5.1"
+#define DRV_RELDATE "Nov 20, 2001"
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/compiler.h>
+#include <linux/sched.h>
+#include <linux/rtnetlink.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+/* These identify the driver base version and may not be removed. */
+static char version[] __initdata =
+KERN_INFO DRV_NAME " PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
+
+MODULE_AUTHOR("Jeff Garzik <jgarzik@mandrakesoft.com>");
+MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
+MODULE_LICENSE("GPL");
+
+static int debug = -1;
+MODULE_PARM (debug, "i");
+MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
+
+/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
+#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
+ || defined(__sparc_) || defined(__ia64__) \
+ || defined(__sh__) || defined(__mips__)
+static int rx_copybreak = 1518;
+#else
+static int rx_copybreak = 100;
+#endif
+MODULE_PARM (rx_copybreak, "i");
+MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");
+
+#define PFX DRV_NAME ": "
+
+#define DE_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+
+#define DE_RX_RING_SIZE 64
+#define DE_TX_RING_SIZE 64
+#define DE_RING_BYTES \
+ ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \
+ (sizeof(struct de_desc) * DE_TX_RING_SIZE))
+#define NEXT_TX(N) (((N) + 1) & (DE_TX_RING_SIZE - 1))
+#define NEXT_RX(N) (((N) + 1) & (DE_RX_RING_SIZE - 1))
+#define TX_BUFFS_AVAIL(CP) \
+ (((CP)->tx_tail <= (CP)->tx_head) ? \
+ (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head : \
+ (CP)->tx_tail - (CP)->tx_head - 1)
+
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+#define RX_OFFSET 2
+
+#define DE_SETUP_SKB ((struct sk_buff *) 1)
+#define DE_DUMMY_SKB ((struct sk_buff *) 2)
+#define DE_SETUP_FRAME_WORDS 96
+#define DE_EEPROM_WORDS 256
+#define DE_MAX_MEDIA 5
+
+#define DE_MEDIA_TP_AUTO 0
+#define DE_MEDIA_BNC 1
+#define DE_MEDIA_AUI 2
+#define DE_MEDIA_TP 3
+#define DE_MEDIA_TP_FD 4
+#define DE_MEDIA_INVALID DE_MAX_MEDIA
+#define DE_MEDIA_FIRST 0
+#define DE_MEDIA_LAST (DE_MAX_MEDIA - 1)
+#define DE_TIMER_LINK (60 * HZ)
+#define DE_TIMER_NO_LINK (5 * HZ)
+
+#define DE_NUM_REGS 16
+#define DE_REGS_SIZE (DE_NUM_REGS * sizeof(u32))
+#define DE_REGS_VER 1
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+/* This is a mysterious value that can be written to CSR11 in the 21040 (only)
+ to support a pre-NWay full-duplex signaling mechanism using short frames.
+ No one knows what it should be, but if left at its default value some
+ 10base2(!) packets trigger a full-duplex-request interrupt. */
+#define FULL_DUPLEX_MAGIC 0x6969
+
+enum {
+ /* NIC registers */
+ BusMode = 0x00,
+ TxPoll = 0x08,
+ RxPoll = 0x10,
+ RxRingAddr = 0x18,
+ TxRingAddr = 0x20,
+ MacStatus = 0x28,
+ MacMode = 0x30,
+ IntrMask = 0x38,
+ RxMissed = 0x40,
+ ROMCmd = 0x48,
+ CSR11 = 0x58,
+ SIAStatus = 0x60,
+ CSR13 = 0x68,
+ CSR14 = 0x70,
+ CSR15 = 0x78,
+ PCIPM = 0x40,
+
+ /* BusMode bits */
+ CmdReset = (1 << 0),
+ CacheAlign16 = 0x00008000,
+ BurstLen4 = 0x00000400,
+
+ /* Rx/TxPoll bits */
+ NormalTxPoll = (1 << 0),
+ NormalRxPoll = (1 << 0),
+
+ /* Tx/Rx descriptor status bits */
+ DescOwn = (1 << 31),
+ RxError = (1 << 15),
+ RxErrLong = (1 << 7),
+ RxErrCRC = (1 << 1),
+ RxErrFIFO = (1 << 0),
+ RxErrRunt = (1 << 11),
+ RxErrFrame = (1 << 14),
+ RingEnd = (1 << 25),
+ FirstFrag = (1 << 29),
+ LastFrag = (1 << 30),
+ TxError = (1 << 15),
+ TxFIFOUnder = (1 << 1),
+ TxLinkFail = (1 << 2) | (1 << 10) | (1 << 11),
+ TxMaxCol = (1 << 8),
+ TxOWC = (1 << 9),
+ TxJabber = (1 << 14),
+ SetupFrame = (1 << 27),
+ TxSwInt = (1 << 31),
+
+ /* MacStatus bits */
+ IntrOK = (1 << 16),
+ IntrErr = (1 << 15),
+ RxIntr = (1 << 6),
+ RxEmpty = (1 << 7),
+ TxIntr = (1 << 0),
+ TxEmpty = (1 << 2),
+ PciErr = (1 << 13),
+ TxState = (1 << 22) | (1 << 21) | (1 << 20),
+ RxState = (1 << 19) | (1 << 18) | (1 << 17),
+ LinkFail = (1 << 12),
+ LinkPass = (1 << 4),
+ RxStopped = (1 << 8),
+ TxStopped = (1 << 1),
+
+ /* MacMode bits */
+ TxEnable = (1 << 13),
+ RxEnable = (1 << 1),
+ RxTx = TxEnable | RxEnable,
+ FullDuplex = (1 << 9),
+ AcceptAllMulticast = (1 << 7),
+ AcceptAllPhys = (1 << 6),
+ BOCnt = (1 << 5),
+ MacModeClear = (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) |
+ RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast,
+
+ /* ROMCmd bits */
+ EE_SHIFT_CLK = 0x02, /* EEPROM shift clock. */
+ EE_CS = 0x01, /* EEPROM chip select. */
+ EE_DATA_WRITE = 0x04, /* Data from the Tulip to EEPROM. */
+ EE_WRITE_0 = 0x01,
+ EE_WRITE_1 = 0x05,
+ EE_DATA_READ = 0x08, /* Data from the EEPROM chip. */
+ EE_ENB = (0x4800 | EE_CS),
+
+ /* The EEPROM commands include the alway-set leading bit. */
+ EE_READ_CMD = 6,
+
+ /* RxMissed bits */
+ RxMissedOver = (1 << 16),
+ RxMissedMask = 0xffff,
+
+ /* SROM-related bits */
+ SROMC0InfoLeaf = 27,
+ MediaBlockMask = 0x3f,
+ MediaCustomCSRs = (1 << 6),
+
+ /* PCIPM bits */
+ PM_Sleep = (1 << 31),
+ PM_Snooze = (1 << 30),
+ PM_Mask = PM_Sleep | PM_Snooze,
+
+ /* SIAStatus bits */
+ NWayState = (1 << 14) | (1 << 13) | (1 << 12),
+ NWayRestart = (1 << 12),
+ NonselPortActive = (1 << 9),
+ LinkFailStatus = (1 << 2),
+ NetCxnErr = (1 << 1),
+};
+
+static const u32 de_intr_mask =
+ IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty |
+ LinkPass | LinkFail | PciErr;
+
+/*
+ * Set the programmable burst length to 4 longwords for all:
+ * DMA errors result without these values. Cache align 16 long.
+ */
+static const u32 de_bus_mode = CacheAlign16 | BurstLen4;
+
+struct de_srom_media_block {
+ u8 opts;
+ u16 csr13;
+ u16 csr14;
+ u16 csr15;
+} __attribute__((packed));
+
+struct de_srom_info_leaf {
+ u16 default_media;
+ u8 n_blocks;
+ u8 unused;
+ struct de_srom_media_block media[0];
+} __attribute__((packed));
+
+struct de_desc {
+ u32 opts1;
+ u32 opts2;
+ u32 addr1;
+ u32 addr2;
+};
+
+struct media_info {
+ u16 type; /* DE_MEDIA_xxx */
+ u16 csr13;
+ u16 csr14;
+ u16 csr15;
+};
+
+struct ring_info {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+};
+
+struct de_private {
+ unsigned tx_head;
+ unsigned tx_tail;
+ unsigned rx_tail;
+
+ void *regs;
+ struct net_device *dev;
+ spinlock_t lock;
+
+ struct de_desc *rx_ring;
+ struct de_desc *tx_ring;
+ struct ring_info tx_skb[DE_TX_RING_SIZE];
+ struct ring_info rx_skb[DE_RX_RING_SIZE];
+ unsigned rx_buf_sz;
+ dma_addr_t ring_dma;
+
+ u32 msg_enable;
+
+ struct net_device_stats net_stats;
+
+ struct pci_dev *pdev;
+ u32 macmode;
+
+ u16 setup_frame[DE_SETUP_FRAME_WORDS];
+
+ u32 media_type;
+ u32 media_supported;
+ u32 media_advertise;
+ struct media_info media[DE_MAX_MEDIA];
+ struct timer_list media_timer;
+
+ unsigned board_idx;
+ unsigned de21040 : 1;
+ unsigned media_lock : 1;
+};
+
+
+static void de_set_rx_mode (struct net_device *dev);
+static void de_tx (struct de_private *de);
+static void de_clean_rings (struct de_private *de);
+static void de_media_interrupt (struct de_private *de, u32 status);
+static void de21040_media_timer (unsigned long data);
+static void de21041_media_timer (unsigned long data);
+
+
+static struct pci_device_id de_pci_tbl[] __initdata = {
+ { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+ { },
+};
+MODULE_DEVICE_TABLE(pci, de_pci_tbl);
+
+static const char * const media_name[DE_MAX_MEDIA] = {
+ "10baseT auto",
+ "BNC",
+ "AUI",
+ "10baseT-HD",
+ "10baseT-FD"
+};
+
+/* 21040 transceiver register settings:
+ * TP AUTO(unused), BNC(unused), AUI, TP, TP FD*/
+static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
+static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
+static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
+
+/* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
+static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
+static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
+static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
+
+
+static inline unsigned long
+msec_to_jiffies(unsigned long ms)
+{
+ return (((ms)*HZ+999)/1000);
+}
+
+
+#define dr32(reg) readl(de->regs + (reg))
+#define dw32(reg,val) writel((val), de->regs + (reg))
+
+
/* Account one abnormal receive descriptor in net_stats.  Called from
 * de_rx() with the descriptor's status word and decoded frame length.
 * The 0x38000300 / 0x7fff masks select the frame-type and first/last
 * descriptor bits of the 2104x rx status word -- TODO confirm against
 * the 21040/21041 hardware reference manual. */
static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
			    u32 status, u32 len)
{
	if (netif_msg_rx_err (de))
		printk (KERN_DEBUG
			"%s: rx err, slot %d status 0x%x len %d\n",
			de->dev->name, rx_tail, status, len);

	if ((status & 0x38000300) != 0x0300) {
		/* Frame spanned multiple descriptors; ignore earlier buffers. */
		if ((status & 0xffff) != 0x7fff) {
			if (netif_msg_rx_err(de))
				printk(KERN_WARNING "%s: Oversized Ethernet frame "
					"spanned multiple buffers, status %8.8x!\n",
					de->dev->name, status);
			de->net_stats.rx_length_errors++;
		}
	} else if (status & RxError) {
		/* There was a fatal error. */
		de->net_stats.rx_errors++; /* end of a packet.*/
		if (status & 0x0890) de->net_stats.rx_length_errors++;
		if (status & RxErrCRC) de->net_stats.rx_crc_errors++;
		if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++;
	}
}
+
+static void de_rx (struct de_private *de)
+{
+ unsigned rx_tail = de->rx_tail;
+ unsigned rx_work = DE_RX_RING_SIZE;
+ unsigned drop = 0;
+ int rc;
+
+ while (rx_work--) {
+ u32 status, len;
+ dma_addr_t mapping;
+ struct sk_buff *skb, *copy_skb;
+ unsigned copying_skb, buflen;
+
+ skb = de->rx_skb[rx_tail].skb;
+ if (!skb)
+ BUG();
+ rmb();
+ status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
+ if (status & DescOwn)
+ break;
+
+ len = ((status >> 16) & 0x7ff) - 4;
+ mapping = de->rx_skb[rx_tail].mapping;
+
+ if (unlikely(drop)) {
+ de->net_stats.rx_dropped++;
+ goto rx_next;
+ }
+
+ if (unlikely((status & 0x38008300) != 0x0300)) {
+ de_rx_err_acct(de, rx_tail, status, len);
+ goto rx_next;
+ }
+
+ copying_skb = (len <= rx_copybreak);
+
+ if (unlikely(netif_msg_rx_status(de)))
+ printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d copying? %d\n",
+ de->dev->name, rx_tail, status, len,
+ copying_skb);
+
+ buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
+ copy_skb = dev_alloc_skb (buflen);
+ if (unlikely(!copy_skb)) {
+ de->net_stats.rx_dropped++;
+ drop = 1;
+ rx_work = 100;
+ goto rx_next;
+ }
+ copy_skb->dev = de->dev;
+
+ if (!copying_skb) {
+ pci_unmap_single(de->pdev, mapping,
+ buflen, PCI_DMA_FROMDEVICE);
+ skb_put(skb, len);
+
+ mapping =
+ de->rx_skb[rx_tail].mapping =
+ pci_map_single(de->pdev, copy_skb->tail,
+ buflen, PCI_DMA_FROMDEVICE);
+ de->rx_skb[rx_tail].skb = copy_skb;
+ } else {
+ pci_dma_sync_single(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
+ skb_reserve(copy_skb, RX_OFFSET);
+ memcpy(skb_put(copy_skb, len), skb->tail, len);
+
+ /* We'll reuse the original ring buffer. */
+ skb = copy_skb;
+ }
+
+ skb->protocol = eth_type_trans (skb, de->dev);
+
+ de->net_stats.rx_packets++;
+ de->net_stats.rx_bytes += skb->len;
+ de->dev->last_rx = jiffies;
+ rc = netif_rx (skb);
+ if (rc == NET_RX_DROP)
+ drop = 1;
+
+rx_next:
+ de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
+ if (rx_tail == (DE_RX_RING_SIZE - 1))
+ de->rx_ring[rx_tail].opts2 =
+ cpu_to_le32(RingEnd | de->rx_buf_sz);
+ else
+ de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
+ de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
+ rx_tail = NEXT_RX(rx_tail);
+ }
+
+ if (!rx_work)
+ printk(KERN_WARNING "%s: rx work limit reached\n", de->dev->name);
+
+ de->rx_tail = rx_tail;
+}
+
/* Interrupt handler.  Acks every reported status bit, then services rx,
 * tx completion, link changes and PCI bus errors.  Rx runs without
 * de->lock (only this path touches the rx ring); tx/media take it. */
static void de_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct de_private *de = dev->priv;
	u32 status;

	status = dr32(MacStatus);
	/* not our interrupt, or device gone.
	 * NOTE(review): a removed/hung device reads back all-ones, which
	 * for a 32-bit CSR is 0xFFFFFFFF; the 0xFFFF compare looks too
	 * narrow -- confirm intended hot-unplug behavior. */
	if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
		return;

	if (netif_msg_intr(de))
		printk(KERN_DEBUG "%s: intr, status %08x mode %08x desc %u/%u/%u\n",
		       dev->name, status, dr32(MacMode), de->rx_tail, de->tx_head, de->tx_tail);

	/* ack everything we saw */
	dw32(MacStatus, status);

	if (status & (RxIntr | RxEmpty)) {
		de_rx(de);
		if (status & RxEmpty)
			dw32(RxPoll, NormalRxPoll);	/* restart rx DMA */
	}

	spin_lock(&de->lock);

	if (status & (TxIntr | TxEmpty))
		de_tx(de);

	if (status & (LinkPass | LinkFail))
		de_media_interrupt(de, status);

	spin_unlock(&de->lock);

	if (status & PciErr) {
		u16 pci_status;

		/* reading then writing back PCI_STATUS clears the
		 * write-one-to-clear error bits */
		pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
		pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
		printk(KERN_ERR "%s: PCI bus error, status=%08x, PCI status=%04x\n",
		       dev->name, status, pci_status);
	}
}
+
/* Reclaim transmitted descriptors from tx_tail up to tx_head: unmap
 * DMA, free skbs, and account stats on each frame's last fragment.
 * DE_DUMMY_SKB / DE_SETUP_SKB are in-band markers for the errata dummy
 * slot and the rx-filter setup frame, which carry no real skb.
 * Called from the interrupt handler with de->lock held. */
static void de_tx (struct de_private *de)
{
	unsigned tx_head = de->tx_head;
	unsigned tx_tail = de->tx_tail;

	while (tx_tail != tx_head) {
		struct sk_buff *skb;
		u32 status;

		/* order the status read against the chip's DMA write */
		rmb();
		status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
		if (status & DescOwn)
			break;		/* chip still owns this slot */

		skb = de->tx_skb[tx_tail].skb;
		if (!skb)
			BUG();
		if (unlikely(skb == DE_DUMMY_SKB))
			goto next;	/* errata filler, nothing to free */

		if (unlikely(skb == DE_SETUP_SKB)) {
			/* setup frame: unmap only, no skb to free */
			pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
					 sizeof(de->setup_frame), PCI_DMA_TODEVICE);
			goto next;
		}

		pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
				 skb->len, PCI_DMA_TODEVICE);

		if (status & LastFrag) {
			if (status & TxError) {
				if (netif_msg_tx_err(de))
					printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
					       de->dev->name, status);
				de->net_stats.tx_errors++;
				if (status & TxOWC)
					de->net_stats.tx_window_errors++;
				if (status & TxMaxCol)
					de->net_stats.tx_aborted_errors++;
				if (status & TxLinkFail)
					de->net_stats.tx_carrier_errors++;
				if (status & TxFIFOUnder)
					de->net_stats.tx_fifo_errors++;
			} else {
				de->net_stats.tx_packets++;
				de->net_stats.tx_bytes += skb->len;
				if (netif_msg_tx_done(de))
					printk(KERN_DEBUG "%s: tx done, slot %d\n", de->dev->name, tx_tail);
			}
			dev_kfree_skb_irq(skb);
		}

next:
		de->tx_skb[tx_tail].skb = NULL;

		tx_tail = NEXT_TX(tx_tail);
	}

	de->tx_tail = tx_tail;

	/* restart the queue once a quarter of the ring is free */
	if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
		netif_wake_queue(de->dev);
}
+
/* Queue one skb for transmission in a single descriptor.  Returns 0 on
 * success, or 1 with the queue stopped when the ring is full.  The
 * DescOwn bit is written last, behind a wmb(), so the chip never sees a
 * half-built descriptor. */
static int de_start_xmit (struct sk_buff *skb, struct net_device *dev)
{
	struct de_private *de = dev->priv;
	unsigned int entry, tx_free;
	u32 mapping, len, flags = FirstFrag | LastFrag;
	struct de_desc *txd;

	spin_lock_irq(&de->lock);

	tx_free = TX_BUFFS_AVAIL(de);
	if (tx_free == 0) {
		netif_stop_queue(dev);
		spin_unlock_irq(&de->lock);
		return 1;
	}
	tx_free--;

	entry = de->tx_head;

	txd = &de->tx_ring[entry];

	len = skb->len;
	mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (entry == (DE_TX_RING_SIZE - 1))
		flags |= RingEnd;
	/* request a tx-complete interrupt on the last free slot and at
	 * the ring midpoint, so completions are reaped in time */
	if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
		flags |= TxSwInt;
	flags |= len;
	txd->opts2 = cpu_to_le32(flags);
	txd->addr1 = cpu_to_le32(mapping);

	de->tx_skb[entry].skb = skb;
	de->tx_skb[entry].mapping = mapping;
	wmb();

	/* hand the descriptor to the chip only after everything above
	 * is globally visible */
	txd->opts1 = cpu_to_le32(DescOwn);
	wmb();

	de->tx_head = NEXT_TX(entry);
	if (netif_msg_tx_queued(de))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
		       dev->name, entry, skb->len);

	if (tx_free == 0)
		netif_stop_queue(dev);

	spin_unlock_irq(&de->lock);

	/* Trigger an immediate transmit demand. */
	dw32(TxPoll, NormalTxPoll);
	dev->trans_start = jiffies;

	return 0;
}
+
+/* Set or clear the multicast filter for this adaptor.
+ Note that we only use exclusion around actually queueing the
+ new frame, not around filling de->setup_frame. This is non-deterministic
+ when re-entered but still correct. */
+
+/* The little-endian AUTODIN32 ethernet CRC calculation.
+ N.B. Do not use for bulk data, use a table-based routine instead.
+ This is common code and should be moved to net/core/crc.c */
+static unsigned const ethernet_polynomial_le = 0xedb88320U;
+static inline u32 ether_crc_le(int length, unsigned char *data)
+{
+ u32 crc = 0xffffffff; /* Initial value. */
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 8; --bit >= 0; current_octet >>= 1) {
+ if ((crc ^ current_octet) & 1) {
+ crc >>= 1;
+ crc ^= ethernet_polynomial_le;
+ } else
+ crc >>= 1;
+ }
+ }
+ return crc;
+}
+static unsigned const ethernet_polynomial = 0x04c11db7U;
+static inline u32 ether_crc(int length, unsigned char *data)
+{
+ int crc = -1;
+
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 0; bit < 8; bit++, current_octet >>= 1)
+ crc = (crc << 1) ^
+ ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
+ }
+ return crc;
+}
+
/* Set bit `i' in the little-endian bit array at `p': byte-addressed,
 * LSB-first within each byte.  Shadow any prior definition. */
#undef set_bit_le
#define set_bit_le(i,p)	do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
+
+static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
+{
+ struct de_private *de = dev->priv;
+ u16 hash_table[32];
+ struct dev_mc_list *mclist;
+ int i;
+ u16 *eaddrs;
+
+ memset(hash_table, 0, sizeof(hash_table));
+ set_bit_le(255, hash_table); /* Broadcast entry */
+ /* This should work on big-endian machines as well. */
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
+
+ set_bit_le(index, hash_table);
+
+ for (i = 0; i < 32; i++) {
+ *setup_frm++ = hash_table[i];
+ *setup_frm++ = hash_table[i];
+ }
+ setup_frm = &de->setup_frame[13*6];
+ }
+
+ /* Fill the final entry with our physical address. */
+ eaddrs = (u16 *)dev->dev_addr;
+ *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
+ *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
+ *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
+}
+
/* Build a "perfect filter" setup frame for <= 14 multicast groups: each
 * 16-bit address word is stored twice, unused slots are padded with the
 * broadcast address, and our physical address fills the last slot. */
static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
	struct de_private *de = dev->priv;
	struct dev_mc_list *mclist;
	int i;
	u16 *eaddrs;

	/* We have <= 14 addresses so we can use the wonderful
	   16 address perfect filtering of the Tulip. */
	for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
	     i++, mclist = mclist->next) {
		eaddrs = (u16 *)mclist->dmi_addr;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
	}
	/* Fill the unused entries with the broadcast address. */
	memset(setup_frm, 0xff, (15-i)*12);
	setup_frm = &de->setup_frame[15*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}
+
+
/* Recompute and load the rx filter.  Promisc/allmulti only toggle CSR6
 * accept bits; otherwise a setup frame (perfect filter for <= 14
 * groups, hash filter above that) is queued on the tx ring.  Caller
 * must hold de->lock. */
static void __de_set_rx_mode (struct net_device *dev)
{
	struct de_private *de = dev->priv;
	u32 macmode;
	unsigned int entry;
	u32 mapping;
	struct de_desc *txd;
	struct de_desc *dummy_txd = NULL;

	macmode = de->macmode & ~(AcceptAllMulticast | AcceptAllPhys);

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		macmode |= AcceptAllMulticast | AcceptAllPhys;
		goto out;
	}

	if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter well -- accept all multicasts. */
		macmode |= AcceptAllMulticast;
		goto out;
	}

	/* Note that only the low-address shortword of setup_frame is valid!
	   The values are doubled for big-endian architectures. */
	if (dev->mc_count > 14)	/* Must use a multicast hash table. */
		build_setup_frame_hash (de->setup_frame, dev);
	else
		build_setup_frame_perfect (de->setup_frame, dev);

	/*
	 * Now add this frame to the Tx list.
	 */

	entry = de->tx_head;

	/* Avoid a chip errata by prefixing a dummy entry. */
	if (entry != 0) {
		de->tx_skb[entry].skb = DE_DUMMY_SKB;

		dummy_txd = &de->tx_ring[entry];
		dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ?
				   cpu_to_le32(RingEnd) : 0;
		dummy_txd->addr1 = 0;

		/* Must set DescOwned later to avoid race with chip */

		entry = NEXT_TX(entry);
	}

	de->tx_skb[entry].skb = DE_SETUP_SKB;
	de->tx_skb[entry].mapping = mapping =
	    pci_map_single (de->pdev, de->setup_frame,
			    sizeof (de->setup_frame), PCI_DMA_TODEVICE);

	/* Put the setup frame on the Tx list. */
	txd = &de->tx_ring[entry];
	if (entry == (DE_TX_RING_SIZE - 1))
		txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame));
	else
		txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame));
	txd->addr1 = cpu_to_le32(mapping);
	wmb();

	/* ownership bit last, so the chip never sees a partial descriptor */
	txd->opts1 = cpu_to_le32(DescOwn);
	wmb();

	/* release the dummy slot only after the real setup descriptor
	 * is fully visible */
	if (dummy_txd) {
		dummy_txd->opts1 = cpu_to_le32(DescOwn);
		wmb();
	}

	de->tx_head = NEXT_TX(entry);

	if (TX_BUFFS_AVAIL(de) < 0)
		BUG();
	if (TX_BUFFS_AVAIL(de) == 0)
		netif_stop_queue(dev);

	/* Trigger an immediate transmit demand. */
	dw32(TxPoll, NormalTxPoll);

out:
	if (macmode != de->macmode) {
		dw32 (MacMode, macmode);
		de->macmode = macmode;
	}
}
+
+static void de_set_rx_mode (struct net_device *dev)
+{
+ unsigned long flags;
+ struct de_private *de = dev->priv;
+
+ spin_lock_irqsave (&de->lock, flags);
+ __de_set_rx_mode(dev);
+ spin_unlock_irqrestore (&de->lock, flags);
+}
+
+static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
+{
+ if (unlikely(rx_missed & RxMissedOver))
+ de->net_stats.rx_missed_errors += RxMissedMask;
+ else
+ de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask);
+}
+
+static void __de_get_stats(struct de_private *de)
+{
+ u32 tmp = dr32(RxMissed); /* self-clearing */
+
+ de_rx_missed(de, tmp);
+}
+
/* net_device->get_stats hook: fold in the self-clearing hardware
 * missed-frame counter (only while the device is up and present) and
 * return the soft statistics. */
static struct net_device_stats *de_get_stats(struct net_device *dev)
{
	struct de_private *de = dev->priv;

	/* The chip only need report frame silently dropped. */
	spin_lock_irq(&de->lock);
	if (netif_running(dev) && netif_device_present(dev))
		__de_get_stats(de);
	spin_unlock_irq(&de->lock);

	return &de->net_stats;
}
+
+static inline int de_is_running (struct de_private *de)
+{
+ return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
+}
+
/* Clear the rx/tx start bits in CSR6 and busy-wait (up to 1000 spins)
 * for both DMA engines to report stopped.  Warns on timeout rather than
 * failing. */
static void de_stop_rxtx (struct de_private *de)
{
	u32 macmode;
	unsigned int work = 1000;

	macmode = dr32(MacMode);
	if (macmode & RxTx) {
		dw32(MacMode, macmode & ~RxTx);
		dr32(MacMode);	/* flush the posted write */
	}

	while (--work > 0) {
		if (!de_is_running(de))
			return;
		cpu_relax();
	}

	printk(KERN_WARNING "%s: timeout expired stopping DMA\n", de->dev->name);
}
+
+static inline void de_start_rxtx (struct de_private *de)
+{
+ u32 macmode;
+
+ macmode = dr32(MacMode);
+ if ((macmode & RxTx) != RxTx) {
+ dw32(MacMode, macmode | RxTx);
+ dr32(MacMode);
+ }
+}
+
/* Quiesce the chip: mask all interrupt sources, stop both DMA engines,
 * ack pending status, wait out the interrupt handler, and reset the
 * software ring indices. */
static void de_stop_hw (struct de_private *de)
{

	udelay(5);
	dw32(IntrMask, 0);	/* mask every interrupt source */

	de_stop_rxtx(de);

	dw32(MacStatus, dr32(MacStatus));	/* ack anything pending */

	synchronize_irq();
	udelay(10);

	de->rx_tail = 0;
	de->tx_head = de->tx_tail = 0;
}
+
+static void de_link_up(struct de_private *de)
+{
+ if (!netif_carrier_ok(de->dev)) {
+ netif_carrier_on(de->dev);
+ if (netif_msg_link(de))
+ printk(KERN_INFO "%s: link up, media %s\n",
+ de->dev->name, media_name[de->media_type]);
+ }
+}
+
+static void de_link_down(struct de_private *de)
+{
+ if (netif_carrier_ok(de->dev)) {
+ netif_carrier_off(de->dev);
+ if (netif_msg_link(de))
+ printk(KERN_INFO "%s: link down\n", de->dev->name);
+ }
+}
+
/* Program the SIA (CSR13-15) for de->media_type and update the soft
 * duplex bit in de->macmode.  Both DMA engines must already be stopped
 * (BUG check).
 * NOTE(review): de->macmode is updated here but not written to CSR6;
 * callers are expected to write MacMode afterwards -- verify the media
 * timer paths actually do so for a duplex change. */
static void de_set_media (struct de_private *de)
{
	unsigned media = de->media_type;

	if (de_is_running(de))
		BUG();

	if (de->de21040)
		dw32(CSR11, FULL_DUPLEX_MAGIC);
	dw32(CSR13, 0); /* Reset phy */
	dw32(CSR14, de->media[media].csr14);
	dw32(CSR15, de->media[media].csr15);
	dw32(CSR13, de->media[media].csr13);

	/* must delay 10ms before writing to other registers,
	 * especially CSR6
	 */
	mdelay(10);

	if (media == DE_MEDIA_TP_FD)
		de->macmode |= FullDuplex;
	else
		de->macmode &= ~FullDuplex;

	if (netif_msg_link(de))
		printk(KERN_INFO "%s: set link %s, mode %x, sia %x,%x,%x,%x\n"
		       KERN_INFO " set mode %x, set sia %x,%x,%x\n",
		       de->dev->name, media_name[media],
		       dr32(MacMode), dr32(SIAStatus), dr32(CSR13),
		       dr32(CSR14), dr32(CSR15), de->macmode,
		       de->media[media].csr13,
		       de->media[media].csr14,
		       de->media[media].csr15);
}
+
/* 21040 link poll timer.  While carrier is present, just re-arm with
 * the long interval; on loss of carrier, alternate between TP and AUI
 * (unless the user locked the media) and re-arm with the short no-link
 * interval. */
static void de21040_media_timer (unsigned long data)
{
	struct de_private *de = (struct de_private *) data;
	struct net_device *dev = de->dev;
	u32 status = dr32(SIAStatus);
	unsigned int carrier;

	carrier = (status & NetCxnErr) ? 0 : 1;

	if (carrier) {
		/* on TP, additionally require the link-fail bit clear */
		if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
			goto no_link_yet;

		de->media_timer.expires = jiffies + DE_TIMER_LINK;
		add_timer(&de->media_timer);
		if (!netif_carrier_ok(dev))
			de_link_up(de);
		else
			if (netif_msg_timer(de))
				printk(KERN_INFO "%s: %s link ok, status %x\n",
				       dev->name, media_name[de->media_type],
				       status);
		return;
	}

	de_link_down(de);

	/* NOTE(review): with media locked, returning here leaves the
	 * timer un-armed until the next link interrupt -- confirm that
	 * is intended. */
	if (de->media_lock)
		return;

	/* toggle between the 21040's two media choices */
	if (de->media_type == DE_MEDIA_AUI)
		de->media_type = DE_MEDIA_TP;
	else
		de->media_type = DE_MEDIA_AUI;

	de_stop_rxtx(de);
	de_set_media(de);
	de_start_rxtx(de);

	/* deliberate fallthrough after a media switch: re-arm quickly */
no_link_yet:
	de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
	add_timer(&de->media_timer);

	if (netif_msg_timer(de))
		printk(KERN_INFO "%s: no link, trying media %s, status %x\n",
		       dev->name, media_name[de->media_type], status);
}
+
+static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
+{
+ switch (new_media) {
+ case DE_MEDIA_TP_AUTO:
+ if (!(de->media_advertise & ADVERTISED_Autoneg))
+ return 0;
+ if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full)))
+ return 0;
+ break;
+ case DE_MEDIA_BNC:
+ if (!(de->media_advertise & ADVERTISED_BNC))
+ return 0;
+ break;
+ case DE_MEDIA_AUI:
+ if (!(de->media_advertise & ADVERTISED_AUI))
+ return 0;
+ break;
+ case DE_MEDIA_TP:
+ if (!(de->media_advertise & ADVERTISED_10baseT_Half))
+ return 0;
+ break;
+ case DE_MEDIA_TP_FD:
+ if (!(de->media_advertise & ADVERTISED_10baseT_Full))
+ return 0;
+ break;
+ }
+
+ return 1;
+}
+
/* 21041 link poll timer.  While carrier is present, re-arm with the
 * long interval; on loss of carrier, pick a new media type (activity
 * hint first, otherwise the next advertised type) unless the media is
 * locked, reprogram the SIA, and re-arm with the short interval. */
static void de21041_media_timer (unsigned long data)
{
	struct de_private *de = (struct de_private *) data;
	struct net_device *dev = de->dev;
	u32 status = dr32(SIAStatus), new_media;
	unsigned int carrier;

	carrier = (status & NetCxnErr) ? 0 : 1;

	if (carrier) {
		/* on any TP mode, additionally require link-fail clear */
		if ((de->media_type == DE_MEDIA_TP_AUTO ||
		     de->media_type == DE_MEDIA_TP ||
		     de->media_type == DE_MEDIA_TP_FD) &&
		    (status & LinkFailStatus))
			goto no_link_yet;

		de->media_timer.expires = jiffies + DE_TIMER_LINK;
		add_timer(&de->media_timer);
		if (!netif_carrier_ok(dev))
			de_link_up(de);
		else
			if (netif_msg_timer(de))
				printk(KERN_INFO "%s: %s link ok, mode %x status %x\n",
				       dev->name, media_name[de->media_type],
				       dr32(MacMode), status);
		return;
	}

	de_link_down(de);

	/* if media type locked, don't switch media */
	if (de->media_lock)
		goto set_media;

	/* if activity detected, use that as hint for new media type */
	if (status & NonselPortActive) {
		if (de->media_type == DE_MEDIA_AUI)
			de->media_type = DE_MEDIA_TP;
		else
			de->media_type = DE_MEDIA_AUI;
		goto set_media;
	}

	/* move to next advertised media */
	new_media = de->media_type;
	do {
		if (new_media == DE_MEDIA_LAST)
			new_media = DE_MEDIA_FIRST;
		else
			new_media++;
	} while ((!de_ok_to_advertise(de, new_media)) &&
		 (new_media != de->media_type));

	de->media_type = new_media;

set_media:
	de_stop_rxtx(de);
	de_set_media(de);
	de_start_rxtx(de);

	/* deliberate fallthrough: re-arm with the short interval */
no_link_yet:
	de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
	add_timer(&de->media_timer);

	if (netif_msg_timer(de))
		printk(KERN_INFO "%s: no link, trying media %s, status %x\n",
		       dev->name, media_name[de->media_type], status);
}
+
/* Handle a LinkPass/LinkFail interrupt: update carrier state and
 * reschedule the media timer accordingly.  Called from de_interrupt
 * with de->lock held; `status' must have one of the two bits set. */
static void de_media_interrupt (struct de_private *de, u32 status)
{
	if (status & LinkPass) {
		de_link_up(de);
		mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
		return;
	}

	if (!(status & LinkFail))
		BUG();

	if (netif_carrier_ok(de->dev)) {
		de_link_down(de);
		mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
	}
}
+
/* Software-reset the MAC via CSR0 (sequence copied from de4x5.c), then
 * verify the chip is idle and present.  Returns 0 on success, -EBUSY if
 * a DMA engine is still running, -ENODEV if the chip reads all-ones. */
static int de_reset_mac (struct de_private *de)
{
	unsigned tmp;
	u32 status;

	/*
	 * Reset MAC.  Copied from de4x5.c.
	 */

	dr32 (BusMode);
	mdelay (1);

	dw32 (BusMode, de_bus_mode | CmdReset);
	mdelay (1);

	dw32 (BusMode, de_bus_mode);
	mdelay (1);

	/* settle with a few dummy reads */
	for (tmp = 0; tmp < 5; tmp++) {
		dr32 (BusMode);
		mdelay (1);
	}

	mdelay (1);

	status = dr32(MacStatus);
	if (status & (RxState | TxState))
		return -EBUSY;
	if (status == 0xffffffff)
		return -ENODEV;
	return 0;
}
+
/* Bring a sleeping 21041 out of its power-saving state via the PCIPM
 * config register (the 21040 has no power management).  Sleeps ~10ms
 * afterwards, as de4x5.c does, so it must run in process context. */
static void de_adapter_wake (struct de_private *de)
{
	u32 pmctl;

	if (de->de21040)
		return;

	pci_read_config_dword(de->pdev, PCIPM, &pmctl);
	if (pmctl & PM_Mask) {
		pmctl &= ~PM_Mask;
		pci_write_config_dword(de->pdev, PCIPM, pmctl);

		/* de4x5.c delays, so we do too */
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(msec_to_jiffies(10));
	}
}
+
+static void de_adapter_sleep (struct de_private *de)
+{
+ u32 pmctl;
+
+ if (de->de21040)
+ return;
+
+ pci_read_config_dword(de->pdev, PCIPM, &pmctl);
+ pmctl |= PM_Sleep;
+ pci_write_config_dword(de->pdev, PCIPM, pmctl);
+}
+
/* Full hardware bring-up: wake the chip, reset the MAC, program the
 * media/SIA, load the ring base addresses, start both DMA engines,
 * unmask interrupts and load the rx filter.  Rings must already be
 * allocated and initialized.  Returns 0 or a de_reset_mac() error. */
static int de_init_hw (struct de_private *de)
{
	struct net_device *dev = de->dev;
	int rc;

	de_adapter_wake(de);

	de->macmode = dr32(MacMode) & ~MacModeClear;

	rc = de_reset_mac(de);
	if (rc)
		return rc;

	de_set_media(de); /* reset phy */

	/* tx ring immediately follows the rx ring in the DMA area */
	dw32(RxRingAddr, de->ring_dma);
	dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE));

	dw32(MacMode, RxTx | de->macmode);

	dr32(RxMissed); /* self-clearing */

	dw32(IntrMask, de_intr_mask);

	de_set_rx_mode(dev);

	return 0;
}
+
/* Allocate and DMA-map an skb for every rx descriptor and hand all of
 * them to the chip.  On any allocation failure the rings are torn down
 * and -ENOMEM is returned. */
static int de_refill_rx (struct de_private *de)
{
	unsigned i;

	for (i = 0; i < DE_RX_RING_SIZE; i++) {
		struct sk_buff *skb;

		skb = dev_alloc_skb(de->rx_buf_sz);
		if (!skb)
			goto err_out;

		skb->dev = de->dev;

		de->rx_skb[i].mapping = pci_map_single(de->pdev,
			skb->tail, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
		de->rx_skb[i].skb = skb;

		/* chip owns every descriptor; last one wraps the ring */
		de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
		if (i == (DE_RX_RING_SIZE - 1))
			de->rx_ring[i].opts2 =
				cpu_to_le32(RingEnd | de->rx_buf_sz);
		else
			de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
		de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
		de->rx_ring[i].addr2 = 0;
	}

	return 0;

err_out:
	de_clean_rings(de);
	return -ENOMEM;
}
+
+static int de_init_rings (struct de_private *de)
+{
+ memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
+ de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
+
+ de->rx_tail = 0;
+ de->tx_head = de->tx_tail = 0;
+
+ return de_refill_rx (de);
+}
+
+static int de_alloc_rings (struct de_private *de)
+{
+ de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
+ if (!de->rx_ring)
+ return -ENOMEM;
+ de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
+ return de_init_rings(de);
+}
+
/* Reclaim every buffer still attached to either ring and clear the
 * descriptors; tx frames still pending are counted as dropped.  The
 * chip must be stopped before calling this. */
static void de_clean_rings (struct de_private *de)
{
	unsigned i;

	/* clear descriptors first so the chip cannot use them */
	memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE);
	de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
	wmb();
	memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
	de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
	wmb();

	for (i = 0; i < DE_RX_RING_SIZE; i++) {
		if (de->rx_skb[i].skb) {
			pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
					 de->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(de->rx_skb[i].skb);
		}
	}

	for (i = 0; i < DE_TX_RING_SIZE; i++) {
		if (de->tx_skb[i].skb) {
			struct sk_buff *skb = de->tx_skb[i].skb;
			pci_unmap_single(de->pdev, de->tx_skb[i].mapping,
					 skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
			de->net_stats.tx_dropped++;
		}
	}

	memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE);
	memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE);
}
+
+static void de_free_rings (struct de_private *de)
+{
+ de_clean_rings(de);
+ pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
+ de->rx_ring = NULL;
+ de->tx_ring = NULL;
+}
+
/* net_device->open: size rx buffers for the MTU, allocate the rings,
 * bring up the hardware, grab the (shared) IRQ, and start the queue.
 * Undoes completed steps on failure. */
static int de_open (struct net_device *dev)
{
	struct de_private *de = dev->priv;
	int rc;

	if (netif_msg_ifup(de))
		printk(KERN_DEBUG "%s: enabling interface\n", dev->name);

	de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	rc = de_alloc_rings(de);
	if (rc) {
		printk(KERN_ERR "%s: ring allocation failure, err=%d\n",
		       dev->name, rc);
		return rc;
	}

	rc = de_init_hw(de);
	if (rc) {
		printk(KERN_ERR "%s: h/w init failure, err=%d\n",
		       dev->name, rc);
		goto err_out_free;
	}

	rc = request_irq(dev->irq, de_interrupt, SA_SHIRQ, dev->name, dev);
	if (rc) {
		printk(KERN_ERR "%s: IRQ %d request failure, err=%d\n",
		       dev->name, dev->irq, rc);
		goto err_out_hw;
	}

	netif_start_queue(dev);

	return 0;

err_out_hw:
	de_stop_hw(de);
err_out_free:
	de_free_rings(de);
	return rc;
}
+
/* net_device->stop: quiesce everything in roughly the reverse order of
 * de_open(), then put the chip to sleep and disable the PCI device. */
static int de_close (struct net_device *dev)
{
	struct de_private *de = dev->priv;

	if (netif_msg_ifdown(de))
		printk(KERN_DEBUG "%s: disabling interface\n", dev->name);

	netif_stop_queue(dev);
	netif_carrier_off(dev);
	del_timer_sync(&de->media_timer);
	de_stop_hw(de);
	free_irq(dev->irq, dev);
	de_free_rings(de);
	de_adapter_sleep(de);
	pci_disable_device(de->pdev);
	return 0;
}
+
/* Transmit watchdog: dump chip state, fully stop the hardware, discard
 * every ring buffer, and reinitialize from scratch.
 * NOTE(review): the de_init_hw() return value is ignored here --
 * consider at least logging a failure. */
static void de_tx_timeout (struct net_device *dev)
{
	struct de_private *de = dev->priv;

	printk(KERN_DEBUG "%s: NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
	       dev->name, dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
	       de->rx_tail, de->tx_head, de->tx_tail);

	del_timer_sync(&de->media_timer);

	/* keep the interrupt handler out while we tear down */
	disable_irq(dev->irq);
	spin_lock_irq(&de->lock);

	de_stop_hw(de);
	netif_stop_queue(dev);
	netif_carrier_off(dev);

	spin_unlock_irq(&de->lock);
	enable_irq(dev->irq);

	/* Update the error counts. */
	__de_get_stats(de);

	synchronize_irq();
	de_clean_rings(de);

	de_init_hw(de);

	netif_wake_queue(dev);
}
+
+static int de_get_regs(struct de_private *de, u8 *buf)
+{
+ int i;
+ u32 *rbuf = (u32 *)buf;
+
+ /* read all CSRs */
+ for (i = 0; i < DE_NUM_REGS; i++)
+ rbuf[i] = dr32(i * 8);
+
+ /* handle self-clearing RxMissed counter, CSR8 */
+ de_rx_missed(de, rbuf[8]);
+
+ return 0;
+}
+
/* Fill an ethtool_cmd with the current link settings.  Caller holds
 * de->lock.  Speed codes 5 and 2 stand in for AUI and BNC, matching
 * the checks in de_ethtool_sset(). */
static int de_ethtool_gset(struct de_private *de, struct ethtool_cmd *ecmd)
{
	ecmd->supported = de->media_supported;
	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->phy_address = 0;
	ecmd->advertising = de->media_advertise;

	switch (de->media_type) {
	case DE_MEDIA_AUI:
		ecmd->port = PORT_AUI;
		ecmd->speed = 5;	/* pseudo-speed code for AUI */
		break;
	case DE_MEDIA_BNC:
		ecmd->port = PORT_BNC;
		ecmd->speed = 2;	/* pseudo-speed code for BNC */
		break;
	default:
		ecmd->port = PORT_TP;
		ecmd->speed = SPEED_10;
		break;
	}

	if (de->macmode & FullDuplex)
		ecmd->duplex = DUPLEX_FULL;
	else
		ecmd->duplex = DUPLEX_HALF;

	/* media_lock set means the user forced a media type */
	if (de->media_lock)
		ecmd->autoneg = AUTONEG_DISABLE;
	else
		ecmd->autoneg = AUTONEG_ENABLE;

	/* ignore maxtxpkt, maxrxpkt for now */

	return 0;
}
+
/* Apply new link settings from ethtool.  Validates the requested
 * speed/duplex/port/advertising combination against what this chip
 * supports, then reprograms the SIA only if something changed.
 * Caller holds de->lock. */
static int de_ethtool_sset(struct de_private *de, struct ethtool_cmd *ecmd)
{
	u32 new_media;
	unsigned int media_lock;

	/* speed 10 = TP; 5 and 2 are pseudo-codes for AUI and BNC */
	if (ecmd->speed != SPEED_10 && ecmd->speed != 5 && ecmd->speed != 2)
		return -EINVAL;
	if (de->de21040 && ecmd->speed == 2)
		return -EINVAL;	/* 21040 has no BNC port */
	if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
		return -EINVAL;
	if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC)
		return -EINVAL;
	if (de->de21040 && ecmd->port == PORT_BNC)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_INTERNAL)
		return -EINVAL;
	if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	if (ecmd->advertising & ~de->media_supported)
		return -EINVAL;
	if (ecmd->autoneg == AUTONEG_ENABLE &&
	    (!(ecmd->advertising & ADVERTISED_Autoneg)))
		return -EINVAL;

	/* map the requested port/autoneg/duplex to a DE_MEDIA_* type */
	switch (ecmd->port) {
	case PORT_AUI:
		new_media = DE_MEDIA_AUI;
		if (!(ecmd->advertising & ADVERTISED_AUI))
			return -EINVAL;
		break;
	case PORT_BNC:
		new_media = DE_MEDIA_BNC;
		if (!(ecmd->advertising & ADVERTISED_BNC))
			return -EINVAL;
		break;
	default:
		if (ecmd->autoneg == AUTONEG_ENABLE)
			new_media = DE_MEDIA_TP_AUTO;
		else if (ecmd->duplex == DUPLEX_FULL)
			new_media = DE_MEDIA_TP_FD;
		else
			new_media = DE_MEDIA_TP;
		if (!(ecmd->advertising & ADVERTISED_TP))
			return -EINVAL;
		if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half)))
			return -EINVAL;
		break;
	}

	media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;

	if ((new_media == de->media_type) &&
	    (media_lock == de->media_lock) &&
	    (ecmd->advertising == de->media_advertise))
		return 0; /* nothing to change */

	de_link_down(de);
	de_stop_rxtx(de);

	de->media_type = new_media;
	de->media_lock = media_lock;
	de->media_advertise = ecmd->advertising;
	de_set_media(de);

	return 0;
}
+
/* Dispatch one SIOCETHTOOL sub-command.  Copies the command word from
 * user space, then handles driver info, get/set settings, nway restart,
 * link state, message level, and register dump. */
static int de_ethtool_ioctl (struct de_private *de, void *useraddr)
{
	u32 ethcmd;

	/* dev_ioctl() in ../../net/core/dev.c has already checked
	   capable(CAP_NET_ADMIN), so don't bother with that here.  */

	if (get_user(ethcmd, (u32 *)useraddr))
		return -EFAULT;

	switch (ethcmd) {

	case ETHTOOL_GDRVINFO: {
		struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
		strcpy (info.driver, DRV_NAME);
		strcpy (info.version, DRV_VERSION);
		strcpy (info.bus_info, de->pdev->slot_name);
		info.regdump_len = DE_REGS_SIZE;
		if (copy_to_user (useraddr, &info, sizeof (info)))
			return -EFAULT;
		return 0;
	}

	/* get settings */
	case ETHTOOL_GSET: {
		struct ethtool_cmd ecmd = { ETHTOOL_GSET };
		spin_lock_irq(&de->lock);
		de_ethtool_gset(de, &ecmd);
		spin_unlock_irq(&de->lock);
		if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
			return -EFAULT;
		return 0;
	}
	/* set settings */
	case ETHTOOL_SSET: {
		struct ethtool_cmd ecmd;
		int r;
		if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
			return -EFAULT;
		spin_lock_irq(&de->lock);
		r = de_ethtool_sset(de, &ecmd);
		spin_unlock_irq(&de->lock);
		return r;
	}

	/* restart autonegotiation */
	case ETHTOOL_NWAY_RST: {
		u32 status;

		if (de->media_type != DE_MEDIA_TP_AUTO)
			return -EINVAL;
		if (netif_carrier_ok(de->dev))
			de_link_down(de);

		/* pulse the NWay restart bit in the SIA status CSR */
		status = dr32(SIAStatus);
		dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
		if (netif_msg_link(de))
			printk(KERN_INFO "%s: link nway restart, status %x,%x\n",
			       de->dev->name, status, dr32(SIAStatus));
		return 0;
	}

	/* get link status */
	case ETHTOOL_GLINK: {
		struct ethtool_value edata = {ETHTOOL_GLINK};
		edata.data = (netif_carrier_ok(de->dev)) ? 1 : 0;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		return 0;
	}

	/* get message-level */
	case ETHTOOL_GMSGLVL: {
		struct ethtool_value edata = {ETHTOOL_GMSGLVL};
		edata.data = de->msg_enable;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		return 0;
	}
	/* set message-level */
	case ETHTOOL_SMSGLVL: {
		struct ethtool_value edata;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		de->msg_enable = edata.data;
		return 0;
	}

	/* get registers */
	case ETHTOOL_GREGS: {
		struct ethtool_regs regs;
		u8 regbuf[DE_REGS_SIZE];
		int r;

		if (copy_from_user(&regs, useraddr, sizeof(regs)))
			return -EFAULT;

		/* clamp the requested dump length to our CSR block */
		if (regs.len > DE_REGS_SIZE) {
			regs.len = DE_REGS_SIZE;
		}
		regs.version = (DE_REGS_VER << 2) | de->de21040;
		if (copy_to_user(useraddr, &regs, sizeof(regs)))
			return -EFAULT;

		useraddr += offsetof(struct ethtool_regs, data);

		spin_lock_irq(&de->lock);
		r = de_get_regs(de, regbuf);
		spin_unlock_irq(&de->lock);

		if (r)
			return r;
		if (copy_to_user(useraddr, regbuf, regs.len))
			return -EFAULT;
		return 0;
	}

	default:
		break;
	}

	return -EOPNOTSUPP;
}
+
+
+static int de_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct de_private *de = dev->priv;
+ int rc = 0;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCETHTOOL:
+ return de_ethtool_ioctl(de, (void *) rq->ifr_data);
+
+ default:
+ rc = -EOPNOTSUPP;
+ break;
+ }
+
+ return rc;
+}
+
/* Read the 6-byte station address from the 21040's serial ROM.  Each
 * dr32(ROMCmd) yields the next byte; a negative value (bit 31 set)
 * means the byte is not ready yet, hence the bounded spin. */
static void __init de21040_get_mac_address (struct de_private *de)
{
	unsigned i;

	dw32 (ROMCmd, 0);	/* Reset the pointer with a dummy write. */

	for (i = 0; i < 6; i++) {
		int value, boguscnt = 100000;
		do
			value = dr32(ROMCmd);
		while (value < 0 && --boguscnt > 0);
		de->dev->dev_addr[i] = value;	/* truncated to the low 8 bits */
		if (boguscnt <= 0)
			printk(KERN_WARNING PFX "timeout reading 21040 MAC address byte %u\n", i);
	}
}
+
/* Reading a serial EEPROM is a "bit" grungy, but we work our way through:->.*/

/* Note: this routine returns extra data bits for size detection. */
/* Bit-bang one read transaction on the serial EEPROM behind the ROMCmd
 * CSR: raise chip-select, clock out the read command and `addr_len'
 * address bits, then clock in 16 data bits.  Each writel is followed by
 * a readl to flush the posted write before the next clock edge. */
static unsigned __init tulip_read_eeprom(void *regs, int location, int addr_len)
{
	int i;
	unsigned retval = 0;
	void *ee_addr = regs + ROMCmd;
	int read_cmd = location | (EE_READ_CMD << addr_len);

	writel(EE_ENB & ~EE_CS, ee_addr);
	writel(EE_ENB, ee_addr);

	/* Shift the read command bits out. */
	for (i = 4 + addr_len; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
		writel(EE_ENB | dataval, ee_addr);
		readl(ee_addr);
		writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
		readl(ee_addr);
		retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
	}
	writel(EE_ENB, ee_addr);
	readl(ee_addr);

	/* clock in the 16 data bits, MSB first */
	for (i = 16; i > 0; i--) {
		writel(EE_ENB | EE_SHIFT_CLK, ee_addr);
		readl(ee_addr);
		retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
		writel(EE_ENB, ee_addr);
		readl(ee_addr);
	}

	/* Terminate the EEPROM access. */
	writel(EE_ENB & ~EE_CS, ee_addr);
	return retval;
}
+
+/* Parse the 21041's serial EEPROM (SROM): extract the MAC address,
+ * the default media type, the set of supported/advertised media, and
+ * any per-media CSR13-15 overrides.  On a malformed SROM, fall back
+ * (via bad_srom) to "supports everything" with the standard 21041
+ * CSR tables.
+ */
+static void __init tulip_get_srom_info (struct de_private *de)
+{
+	unsigned i, sa_offset = 0, ofs;
+	u8 ee_data[DE_EEPROM_WORDS * sizeof(u16)];
+	/* size probe: parts with 8-bit addressing echo bit 0x40000 here */
+	unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
+	struct de_srom_info_leaf *il;
+
+	/* download entire eeprom */
+	for (i = 0; i < sizeof(ee_data)/2; i++)
+		((u16 *)ee_data)[i] =
+			le16_to_cpu(tulip_read_eeprom(de->regs, i, ee_addr_size));
+
+	/* DEC now has a specification but early board makers
+	   just put the address in the first EEPROM locations. */
+	/* This does memcmp(eedata, eedata+16, 8) */
+	for (i = 0; i < 8; i ++)
+		if (ee_data[i] != ee_data[16+i])
+			sa_offset = 20;
+
+	/* store MAC address */
+	for (i = 0; i < 6; i ++)
+		de->dev->dev_addr[i] = ee_data[i + sa_offset];
+
+	/* get offset of controller 0 info leaf.  ignore 2nd byte. */
+	ofs = ee_data[SROMC0InfoLeaf];
+	if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block)))
+		goto bad_srom;
+
+	/* get pointer to info leaf */
+	il = (struct de_srom_info_leaf *) &ee_data[ofs];
+
+	/* paranoia checks */
+	if (il->n_blocks == 0)
+		goto bad_srom;
+	if ((sizeof(ee_data) - ofs) <
+	    (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks)))
+		goto bad_srom;
+
+	/* get default media type */
+	switch (il->default_media) {
+	case 0x0001: de->media_type = DE_MEDIA_BNC; break;
+	case 0x0002: de->media_type = DE_MEDIA_AUI; break;
+	case 0x0204: de->media_type = DE_MEDIA_TP_FD; break;
+	default: de->media_type = DE_MEDIA_TP_AUTO; break;
+	}
+
+	/* init SIA register values to defaults */
+	for (i = 0; i < DE_MAX_MEDIA; i++) {
+		de->media[i].type = DE_MEDIA_INVALID;
+		de->media[i].csr13 = 0xffff;	/* 0xffff == "no custom value" */
+		de->media[i].csr14 = 0xffff;
+		de->media[i].csr15 = 0xffff;
+	}
+
+	/* parse media blocks to see what medias are supported,
+	 * and if any custom CSR values are provided
+	 */
+	for (i = 0; i < il->n_blocks; i++) {
+		/* NOTE(review): &il[i] strides by sizeof(info leaf) per
+		 * iteration rather than by media-block size -- confirm
+		 * against the DEC SROM layout spec, where media blocks
+		 * normally follow the leaf contiguously. */
+		struct de_srom_media_block *ib = &il[i].media[0];
+		unsigned idx;
+
+		/* index based on media type in media block */
+		switch(ib->opts & MediaBlockMask) {
+		case 0: /* 10baseT */
+			de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half
+					  | SUPPORTED_Autoneg;
+			idx = DE_MEDIA_TP;
+			de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
+			break;
+		case 1: /* BNC */
+			de->media_supported |= SUPPORTED_BNC;
+			idx = DE_MEDIA_BNC;
+			break;
+		case 2: /* AUI */
+			de->media_supported |= SUPPORTED_AUI;
+			idx = DE_MEDIA_AUI;
+			break;
+		case 4: /* 10baseT-FD */
+			de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full
+					  | SUPPORTED_Autoneg;
+			idx = DE_MEDIA_TP_FD;
+			de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
+			break;
+		default:
+			goto bad_srom;
+		}
+
+		if (ib->opts & MediaCustomCSRs) {
+			de->media[idx].csr13 = ib->csr13;
+			de->media[idx].csr14 = ib->csr14;
+			de->media[idx].csr15 = ib->csr15;
+		}
+
+		de->media[idx].type = idx;
+		de->media_advertise |= de->media_supported;
+	}
+
+fill_defaults:
+	/* fill in defaults, for cases where custom CSRs not used */
+	/* (also reached from bad_srom below) */
+	for (i = 0; i < DE_MAX_MEDIA; i++) {
+		if (de->media[i].csr13 == 0xffff)
+			de->media[i].csr13 = t21041_csr13[i];
+		if (de->media[i].csr14 == 0xffff)
+			de->media[i].csr14 = t21041_csr14[i];
+		if (de->media[i].csr15 == 0xffff)
+			de->media[i].csr15 = t21041_csr15[i];
+	}
+
+	if (netif_msg_link(de))
+		printk(KERN_INFO "de%d: SROM-listed ports: %s%s%s\n",
+		       de->board_idx,
+		       de->media_supported & SUPPORTED_TP ? "TP " : "",
+		       de->media_supported & SUPPORTED_BNC ? "BNC " : "",
+		       de->media_supported & SUPPORTED_AUI ? "AUI" : "");
+
+	return;
+
+bad_srom:
+	/* for error cases, it's ok to assume we support all these */
+	for (i = 0; i < DE_MAX_MEDIA; i++)
+		de->media[i].type = i;
+	de->media_supported =
+		SUPPORTED_10baseT_Half |
+		SUPPORTED_10baseT_Full |
+		SUPPORTED_Autoneg |
+		SUPPORTED_TP |
+		SUPPORTED_AUI |
+		SUPPORTED_BNC;
+	goto fill_defaults;
+}
+
+/* Probe one 21040/21041 PCI device: allocate the net_device, enable
+ * and reserve the PCI resources, map the CSR registers, read the MAC
+ * address and media capabilities (address ROM on the 21040, SROM on
+ * the 21041), register the interface, then put the adapter back to
+ * sleep until de_open().  Error paths unwind in reverse order.
+ */
+static int __init de_init_one (struct pci_dev *pdev,
+			       const struct pci_device_id *ent)
+{
+	struct net_device *dev;
+	struct de_private *de;
+	int rc;
+	void *regs;
+	long pciaddr;
+	static int board_idx = -1;	/* counts probed boards, for messages */
+
+	board_idx++;
+
+#ifndef MODULE
+	if (board_idx == 0)
+		printk("%s", version);
+#endif
+
+	/* allocate a new ethernet device structure, and fill in defaults */
+	dev = alloc_etherdev(sizeof(struct de_private));
+	if (!dev)
+		return -ENOMEM;
+
+	SET_MODULE_OWNER(dev);
+	dev->open = de_open;
+	dev->stop = de_close;
+	dev->set_multicast_list = de_set_rx_mode;
+	dev->hard_start_xmit = de_start_xmit;
+	dev->get_stats = de_get_stats;
+	dev->do_ioctl = de_ioctl;
+	dev->tx_timeout = de_tx_timeout;
+	dev->watchdog_timeo = TX_TIMEOUT;
+
+	dev->irq = pdev->irq;
+
+	de = dev->priv;
+	/* driver_data 0 in de_pci_tbl selects the 21040 variant */
+	de->de21040 = ent->driver_data == 0 ? 1 : 0;
+	de->pdev = pdev;
+	de->dev = dev;
+	de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
+	de->board_idx = board_idx;
+	spin_lock_init (&de->lock);
+	init_timer(&de->media_timer);
+	if (de->de21040)
+		de->media_timer.function = de21040_media_timer;
+	else
+		de->media_timer.function = de21041_media_timer;
+	de->media_timer.data = (unsigned long) de;
+
+	netif_carrier_off(dev);
+	netif_stop_queue(dev);
+
+	/* wake up device, assign resources */
+	rc = pci_enable_device(pdev);
+	if (rc)
+		goto err_out_free;
+
+	/* reserve PCI resources to ensure driver atomicity */
+	rc = pci_request_regions(pdev, DRV_NAME);
+	if (rc)
+		goto err_out_disable;
+
+	/* check for invalid IRQ value */
+	if (pdev->irq < 2) {
+		rc = -EIO;
+		printk(KERN_ERR PFX "invalid irq (%d) for pci dev %s\n",
+		       pdev->irq, pdev->slot_name);
+		goto err_out_res;
+	}
+
+	/* obtain and check validity of PCI I/O address */
+	pciaddr = pci_resource_start(pdev, 1);	/* BAR 1 == MMIO */
+	if (!pciaddr) {
+		rc = -EIO;
+		printk(KERN_ERR PFX "no MMIO resource for pci dev %s\n",
+		       pdev->slot_name);
+		goto err_out_res;
+	}
+	if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
+		rc = -EIO;
+		printk(KERN_ERR PFX "MMIO resource (%lx) too small on pci dev %s\n",
+		       pci_resource_len(pdev, 1), pdev->slot_name);
+		goto err_out_res;
+	}
+
+	/* remap CSR registers */
+	regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
+	if (!regs) {
+		rc = -EIO;
+		printk(KERN_ERR PFX "Cannot map PCI MMIO (%lx@%lx) on pci dev %s\n",
+		       pci_resource_len(pdev, 1), pciaddr, pdev->slot_name);
+		goto err_out_res;
+	}
+	dev->base_addr = (unsigned long) regs;
+	de->regs = regs;
+
+	de_adapter_wake(de);
+
+	/* make sure hardware is not running */
+	de_stop_hw(de);
+
+	/* get MAC address, and some register values related to media types */
+	if (de->de21040) {
+		unsigned i;
+
+		de21040_get_mac_address(de);
+
+		/* 21040 has no SROM; media set is fixed */
+		de->media_type = DE_MEDIA_TP;
+		de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full |
+				       SUPPORTED_10baseT_Half | SUPPORTED_AUI;
+		de->media_advertise = de->media_supported;
+		for (i = 0; i < DE_MAX_MEDIA; i++) {
+			switch (i) {
+			case DE_MEDIA_AUI:
+			case DE_MEDIA_TP:
+			case DE_MEDIA_TP_FD:
+				de->media[i].type = i;
+				de->media[i].csr13 = t21040_csr13[i];
+				de->media[i].csr14 = t21040_csr14[i];
+				de->media[i].csr15 = t21040_csr15[i];
+				break;
+			default:
+				de->media[i].type = DE_MEDIA_INVALID;
+				break;
+			}
+		}
+	} else {
+		tulip_get_srom_info(de);
+	}
+
+	/* register new network interface with kernel */
+	rc = register_netdev(dev);
+	if (rc)
+		goto err_out_iomap;
+
+	/* print info about board and interface just registered */
+	printk (KERN_INFO "%s: %s at 0x%lx, "
+		"%02x:%02x:%02x:%02x:%02x:%02x, "
+		"IRQ %d\n",
+		dev->name,
+		de->de21040 ? "21040" : "21041",
+		dev->base_addr,
+		dev->dev_addr[0], dev->dev_addr[1],
+		dev->dev_addr[2], dev->dev_addr[3],
+		dev->dev_addr[4], dev->dev_addr[5],
+		dev->irq);
+
+	pci_set_drvdata(pdev, dev);
+
+	/* enable busmastering */
+	pci_set_master(pdev);
+
+	/* put adapter to sleep */
+	de_adapter_sleep(de);
+
+	return 0;
+
+err_out_iomap:
+	iounmap(regs);
+err_out_res:
+	pci_release_regions(pdev);
+err_out_disable:
+	pci_disable_device(pdev);
+err_out_free:
+	/* NOTE(review): kfree pairs with alloc_etherdev in this tree --
+	 * verify no separate free_netdev-style release is required */
+	kfree(dev);
+	return rc;
+}
+
+/* PCI remove hook: tear down everything de_init_one set up, in
+ * reverse order.  A NULL drvdata here would mean probe/remove
+ * imbalance, hence BUG().  Fix: check dev for NULL *before*
+ * dereferencing dev->priv (the original read dev->priv first).
+ */
+static void __exit de_remove_one (struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct de_private *de;
+	if (!dev)
+		BUG();
+	de = dev->priv;	/* safe: dev verified non-NULL above */
+	unregister_netdev(dev);
+	iounmap(de->regs);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	kfree(dev);
+}
+
+#ifdef CONFIG_PM
+
+/* PCI suspend hook.  If the interface is up: stop the media timer,
+ * quiesce the chip under de->lock with the IRQ line masked, detach
+ * from the stack, snapshot the hardware counters, drain the rings,
+ * and put the adapter into its low-power state.  rtnl_lock serializes
+ * against open/close and other configuration paths.
+ */
+static int de_suspend (struct pci_dev *pdev, u32 state)
+{
+	struct net_device *dev = pci_get_drvdata (pdev);
+	struct de_private *de = dev->priv;
+
+	rtnl_lock();
+	if (netif_running (dev)) {
+		del_timer_sync(&de->media_timer);
+
+		disable_irq(dev->irq);
+		spin_lock_irq(&de->lock);
+
+		de_stop_hw(de);
+		netif_stop_queue(dev);
+		netif_device_detach(dev);
+		netif_carrier_off(dev);
+
+		spin_unlock_irq(&de->lock);
+		enable_irq(dev->irq);
+
+		/* Update the error counts. */
+		__de_get_stats(de);
+
+		/* wait for in-flight irq handlers before freeing ring skbs */
+		synchronize_irq();
+		de_clean_rings(de);
+
+		de_adapter_sleep(de);
+		pci_disable_device(pdev);
+	} else {
+		netif_device_detach(dev);
+	}
+	rtnl_unlock();
+	return 0;
+}
+
+/* PCI resume hook.  netif_device_present() means we never detached in
+ * de_suspend (it was a no-op), so there is nothing to re-init.
+ * Otherwise re-enable the PCI device and reprogram the chip if the
+ * interface was running when we suspended.
+ */
+static int de_resume (struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata (pdev);
+	struct de_private *de = dev->priv;
+
+	rtnl_lock();
+	if (netif_device_present(dev))
+		goto out;
+	if (netif_running(dev)) {
+		pci_enable_device(pdev);
+		de_init_hw(de);
+		netif_device_attach(dev);
+	} else {
+		netif_device_attach(dev);
+	}
+out:
+	rtnl_unlock();
+	return 0;
+}
+
+#endif /* CONFIG_PM */
+
+/* PCI driver glue.  GNU "label:" initializer syntax matches the
+ * prevailing style of this kernel's drivers; suspend/resume are
+ * compiled in only when power management is configured.
+ */
+static struct pci_driver de_driver = {
+	name:		DRV_NAME,
+	id_table:	de_pci_tbl,
+	probe:		de_init_one,
+	remove:		de_remove_one,
+#ifdef CONFIG_PM
+	suspend:	de_suspend,
+	resume:		de_resume,
+#endif
+};
+
+/* Module entry: print the version banner (modular builds only; the
+ * built-in case prints it from de_init_one) and register with the PCI
+ * core, which then probes every matching device.
+ */
+static int __init de_init (void)
+{
+#ifdef MODULE
+	printk("%s", version);
+#endif
+	return pci_module_init (&de_driver);
+}
+
+/* Module exit: unregister from the PCI core; de_remove_one runs for
+ * each device still bound. */
+static void __exit de_exit (void)
+{
+	pci_unregister_driver (&de_driver);
+}
+
+module_init(de_init);
+module_exit(de_exit);
diff --git a/drivers/scsi/cpqfc.Readme b/drivers/scsi/cpqfc.Readme
index 71f999c8f..4bb5831e4 100644
--- a/drivers/scsi/cpqfc.Readme
+++ b/drivers/scsi/cpqfc.Readme
@@ -7,6 +7,11 @@ Tested in single and dual HBA configuration, 32 and 64bit busses,
SEST size 512 Exchanges (simultaneous I/Os) limited by module kmalloc()
max of 128k bytes contiguous.
+Ver 2.5.0 Nov 29, 2001
+ * eliminated io_request_lock. This change makes the driver specific
+ to the 2.5.x kernels.
+ * silenced excessively noisy printks.
+
Ver 2.1.1 Oct 18, 2001
* reinitialize Cmnd->SCp.sent_command (used to identify commands as
passthrus) on calling scsi_done, since the scsi mid layer does not
diff --git a/drivers/scsi/cpqfcTScontrol.c b/drivers/scsi/cpqfcTScontrol.c
index cb92e7374..9aaf86c99 100644
--- a/drivers/scsi/cpqfcTScontrol.c
+++ b/drivers/scsi/cpqfcTScontrol.c
@@ -97,11 +97,11 @@ int CpqTsCreateTachLiteQues( void* pHBA, int opcode)
fcChip->Exchanges = NULL;
cpqfcHBAdata->fcLQ = NULL;
- printk("Allocating %u for %u Exchanges ",
- (ULONG)sizeof(FC_EXCHANGES), TACH_MAX_XID);
+ /* printk("Allocating %u for %u Exchanges ",
+ (ULONG)sizeof(FC_EXCHANGES), TACH_MAX_XID); */
fcChip->Exchanges = pci_alloc_consistent(cpqfcHBAdata->PciDev,
sizeof(FC_EXCHANGES), &fcChip->exch_dma_handle);
- printk("@ %p\n", fcChip->Exchanges);
+ /* printk("@ %p\n", fcChip->Exchanges); */
if( fcChip->Exchanges == NULL ) // fatal error!!
{
@@ -112,10 +112,10 @@ int CpqTsCreateTachLiteQues( void* pHBA, int opcode)
memset( fcChip->Exchanges, 0, sizeof( FC_EXCHANGES));
- printk("Allocating %u for LinkQ ", (ULONG)sizeof(FC_LINK_QUE));
+ /* printk("Allocating %u for LinkQ ", (ULONG)sizeof(FC_LINK_QUE)); */
cpqfcHBAdata->fcLQ = pci_alloc_consistent(cpqfcHBAdata->PciDev,
sizeof( FC_LINK_QUE), &cpqfcHBAdata->fcLQ_dma_handle);
- printk("@ %p (%u elements)\n", cpqfcHBAdata->fcLQ, FC_LINKQ_DEPTH);
+ /* printk("@ %p (%u elements)\n", cpqfcHBAdata->fcLQ, FC_LINKQ_DEPTH); */
memset( cpqfcHBAdata->fcLQ, 0, sizeof( FC_LINK_QUE));
if( cpqfcHBAdata->fcLQ == NULL ) // fatal error!!
@@ -222,8 +222,8 @@ int CpqTsCreateTachLiteQues( void* pHBA, int opcode)
// power-of-2 boundary
// LIVE DANGEROUSLY! Assume the boundary for SEST mem will
// be on physical page (e.g. 4k) boundary.
- printk("Allocating %u for TachSEST for %u Exchanges\n",
- (ULONG)sizeof(TachSEST), TACH_SEST_LEN);
+ /* printk("Allocating %u for TachSEST for %u Exchanges\n",
+ (ULONG)sizeof(TachSEST), TACH_SEST_LEN); */
fcChip->SEST = fcMemManager( cpqfcHBAdata->PciDev,
&cpqfcHBAdata->dynamic_mem[0],
sizeof(TachSEST), 4, 0L, &SESTdma );
@@ -289,7 +289,7 @@ int CpqTsCreateTachLiteQues( void* pHBA, int opcode)
// set the Host's pointer for Tachyon to access
- printk(" cpqfcTS: writing IMQ BASE %Xh ", fcChip->IMQ->base );
+ /* printk(" cpqfcTS: writing IMQ BASE %Xh ", fcChip->IMQ->base ); */
writel( fcChip->IMQ->base,
(fcChip->Registers.ReMapMemBase + IMQ_BASE));
@@ -315,9 +315,9 @@ int CpqTsCreateTachLiteQues( void* pHBA, int opcode)
return -1; // failed
}
#endif
-//#if DBG
+#if DBG
printk(" PI %Xh\n", (ULONG)ulAddr );
-//#endif
+#endif
writel( (ULONG)ulAddr,
(fcChip->Registers.ReMapMemBase + IMQ_PRODUCER_INDEX));
@@ -337,9 +337,9 @@ int CpqTsCreateTachLiteQues( void* pHBA, int opcode)
writel( fcChip->SEST->base,
(fcChip->Registers.ReMapMemBase + TL_MEM_SEST_BASE));
- printk(" cpqfcTS: SEST %p(virt): Wrote base %Xh @ %p\n",
+ /* printk(" cpqfcTS: SEST %p(virt): Wrote base %Xh @ %p\n",
fcChip->SEST, fcChip->SEST->base,
- fcChip->Registers.ReMapMemBase + TL_MEM_SEST_BASE);
+ fcChip->Registers.ReMapMemBase + TL_MEM_SEST_BASE); */
writel( fcChip->SEST->length,
(fcChip->Registers.ReMapMemBase + TL_MEM_SEST_LENGTH));
@@ -1723,7 +1723,7 @@ int CpqTsInitializeTachLite( void *pHBA, int opcode1, int opcode2)
UCHAR Minor = (UCHAR)(RevId & 0x3);
UCHAR Major = (UCHAR)((RevId & 0x1C) >>2);
- printk(" HBA Tachyon RevId %d.%d\n", Major, Minor);
+ /* printk(" HBA Tachyon RevId %d.%d\n", Major, Minor); */
if( (Major == 1) && (Minor == 2) )
{
sprintf( cpqfcHBAdata->fcChip.Name, STACHLITE66_TS12);
diff --git a/drivers/scsi/cpqfcTSinit.c b/drivers/scsi/cpqfcTSinit.c
index 6cc7e178a..aad2a1a42 100644
--- a/drivers/scsi/cpqfcTSinit.c
+++ b/drivers/scsi/cpqfcTSinit.c
@@ -188,7 +188,7 @@ static void Cpqfc_initHBAdata( CPQFCHBA *cpqfcHBAdata, struct pci_dev *PciDev )
DEBUG_PCI(printk(" IOBaseU = %x\n",
cpqfcHBAdata->fcChip.Registers.IOBaseU));
- printk(" ioremap'd Membase: %p\n", cpqfcHBAdata->fcChip.Registers.ReMapMemBase);
+ /* printk(" ioremap'd Membase: %p\n", cpqfcHBAdata->fcChip.Registers.ReMapMemBase); */
DEBUG_PCI(printk(" SFQconsumerIndex.address = %p\n",
cpqfcHBAdata->fcChip.Registers.SFQconsumerIndex.address));
@@ -242,7 +242,7 @@ static void launch_FCworker_thread(struct Scsi_Host *HostAdapter)
cpqfcHBAdata->notify_wt = &sem;
/* must unlock before kernel_thread(), for it may cause a reschedule. */
- spin_unlock_irq(&io_request_lock);
+ spin_unlock_irq(&HostAdapter->host_lock);
kernel_thread((int (*)(void *))cpqfcTSWorkerThread,
(void *) HostAdapter, 0);
/*
@@ -250,7 +250,7 @@ static void launch_FCworker_thread(struct Scsi_Host *HostAdapter)
*/
down (&sem);
- spin_lock_irq(&io_request_lock);
+ spin_lock_irq(&HostAdapter->host_lock);
cpqfcHBAdata->notify_wt = NULL;
LEAVE("launch_FC_worker_thread");
@@ -312,8 +312,8 @@ int cpqfcTS_detect(Scsi_Host_Template *ScsiHostTemplate)
}
// NOTE: (kernel 2.2.12-32) limits allocation to 128k bytes...
- printk(" scsi_register allocating %d bytes for FC HBA\n",
- (ULONG)sizeof(CPQFCHBA));
+ /* printk(" scsi_register allocating %d bytes for FC HBA\n",
+ (ULONG)sizeof(CPQFCHBA)); */
HostAdapter = scsi_register( ScsiHostTemplate, sizeof( CPQFCHBA ) );
@@ -403,9 +403,11 @@ int cpqfcTS_detect(Scsi_Host_Template *ScsiHostTemplate)
DEBUG_PCI(printk(" Requesting 255 I/O addresses @ %x\n",
cpqfcHBAdata->fcChip.Registers.IOBaseU ));
-
+
+
// start our kernel worker thread
+ spin_lock_irq(&HostAdapter->host_lock);
launch_FCworker_thread(HostAdapter);
@@ -445,15 +447,16 @@ int cpqfcTS_detect(Scsi_Host_Template *ScsiHostTemplate)
unsigned long stop_time;
- spin_unlock_irq(&io_request_lock);
+ spin_unlock_irq(&HostAdapter->host_lock);
stop_time = jiffies + 4*HZ;
while ( time_before(jiffies, stop_time) )
schedule(); // (our worker task needs to run)
- spin_lock_irq(&io_request_lock);
}
+ spin_lock_irq(&HostAdapter->host_lock);
NumberOfAdapters++;
+ spin_unlock_irq(&HostAdapter->host_lock);
} // end of while()
}
@@ -1593,9 +1596,9 @@ int cpqfcTS_eh_device_reset(Scsi_Cmnd *Cmnd)
int retval;
Scsi_Device *SDpnt = Cmnd->device;
// printk(" ENTERING cpqfcTS_eh_device_reset() \n");
- spin_unlock_irq(&io_request_lock);
+ spin_unlock_irq(&Cmnd->host->host_lock);
retval = cpqfcTS_TargetDeviceReset( SDpnt, 0);
- spin_lock_irq(&io_request_lock);
+ spin_lock_irq(&Cmnd->host->host_lock);
return retval;
}
@@ -1650,8 +1653,7 @@ void cpqfcTS_intr_handler( int irq,
UCHAR IntPending;
ENTER("intr_handler");
-
- spin_lock_irqsave( &io_request_lock, flags);
+ spin_lock_irqsave( &HostAdapter->host_lock, flags);
// is this our INT?
IntPending = readb( cpqfcHBA->fcChip.Registers.INTPEND.address);
@@ -1700,7 +1702,7 @@ void cpqfcTS_intr_handler( int irq,
}
}
}
- spin_unlock_irqrestore( &io_request_lock, flags);
+ spin_unlock_irqrestore( &HostAdapter->host_lock, flags);
LEAVE("intr_handler");
}
diff --git a/drivers/scsi/cpqfcTSstructs.h b/drivers/scsi/cpqfcTSstructs.h
index 0bdce7d83..29a0fe819 100644
--- a/drivers/scsi/cpqfcTSstructs.h
+++ b/drivers/scsi/cpqfcTSstructs.h
@@ -32,8 +32,8 @@
#define CPQFCTS_DRIVER_VER(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
// don't forget to also change MODULE_DESCRIPTION in cpqfcTSinit.c
#define VER_MAJOR 2
-#define VER_MINOR 1
-#define VER_SUBMINOR 1
+#define VER_MINOR 5
+#define VER_SUBMINOR 0
// Macros for kernel (esp. SMP) tracing using a PCI analyzer
// (e.g. x86).
diff --git a/drivers/scsi/cpqfcTSworker.c b/drivers/scsi/cpqfcTSworker.c
index ad94adb68..f698dc3be 100644
--- a/drivers/scsi/cpqfcTSworker.c
+++ b/drivers/scsi/cpqfcTSworker.c
@@ -227,7 +227,7 @@ void cpqfcTSWorkerThread( void *host)
PCI_TRACE( 0x90)
// first, take the IO lock so the SCSI upper layers can't call
// into our _quecommand function (this also disables INTs)
- spin_lock_irqsave( &io_request_lock, flags); // STOP _que function
+ spin_lock_irqsave( &HostAdapter->host_lock, flags); // STOP _que function
PCI_TRACE( 0x90)
CPQ_SPINLOCK_HBA( cpqfcHBAdata)
@@ -241,7 +241,7 @@ void cpqfcTSWorkerThread( void *host)
PCI_TRACE( 0x90)
// release the IO lock (and re-enable interrupts)
- spin_unlock_irqrestore( &io_request_lock, flags);
+ spin_unlock_irqrestore( &HostAdapter->host_lock, flags);
// disable OUR HBA interrupt (keep them off as much as possible
// during error recovery)
@@ -3077,7 +3077,8 @@ void cpqfcTSheartbeat( unsigned long ptr )
if( cpqfcHBAdata->BoardLock) // Worker Task Running?
goto Skip;
- spin_lock_irqsave( &io_request_lock, flags); // STOP _que function
+ // STOP _que function
+ spin_lock_irqsave( &cpqfcHBAdata->HostAdapter->host_lock, flags);
PCI_TRACE( 0xA8)
@@ -3085,7 +3086,7 @@ void cpqfcTSheartbeat( unsigned long ptr )
cpqfcHBAdata->BoardLock = &BoardLock; // stop Linux SCSI command queuing
// release the IO lock (and re-enable interrupts)
- spin_unlock_irqrestore( &io_request_lock, flags);
+ spin_unlock_irqrestore( &cpqfcHBAdata->HostAdapter->host_lock, flags);
// Ensure no contention from _quecommand or Worker process
CPQ_SPINLOCK_HBA( cpqfcHBAdata)
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index d52e503fb..b7dbed8af 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -242,7 +242,7 @@ static inline void idescsi_free_bio (struct bio *bio)
while (bio) {
bhp = bio;
bio = bio->bi_next;
- kfree (bhp);
+ bio_put(bhp);
}
}
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index db5ec9c27..3713c3284 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -181,18 +181,23 @@ void scsi_build_commandblocks(Scsi_Device * SDpnt);
void scsi_initialize_queue(Scsi_Device * SDpnt, struct Scsi_Host * SHpnt)
{
request_queue_t *q = &SDpnt->request_queue;
+ int max_segments = SHpnt->sg_tablesize;
blk_init_queue(q, scsi_request_fn);
q->queuedata = (void *) SDpnt;
+
#ifdef DMA_CHUNK_SIZE
- blk_queue_max_segments(q, 64);
-#else
- blk_queue_max_segments(q, SHpnt->sg_tablesize);
+ if (max_segments > 64)
+ max_segments = 64;
#endif
+
+ blk_queue_max_segments(q, max_segments);
blk_queue_max_sectors(q, SHpnt->max_sectors);
if (!SHpnt->use_clustering)
clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+ if (SHpnt->unchecked_isa_dma)
+ blk_queue_segment_boundary(q, ISA_DMA_THRESHOLD);
}
#ifdef MODULE
diff --git a/drivers/scsi/scsi_merge.c b/drivers/scsi/scsi_merge.c
index 734ff2ebe..8710d97d5 100644
--- a/drivers/scsi/scsi_merge.c
+++ b/drivers/scsi/scsi_merge.c
@@ -189,25 +189,23 @@ __inline static int __count_segments(struct request *req,
void
recount_segments(Scsi_Cmnd * SCpnt)
{
- struct request *req;
- struct Scsi_Host *SHpnt;
- Scsi_Device * SDpnt;
-
- req = &SCpnt->request;
- SHpnt = SCpnt->host;
- SDpnt = SCpnt->device;
+ struct request *req = &SCpnt->request;
+ struct Scsi_Host *SHpnt = SCpnt->host;
req->nr_segments = __count_segments(req, SHpnt->unchecked_isa_dma,NULL);
}
+/*
+ * IOMMU hackery for sparc64
+ */
+#ifdef DMA_CHUNK_SIZE
+
#define MERGEABLE_BUFFERS(X,Y) \
-((((bio_to_phys((X))+(X)->bi_size)|(bio_to_phys((Y)))) & \
- (DMA_CHUNK_SIZE - 1)) == 0)
+ ((((bvec_to_phys(__BVEC_END((X))) + __BVEC_END((X))->bv_len) | bio_to_phys((Y))) & (DMA_CHUNK_SIZE - 1)) == 0)
-#ifdef DMA_CHUNK_SIZE
static inline int scsi_new_mergeable(request_queue_t * q,
struct request * req,
- struct Scsi_Host *SHpnt)
+ int nr_segs)
{
/*
* pci_map_sg will be able to merge these two
@@ -216,49 +214,51 @@ static inline int scsi_new_mergeable(request_queue_t * q,
* scsi.c allocates for this purpose
* min(64,sg_tablesize) entries.
*/
- if (req->nr_segments >= q->max_segments)
+ if (req->nr_segments + nr_segs > q->max_segments)
return 0;
- req->nr_segments++;
+ req->nr_segments += nr_segs;
return 1;
}
static inline int scsi_new_segment(request_queue_t * q,
struct request * req,
- struct bio *bio)
+ struct bio *bio, int nr_segs)
{
/*
* pci_map_sg won't be able to map these two
* into a single hardware sg entry, so we have to
* check if things fit into sg_tablesize.
*/
- if (req->nr_hw_segments >= q->max_segments)
+ if (req->nr_hw_segments + nr_segs > q->max_segments)
return 0;
- else if (req->nr_segments + bio->bi_vcnt > q->max_segments)
+ else if (req->nr_segments + nr_segs > q->max_segments)
return 0;
- req->nr_hw_segments += bio->bi_vcnt;
- req->nr_segments += bio->bi_vcnt;
+ req->nr_hw_segments += nr_segs;
+ req->nr_segments += nr_segs;
return 1;
}
-#else
+#else /* DMA_CHUNK_SIZE */
static inline int scsi_new_segment(request_queue_t * q,
struct request * req,
- struct bio *bio)
+ struct bio *bio, int nr_segs)
{
- if (req->nr_segments + bio->bi_vcnt > q->max_segments)
+ if (req->nr_segments + nr_segs > q->max_segments) {
+ req->flags |= REQ_NOMERGE;
return 0;
+ }
/*
* This will form the start of a new segment. Bump the
* counter.
*/
- req->nr_segments += bio->bi_vcnt;
+ req->nr_segments += nr_segs;
return 1;
}
-#endif
+#endif /* DMA_CHUNK_SIZE */
/*
* Function: __scsi_merge_fn()
@@ -294,36 +294,47 @@ static inline int scsi_new_segment(request_queue_t * q,
*/
__inline static int __scsi_back_merge_fn(request_queue_t * q,
struct request *req,
- struct bio *bio,
- int dma_host)
+ struct bio *bio)
{
- if (req->nr_sectors + bio_sectors(bio) > q->max_sectors)
- return 0;
- else if (!BIO_SEG_BOUNDARY(q, req->biotail, bio))
+ int bio_segs;
+
+ if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
+ req->flags |= REQ_NOMERGE;
return 0;
+ }
+
+ bio_segs = bio_hw_segments(q, bio);
+ if (blk_contig_segment(q, req->biotail, bio))
+ bio_segs--;
#ifdef DMA_CHUNK_SIZE
- if (MERGEABLE_BUFFERS(req->biotail, bio))
- return scsi_new_mergeable(q, req, q->queuedata);
+ if (MERGEABLE_BUFFERS(bio, req->bio))
+ return scsi_new_mergeable(q, req, bio_segs);
#endif
- return scsi_new_segment(q, req, bio);
+
+ return scsi_new_segment(q, req, bio, bio_segs);
}
__inline static int __scsi_front_merge_fn(request_queue_t * q,
struct request *req,
- struct bio *bio,
- int dma_host)
+ struct bio *bio)
{
- if (req->nr_sectors + bio_sectors(bio) > q->max_sectors)
- return 0;
- else if (!BIO_SEG_BOUNDARY(q, bio, req->bio))
+ int bio_segs;
+
+ if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
+ req->flags |= REQ_NOMERGE;
return 0;
+ }
+
+ bio_segs = bio_hw_segments(q, bio);
+ if (blk_contig_segment(q, req->biotail, bio))
+ bio_segs--;
#ifdef DMA_CHUNK_SIZE
if (MERGEABLE_BUFFERS(bio, req->bio))
- return scsi_new_mergeable(q, req, q->queuedata);
+ return scsi_new_mergeable(q, req, bio_segs);
#endif
- return scsi_new_segment(q, req, bio);
+ return scsi_new_segment(q, req, bio, bio_segs);
}
/*
@@ -343,7 +354,7 @@ __inline static int __scsi_front_merge_fn(request_queue_t * q,
* Notes: Optimized for different cases depending upon whether
* ISA DMA is in use and whether clustering should be used.
*/
-#define MERGEFCT(_FUNCTION, _BACK_FRONT, _DMA) \
+#define MERGEFCT(_FUNCTION, _BACK_FRONT) \
static int _FUNCTION(request_queue_t * q, \
struct request * req, \
struct bio *bio) \
@@ -351,16 +362,12 @@ static int _FUNCTION(request_queue_t * q, \
int ret; \
ret = __scsi_ ## _BACK_FRONT ## _merge_fn(q, \
req, \
- bio, \
- _DMA); \
+ bio); \
return ret; \
}
-MERGEFCT(scsi_back_merge_fn_, back, 0)
-MERGEFCT(scsi_back_merge_fn_d, back, 1)
-
-MERGEFCT(scsi_front_merge_fn_, front, 0)
-MERGEFCT(scsi_front_merge_fn_d, front, 1)
+MERGEFCT(scsi_back_merge_fn, back)
+MERGEFCT(scsi_front_merge_fn, front)
/*
* Function: __scsi_merge_requests_fn()
@@ -390,8 +397,7 @@ __inline static int __scsi_merge_requests_fn(request_queue_t * q,
struct request *next,
int dma_host)
{
- Scsi_Device *SDpnt;
- struct Scsi_Host *SHpnt;
+ int bio_segs;
/*
* First check if the either of the requests are re-queued
@@ -399,69 +405,44 @@ __inline static int __scsi_merge_requests_fn(request_queue_t * q,
*/
if (req->special || next->special)
return 0;
- else if (!BIO_SEG_BOUNDARY(q, req->biotail, next->bio))
- return 0;
-
- SDpnt = (Scsi_Device *) q->queuedata;
- SHpnt = SDpnt->host;
-#ifdef DMA_CHUNK_SIZE
- /* If it would not fit into prepared memory space for sg chain,
- * then don't allow the merge.
+ /*
+ * will become to large?
*/
- if (req->nr_segments + next->nr_segments - 1 > q->max_segments)
+ if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
return 0;
- if (req->nr_hw_segments + next->nr_hw_segments - 1 > q->max_segments)
- return 0;
-#else
+ bio_segs = req->nr_segments + next->nr_segments;
+ if (blk_contig_segment(q, req->biotail, next->bio))
+ bio_segs--;
+
/*
- * If the two requests together are too large (even assuming that we
- * can merge the boundary requests into one segment, then don't
- * allow the merge.
+ * exceeds our max allowed segments?
*/
- if (req->nr_segments + next->nr_segments - 1 > q->max_segments) {
- return 0;
- }
-#endif
-
- if ((req->nr_sectors + next->nr_sectors) > SHpnt->max_sectors)
+ if (bio_segs > q->max_segments)
return 0;
#ifdef DMA_CHUNK_SIZE
- if (req->nr_segments + next->nr_segments > q->max_segments)
- return 0;
+ bio_segs = req->nr_hw_segments + next->nr_hw_segments;
+ if (blk_contig_segment(q, req->biotail, next->bio))
+ bio_segs--;
/* If dynamic DMA mapping can merge last segment in req with
* first segment in next, then the check for hw segments was
* done above already, so we can always merge.
*/
- if (MERGEABLE_BUFFERS(req->biotail, next->bio)) {
- req->nr_hw_segments += next->nr_hw_segments - 1;
- } else if (req->nr_hw_segments + next->nr_hw_segments > q->max_segments) {
+ if (bio_segs > q->max_segments)
return 0;
- } else {
- req->nr_hw_segments += next->nr_hw_segments;
- }
- req->nr_segments += next->nr_segments;
- return 1;
-#else
+
+ req->nr_hw_segments = bio_segs;
+#endif
+
/*
- * We know that the two requests at the boundary should not be combined.
- * Make sure we can fix something that is the sum of the two.
- * A slightly stricter test than we had above.
+ * This will form the start of a new segment. Bump the
+ * counter.
*/
- if (req->nr_segments + next->nr_segments > q->max_segments) {
- return 0;
- } else {
- /*
- * This will form the start of a new segment. Bump the
- * counter.
- */
- req->nr_segments += next->nr_segments;
- return 1;
- }
-#endif
+ req->nr_segments = bio_segs;
+ return 1;
}
/*
@@ -530,7 +511,6 @@ __inline static int __init_io(Scsi_Cmnd * SCpnt,
int dma_host)
{
struct bio * bio;
- struct bio * bioprev;
char * buff;
int count;
int i;
@@ -603,7 +583,6 @@ __inline static int __init_io(Scsi_Cmnd * SCpnt,
SCpnt->request_buffer = (char *) sgpnt;
SCpnt->request_bufflen = 0;
req->buffer = NULL;
- bioprev = NULL;
if (dma_host)
bbpnt = (void **) ((char *)sgpnt +
@@ -833,13 +812,13 @@ void initialize_merge_fn(Scsi_Device * SDpnt)
* rather than rely upon the default behavior of ll_rw_blk.
*/
if (SHpnt->unchecked_isa_dma == 0) {
- q->back_merge_fn = scsi_back_merge_fn_;
- q->front_merge_fn = scsi_front_merge_fn_;
+ q->back_merge_fn = scsi_back_merge_fn;
+ q->front_merge_fn = scsi_front_merge_fn;
q->merge_requests_fn = scsi_merge_requests_fn_;
SDpnt->scsi_init_io_fn = scsi_init_io_v;
} else {
- q->back_merge_fn = scsi_back_merge_fn_d;
- q->front_merge_fn = scsi_front_merge_fn_d;
+ q->back_merge_fn = scsi_back_merge_fn;
+ q->front_merge_fn = scsi_front_merge_fn;
q->merge_requests_fn = scsi_merge_requests_fn_d;
SDpnt->scsi_init_io_fn = scsi_init_io_vd;
}
diff --git a/drivers/sound/via82cxxx_audio.c b/drivers/sound/via82cxxx_audio.c
index d338c35be..a860d2889 100644
--- a/drivers/sound/via82cxxx_audio.c
+++ b/drivers/sound/via82cxxx_audio.c
@@ -1,6 +1,6 @@
/*
* Support for VIA 82Cxxx Audio Codecs
- * Copyright 1999,2000 Jeff Garzik <jgarzik@mandrakesoft.com>
+ * Copyright 1999,2000 Jeff Garzik
*
* Distributed under the GNU GENERAL PUBLIC LICENSE (GPL) Version 2.
* See the "COPYING" file distributed with this software for more info.
@@ -8,9 +8,6 @@
* For a list of known bugs (errata) and documentation,
* see via-audio.pdf in linux/Documentation/DocBook.
* If this documentation does not exist, run "make pdfdocs".
- * If "make pdfdocs" fails, obtain the documentation from
- * the driver's Website at
- * http://gtf.org/garzik/drivers/via82cxxx/
*
*/
@@ -3357,7 +3354,7 @@ static void __exit cleanup_via82cxxx_audio(void)
module_init(init_via82cxxx_audio);
module_exit(cleanup_via82cxxx_audio);
-MODULE_AUTHOR("Jeff Garzik <jgarzik@mandrakesoft.com>");
+MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("DSP audio and mixer driver for Via 82Cxxx audio devices");
MODULE_LICENSE("GPL");
diff --git a/fs/bio.c b/fs/bio.c
index f2e5fb160..f53efc19a 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -33,6 +33,7 @@
#include <linux/compiler.h>
#include <asm/uaccess.h>
+#include <asm/io.h>
kmem_cache_t *bio_cachep;
static spinlock_t __cacheline_aligned bio_lock = SPIN_LOCK_UNLOCKED;
@@ -53,7 +54,8 @@ static struct biovec_pool bvec_list[BIOVEC_NR_POOLS];
/*
* if you change this list, also change bvec_alloc or things will
- * break badly!
+ * break badly! cannot be bigger than what you can fit into an
+ * unsigned short
*/
static const int bvec_pool_sizes[BIOVEC_NR_POOLS] = { 1, 4, 16, 64, 128, 256 };
@@ -204,6 +206,7 @@ inline void bio_init(struct bio *bio)
bio->bi_rw = 0;
bio->bi_vcnt = 0;
bio->bi_idx = 0;
+ bio->bi_hw_seg = 0;
bio->bi_size = 0;
bio->bi_end_io = NULL;
atomic_set(&bio->bi_cnt, 1);
@@ -312,6 +315,14 @@ void bio_put(struct bio *bio)
bio_free(bio);
}
+inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
+{
+ if (unlikely(!(bio->bi_flags & BIO_SEG_VALID)))
+ blk_recount_segments(q, bio);
+
+ return bio->bi_hw_seg;
+}
+
/**
* __bio_clone - clone a bio
* @bio: destination bio
@@ -331,11 +342,15 @@ inline void __bio_clone(struct bio *bio, struct bio *bio_src)
bio->bi_rw = bio_src->bi_rw;
/*
- * notes -- maybe just leave bi_idx alone. bi_max has no used
- * on a cloned bio
+ * notes -- maybe just leave bi_idx alone. bi_max has no use
+ * on a cloned bio. assume identical mapping for the clone
*/
bio->bi_vcnt = bio_src->bi_vcnt;
bio->bi_idx = bio_src->bi_idx;
+ if (bio_src->bi_flags & (1 << BIO_SEG_VALID)) {
+ bio->bi_hw_seg = bio_src->bi_hw_seg;
+ bio->bi_flags |= (1 << BIO_SEG_VALID);
+ }
bio->bi_size = bio_src->bi_size;
bio->bi_max = bio_src->bi_max;
}
@@ -387,8 +402,15 @@ struct bio *bio_copy(struct bio *bio, int gfp_mask, int copy)
if (bbv->bv_page == NULL)
goto oom;
+ bbv->bv_len = bv->bv_len;
+ bbv->bv_offset = bv->bv_offset;
+
+ /*
+ * if doing a copy for a READ request, no need
+ * to memcpy page data
+ */
if (!copy)
- goto fill_in;
+ continue;
if (gfp_mask & __GFP_WAIT) {
vfrom = kmap(bv->bv_page);
@@ -408,10 +430,6 @@ struct bio *bio_copy(struct bio *bio, int gfp_mask, int copy)
kunmap_atomic(vfrom, KM_BIO_IRQ);
local_irq_restore(flags);
}
-
-fill_in:
- bbv->bv_len = bv->bv_len;
- bbv->bv_offset = bv->bv_offset;
}
b->bi_sector = bio->bi_sector;
@@ -595,9 +613,6 @@ next_chunk:
}
queue_io:
- if (bio->bi_vcnt > 1)
- bio->bi_flags |= 1 << BIO_PREBUILT;
-
submit_bio(rw, bio);
if (total_nr_pages)
diff --git a/fs/buffer.c b/fs/buffer.c
index 5ede8005e..405e81410 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2005,12 +2005,12 @@ int generic_direct_IO(int rw, struct inode * inode, struct kiobuf * iobuf, unsig
{
int i, nr_blocks, retval;
sector_t *blocks = iobuf->blocks;
+ struct buffer_head bh;
+ bh.b_dev = inode->i_dev;
nr_blocks = iobuf->length / blocksize;
/* build the blocklist */
for (i = 0; i < nr_blocks; i++, blocknr++) {
- struct buffer_head bh;
-
bh.b_state = 0;
bh.b_dev = inode->i_dev;
bh.b_size = blocksize;
@@ -2036,7 +2036,8 @@ int generic_direct_IO(int rw, struct inode * inode, struct kiobuf * iobuf, unsig
blocks[i] = bh.b_blocknr;
}
- retval = brw_kiovec(rw, 1, &iobuf, inode->i_dev, blocks, blocksize);
+ /* This does not understand multi-device filesystems currently */
+ retval = brw_kiovec(rw, 1, &iobuf, bh.b_dev, blocks, blocksize);
out:
return retval;
diff --git a/fs/dcache.c b/fs/dcache.c
index f04948742..d4379a0ee 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -543,7 +543,7 @@ void shrink_dcache_parent(struct dentry * parent)
* too much.
*
* Priority:
- * 0 - very urgent: shrink everything
+ * 1 - very urgent: shrink everything
* ...
* 6 - base-level: try to shrink a bit.
*/
diff --git a/fs/dquot.c b/fs/dquot.c
index 0f41e8eeb..b90fe6146 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -408,10 +408,25 @@ static void prune_dqcache(int count)
}
}
+/*
+ * This is called from kswapd when we think we need some
+ * more memory, but aren't really sure how much. So we
+ * carefully try to free a _bit_ of our dqcache, but not
+ * too much.
+ *
+ * Priority:
+ * 1 - very urgent: shrink everything
+ * ...
+ * 6 - base-level: try to shrink a bit.
+ */
+
int shrink_dqcache_memory(int priority, unsigned int gfp_mask)
{
+ int count = 0;
+
lock_kernel();
- prune_dqcache(nr_free_dquots / (priority + 1));
+ count = nr_free_dquots / priority;
+ prune_dqcache(count);
unlock_kernel();
kmem_cache_shrink(dquot_cachep);
return 0;
diff --git a/fs/inode.c b/fs/inode.c
index 10bbf20f3..184f8f19d 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -707,7 +707,17 @@ void prune_icache(int goal)
if (goal)
schedule_task(&unused_inodes_flush_task);
}
-
+/*
+ * This is called from kswapd when we think we need some
+ * more memory, but aren't really sure how much. So we
+ * carefully try to free a _bit_ of our icache, but not
+ * too much.
+ *
+ * Priority:
+ * 1 - very urgent: shrink everything
+ * ...
+ * 6 - base-level: try to shrink a bit.
+ */
int shrink_icache_memory(int priority, int gfp_mask)
{
int count = 0;
diff --git a/fs/ntfs/Makefile b/fs/ntfs/Makefile
index c178a61d6..d331eb6f9 100644
--- a/fs/ntfs/Makefile
+++ b/fs/ntfs/Makefile
@@ -5,7 +5,7 @@ O_TARGET := ntfs.o
obj-y := fs.o sysctl.o support.o util.o inode.o dir.o super.o attr.o unistr.o
obj-m := $(O_TARGET)
# New version format started 3 February 2001.
-EXTRA_CFLAGS = -DNTFS_VERSION=\"1.1.20\" #-DDEBUG
+EXTRA_CFLAGS = -DNTFS_VERSION=\"1.1.21\" #-DDEBUG
include $(TOPDIR)/Rules.make
diff --git a/fs/ntfs/attr.c b/fs/ntfs/attr.c
index 98b314a08..b64aee7e1 100644
--- a/fs/ntfs/attr.c
+++ b/fs/ntfs/attr.c
@@ -548,7 +548,7 @@ int ntfs_create_attr(ntfs_inode *ino, int anum, char *aname, void *data,
* attribute.
*/
static int ntfs_process_runs(ntfs_inode *ino, ntfs_attribute* attr,
- unsigned char *data)
+ unsigned char *data)
{
int startvcn, endvcn;
int vcn, cnum;
@@ -622,7 +622,7 @@ static int ntfs_process_runs(ntfs_inode *ino, ntfs_attribute* attr,
}
/* Insert the attribute starting at attr in the inode ino. */
-int ntfs_insert_attribute(ntfs_inode *ino, unsigned char* attrdata)
+int ntfs_insert_attribute(ntfs_inode *ino, unsigned char *attrdata)
{
int i, found;
int type;
diff --git a/fs/ntfs/fs.c b/fs/ntfs/fs.c
index fe14088a7..4533c63dc 100644
--- a/fs/ntfs/fs.c
+++ b/fs/ntfs/fs.c
@@ -303,10 +303,8 @@ static int ntfs_readdir(struct file* filp, void *dirent, filldir_t filldir)
ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): After ntfs_getdir_unsorted()"
" calls, f_pos 0x%Lx.\n", filp->f_pos);
if (!err) {
-#ifdef DEBUG
- if (cb.ph != 0x7fff || cb.pl)
- BUG();
done:
+#ifdef DEBUG
if (!cb.ret_code)
ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): EOD, f_pos "
"0x%Lx, returning 0.\n", filp->f_pos);
@@ -314,8 +312,6 @@ done:
ntfs_debug(DEBUG_OTHER, __FUNCTION__ "(): filldir "
"returned %i, returning 0, f_pos "
"0x%Lx.\n", cb.ret_code, filp->f_pos);
-#else
-done:
#endif
return 0;
}
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index c55dc38dc..108465acc 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -159,7 +159,7 @@ static int ntfs_insert_mft_attributes(ntfs_inode* ino, char *mft, int mftno)
* Return 0 on success or -errno on error.
*/
static int ntfs_insert_mft_attribute(ntfs_inode* ino, int mftno,
- ntfs_u8 *attr)
+ ntfs_u8 *attr)
{
int i, error, present = 0;
@@ -207,6 +207,7 @@ static int parse_attributes(ntfs_inode *ino, ntfs_u8 *alist, int *plen)
int mftno, l, error;
int last_mft = -1;
int len = *plen;
+ int tries = 0;
if (!ino->attr) {
ntfs_error("parse_attributes: called on inode 0x%x without a "
@@ -230,9 +231,13 @@ static int parse_attributes(ntfs_inode *ino, ntfs_u8 *alist, int *plen)
* then occur there or the user notified to run
* ntfsck. (AIA) */
if (mftno != ino->i_number && mftno != last_mft) {
+continue_after_loading_mft_data:
last_mft = mftno;
error = ntfs_read_mft_record(ino->vol, mftno, mft);
if (error) {
+ if (error == -EINVAL && !tries)
+ goto force_load_mft_data;
+failed_reading_mft_data:
ntfs_debug(DEBUG_FILE3, "parse_attributes: "
"ntfs_read_mft_record(mftno = 0x%x) "
"failed\n", mftno);
@@ -272,9 +277,106 @@ static int parse_attributes(ntfs_inode *ino, ntfs_u8 *alist, int *plen)
ntfs_free(mft);
*plen = len;
return 0;
+force_load_mft_data:
+{
+ ntfs_u8 *mft2, *attr2;
+ int mftno2;
+ int last_mft2 = last_mft;
+ int len2 = len;
+ int error2;
+ int found2 = 0;
+ ntfs_u8 *alist2 = alist;
+ /*
+ * We only get here if $DATA wasn't found in $MFT which only happens
+ * on volume mount when $MFT has an attribute list and there are
+ * attributes before $DATA which are inside extent mft records. So
+ * we just skip forward to the $DATA attribute and read that. Then we
+ * restart which is safe as an attribute will not be inserted twice.
+ *
+ * This still will not fix the case where the attribute list is non-
+ * resident, larger than 1024 bytes, and the $DATA attribute list entry
+ * is not in the first 1024 bytes. FIXME: This should be implemented
+ * somehow! Perhaps by passing special error code up to
+ * ntfs_load_attributes() so it keeps going trying to get to $DATA
+ * regardless. Then it would have to restart just like we do here.
+ */
+ mft2 = ntfs_malloc(ino->vol->mft_record_size);
+ if (!mft2) {
+ ntfs_free(mft);
+ return -ENOMEM;
+ }
+ ntfs_memcpy(mft2, mft, ino->vol->mft_record_size);
+ while (len2 > 8) {
+ l = NTFS_GETU16(alist2 + 4);
+ if (l > len2)
+ break;
+ if (NTFS_GETU32(alist2 + 0x0) < ino->vol->at_data) {
+ len2 -= l;
+ alist2 += l;
+ continue;
+ }
+ if (NTFS_GETU32(alist2 + 0x0) > ino->vol->at_data) {
+ if (found2)
+ break;
+ /* Uh-oh! It really isn't there! */
+ ntfs_error("Either the $MFT is corrupt or, equally "
+ "likely, the $MFT is too complex for "
+ "the current driver to handle. Please "
+ "email the ntfs maintainer that you "
+ "saw this message. Thank you.\n");
+ goto failed_reading_mft_data;
+ }
+ /* Process attribute description. */
+ mftno2 = NTFS_GETU32(alist2 + 0x10);
+ if (mftno2 != ino->i_number && mftno2 != last_mft2) {
+ last_mft2 = mftno2;
+ error2 = ntfs_read_mft_record(ino->vol, mftno2, mft2);
+ if (error2) {
+ ntfs_debug(DEBUG_FILE3, "parse_attributes: "
+ "ntfs_read_mft_record(mftno2 = 0x%x) "
+ "failed\n", mftno2);
+ ntfs_free(mft2);
+ goto failed_reading_mft_data;
+ }
+ }
+ attr2 = ntfs_find_attr_in_mft_rec(
+ ino->vol, /* ntfs volume */
+ mftno2 == ino->i_number ?/* mft record is: */
+ ino->attr: /* base record */
+ mft2, /* extension record */
+ NTFS_GETU32(alist2 + 0), /* type */
+ (wchar_t*)(alist2 + alist2[7]), /* name */
+ alist2[6], /* name length */
+ 1, /* ignore case */
+ NTFS_GETU16(alist2 + 24) /* instance number */
+ );
+ if (!attr2) {
+ ntfs_error("parse_attributes: mft records 0x%x and/or "
+ "0x%x corrupt!\n", ino->i_number,
+ mftno2);
+ ntfs_free(mft2);
+ goto failed_reading_mft_data;
+ }
+ error2 = ntfs_insert_mft_attribute(ino, mftno2, attr2);
+ if (error2) {
+ ntfs_debug(DEBUG_FILE3, "parse_attributes: "
+ "ntfs_insert_mft_attribute(mftno2 0x%x, "
+ "attribute2 type 0x%x) failed\n", mftno2,
+ NTFS_GETU32(alist2 + 0));
+ ntfs_free(mft2);
+ goto failed_reading_mft_data;
+ }
+ len2 -= l;
+ alist2 += l;
+ found2 = 1;
+ }
+ ntfs_free(mft2);
+ tries = 1;
+ goto continue_after_loading_mft_data;
+}
}
-static void ntfs_load_attributes(ntfs_inode* ino)
+static void ntfs_load_attributes(ntfs_inode *ino)
{
ntfs_attribute *alist;
int datasize;
diff --git a/fs/ntfs/support.c b/fs/ntfs/support.c
index 4cf9cff3a..2f290b4de 100644
--- a/fs/ntfs/support.c
+++ b/fs/ntfs/support.c
@@ -150,7 +150,7 @@ int ntfs_read_mft_record(ntfs_volume *vol, int mftno, char *buf)
* now as we just can't handle some on disk structures
* this way. (AIA) */
printk(KERN_WARNING "NTFS: Invalid MFT record for 0x%x\n", mftno);
- return -EINVAL;
+ return -EIO;
}
ntfs_debug(DEBUG_OTHER, "read_mft_record: Done 0x%x\n", mftno);
return 0;
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h
index 975f0bf61..0c5e61d14 100644
--- a/include/asm-i386/io.h
+++ b/include/asm-i386/io.h
@@ -105,6 +105,13 @@ extern void iounmap(void *addr);
#define page_to_bus page_to_phys
/*
+ * can the hardware map this into one segment or not, given no other
+ * constraints.
+ */
+#define BIOVEC_MERGEABLE(vec1, vec2) \
+ ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
+
+/*
* readX/writeX() are used to access memory mapped devices. On some
* architectures the memory mapped IO stuff needs to be accessed
* differently. On the x86 architecture, we just read/write the
diff --git a/include/asm-sparc64/io.h b/include/asm-sparc64/io.h
index 8ffeb330a..da391b908 100644
--- a/include/asm-sparc64/io.h
+++ b/include/asm-sparc64/io.h
@@ -1,4 +1,4 @@
-/* $Id: io.h,v 1.42 2001-12-11 04:55:54 davem Exp $ */
+/* $Id: io.h,v 1.43 2001-12-11 06:11:53 davem Exp $ */
#ifndef __SPARC64_IO_H
#define __SPARC64_IO_H
@@ -21,6 +21,9 @@ extern unsigned long bus_to_virt_not_defined_use_pci_map(volatile void *addr);
extern unsigned long phys_base;
#define page_to_phys(page) ((((page) - mem_map) << PAGE_SHIFT)+phys_base)
+#define BIOVEC_MERGEABLE(vec1, vec2) \
+ ((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (DMA_CHUNK_SIZE - 1)) == 0)
+
/* Different PCI controllers we support have their PCI MEM space
* mapped to an either 2GB (Psycho) or 4GB (Sabre) aligned area,
* so need to chop off the top 33 or 32 bits.
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 098198393..8c3de39a5 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -57,8 +57,9 @@ struct bio {
* top bits priority
*/
- unsigned int bi_vcnt; /* how many bio_vec's */
- unsigned int bi_idx; /* current index into bvl_vec */
+ unsigned short bi_vcnt; /* how many bio_vec's */
+ unsigned short bi_idx; /* current index into bvl_vec */
+ unsigned short bi_hw_seg; /* actual mapped segments */
unsigned int bi_size; /* total size in bytes */
unsigned int bi_max; /* max bvl_vecs we can hold,
used as index into pool */
@@ -79,7 +80,7 @@ struct bio {
#define BIO_UPTODATE 0 /* ok after I/O completion */
#define BIO_RW_BLOCK 1 /* RW_AHEAD set, and read/write would block */
#define BIO_EOF 2 /* out-out-bounds error */
-#define BIO_PREBUILT 3 /* not merged big */
+#define BIO_SEG_VALID 3 /* nr_hw_seg valid */
#define BIO_CLONED 4 /* doesn't own data */
/*
@@ -108,8 +109,8 @@ struct bio {
/*
* will die
*/
-#define bio_to_phys(bio) (page_to_phys(bio_page((bio))) + bio_offset((bio)))
-#define bvec_to_phys(bv) (page_to_phys((bv)->bv_page) + (bv)->bv_offset)
+#define bio_to_phys(bio) (page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
+#define bvec_to_phys(bv) (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
/*
* queues that have highmem support enabled may still need to revert to
@@ -125,13 +126,16 @@ struct bio {
/*
* merge helpers etc
*/
-#define __BVEC_END(bio) bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
+#define __BVEC_END(bio) bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
+#define __BVEC_START(bio) bio_iovec_idx((bio), 0)
#define BIO_CONTIG(bio, nxt) \
- (bvec_to_phys(__BVEC_END((bio))) + (bio)->bi_size == bio_to_phys((nxt)))
+ BIOVEC_MERGEABLE(__BVEC_END((bio)), __BVEC_START((nxt)))
#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
+#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
+ __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask)
#define BIO_SEG_BOUNDARY(q, b1, b2) \
- __BIO_SEG_BOUNDARY(bvec_to_phys(__BVEC_END((b1))), bio_to_phys((b2)) + (b2)->bi_size, (q)->seg_boundary_mask)
+ BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
#define bio_io_error(bio) bio_endio((bio), 0, bio_sectors((bio)))
@@ -167,6 +171,8 @@ extern struct bio *bio_alloc(int, int);
extern void bio_put(struct bio *);
extern int bio_endio(struct bio *, int, int);
+struct request_queue;
+extern inline int bio_hw_segments(struct request_queue *, struct bio *);
extern inline void __bio_clone(struct bio *, struct bio *);
extern struct bio *bio_clone(struct bio *, int);
diff --git a/include/linux/blk.h b/include/linux/blk.h
index e3155fcaa..adaa84c0f 100644
--- a/include/linux/blk.h
+++ b/include/linux/blk.h
@@ -84,11 +84,12 @@ extern inline struct request *elv_next_request(request_queue_t *q)
(q)->elevator.elevator_add_req_fn((q), (rq), (where)); \
} while (0)
-#define __elv_add_request(q, rq, back, p) \
+#define __elv_add_request(q, rq, back, p) do { \
if ((back)) \
__elv_add_request_core((q), (rq), (q)->queue_head.prev, (p)); \
else \
__elv_add_request_core((q), (rq), &(q)->queue_head, 0); \
+} while (0)
#define elv_add_request(q, rq, back) __elv_add_request((q), (rq), (back), 1)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e9e0cf210..204ab9765 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -256,6 +256,8 @@ extern void blk_attempt_remerge(request_queue_t *, struct request *);
extern struct request *blk_get_request(request_queue_t *, int, int);
extern void blk_put_request(struct request *);
extern void blk_plug_device(request_queue_t *);
+extern void blk_recount_segments(request_queue_t *, struct bio *);
+extern inline int blk_contig_segment(request_queue_t *q, struct bio *, struct bio *);
extern int block_ioctl(kdev_t, unsigned int, unsigned long);
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 01439aadb..d34fdd7ae 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -9,6 +9,7 @@
#include <linux/smp_lock.h>
#include <linux/blk.h>
#include <linux/tty.h>
+#include <linux/fd.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_fs_sb.h>
@@ -29,6 +30,11 @@ static inline _syscall2(int,umount,char *,name,int,flags);
extern void rd_load(void);
extern void initrd_load(void);
+extern int get_filesystem_list(char * buf);
+extern void wait_for_keypress(void);
+
+asmlinkage long sys_mount(char * dev_name, char * dir_name, char * type,
+ unsigned long flags, void * data);
#ifdef CONFIG_BLK_DEV_INITRD
unsigned int real_root_dev; /* do_proc_dointvec cannot handle kdev_t */
@@ -276,29 +282,25 @@ static void __init mount_root(void)
char path[64];
char *name = "/dev/root";
char *fs_names, *p;
- int err;
int do_devfs = 0;
-#ifdef CONFIG_ROOT_NFS
- void *data;
-#endif
+
root_mountflags |= MS_VERBOSE;
fs_names = __getname();
get_fs_names(fs_names);
#ifdef CONFIG_ROOT_NFS
- if (MAJOR(ROOT_DEV) != UNNAMED_MAJOR)
- goto skip_nfs;
- data = nfs_root_data();
- if (!data)
- goto no_nfs;
- err = mount("/dev/root", "/root", "nfs", root_mountflags, data);
- if (!err)
- goto done;
-no_nfs:
- printk(KERN_ERR "VFS: Unable to mount root fs via NFS, trying floppy.\n");
- ROOT_DEV = MKDEV(FLOPPY_MAJOR, 0);
-skip_nfs:
+ if (MAJOR(ROOT_DEV) == UNNAMED_MAJOR) {
+ void *data;
+ data = nfs_root_data();
+ if (data) {
+ int err = mount("/dev/root", "/root", "nfs", root_mountflags, data);
+ if (!err)
+ goto done;
+ }
+ printk(KERN_ERR "VFS: Unable to mount root fs via NFS, trying floppy.\n");
+ ROOT_DEV = MKDEV(FLOPPY_MAJOR, 0);
+ }
#endif
#ifdef CONFIG_BLK_DEV_FD
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 4b38fec60..e8ebdc156 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -45,7 +45,8 @@ extern int C_A_D;
extern int bdf_prm[], bdflush_min[], bdflush_max[];
extern int sysctl_overcommit_memory;
extern int max_threads;
-extern int nr_queued_signals, max_queued_signals;
+extern atomic_t nr_queued_signals;
+extern int max_queued_signals;
extern int sysrq_enabled;
extern int core_uses_pid;
extern int cad_pid;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4b61e2b2a..b986f2a2e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5,7 +5,7 @@
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp_input.c,v 1.241 2001-11-14 02:48:51 davem Exp $
+ * Version: $Id: tcp_input.c,v 1.242 2001-12-11 06:11:53 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -3881,6 +3881,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
}
}
/* Fall through */
+ case TCP_LAST_ACK:
case TCP_ESTABLISHED:
tcp_data_queue(sk, skb);
queued = 1;