author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-27 10:05:42 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-27 10:05:42 -0700
commit     fc67b16ecaf6ebde04096030c268adddade023f1 (patch)
tree       1cce42cdca1fc9e4ec41b9f7f72c60e343cebca7 /arch
parent     e8108c98dd6d65613fa0ec9d2300f89c48d554bf (diff)
parent     2d29306b231a1a0e7a70166c10e4c0f917b21334 (diff)
Automatic merge of rsync://rsync.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6.git
Diffstat (limited to 'arch')
-rw-r--r--  arch/ia64/Kconfig                                |   2
-rw-r--r--  arch/ia64/configs/sn2_defconfig                  |   2
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c                  |  96
-rw-r--r--  arch/ia64/kernel/entry.S                         |  14
-rw-r--r--  arch/ia64/kernel/iosapic.c                       | 358
-rw-r--r--  arch/ia64/kernel/irq_ia64.c                      |  16
-rw-r--r--  arch/ia64/kernel/perfmon.c                       |  59
-rw-r--r--  arch/ia64/kernel/perfmon_default_smpl.c          |  13
-rw-r--r--  arch/ia64/kernel/setup.c                         |  69
-rw-r--r--  arch/ia64/kernel/smpboot.c                       | 217
-rw-r--r--  arch/ia64/kernel/unwind.c                        |  27
-rw-r--r--  arch/ia64/lib/memcpy_mck.S                       |   2
-rw-r--r--  arch/ia64/mm/contig.c                            |   3
-rw-r--r--  arch/ia64/mm/discontig.c                         |   3
-rw-r--r--  arch/ia64/mm/fault.c                             |   9
-rw-r--r--  arch/ia64/mm/init.c                              |  74
-rw-r--r--  arch/ia64/sn/include/pci/pcibr_provider.h        |   6
-rw-r--r--  arch/ia64/sn/include/pci/pcibus_provider_defs.h  |  43
-rw-r--r--  arch/ia64/sn/include/pci/pcidev.h                |  54
-rw-r--r--  arch/ia64/sn/kernel/Makefile                     |   1
-rw-r--r--  arch/ia64/sn/kernel/bte.c                        |  20
-rw-r--r--  arch/ia64/sn/kernel/bte_error.c                  |  76
-rw-r--r--  arch/ia64/sn/kernel/huberror.c                   |   9
-rw-r--r--  arch/ia64/sn/kernel/io_init.c                    |  78
-rw-r--r--  arch/ia64/sn/kernel/irq.c                        |  19
-rw-r--r--  arch/ia64/sn/kernel/setup.c                      |   9
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_hwperf.c              | 112
-rw-r--r--  arch/ia64/sn/kernel/tiocx.c                      | 548
-rw-r--r--  arch/ia64/sn/pci/Makefile                        |   2
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c                       |  39
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_ate.c               |   4
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_dma.c               | 107
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_provider.c          |  24
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_reg.c               |   4
-rw-r--r--  arch/ia64/sn/pci/tioca_provider.c                | 668
35 files changed, 2308 insertions, 479 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 33fcb205fcb7a..468dbe8a6b9c2 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -329,7 +329,7 @@ menu "Power management and ACPI"
config PM
bool "Power Management support"
- depends on IA64_GENERIC || IA64_DIG || IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB
+ depends on !IA64_HP_SIM
default y
help
"Power Management" means that parts of your computer are shut
diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig
index bfeb952fe8e28..6ff7107fee4d6 100644
--- a/arch/ia64/configs/sn2_defconfig
+++ b/arch/ia64/configs/sn2_defconfig
@@ -574,6 +574,8 @@ CONFIG_SERIAL_NONSTANDARD=y
# CONFIG_N_HDLC is not set
# CONFIG_STALDRV is not set
CONFIG_SGI_SNSC=y
+CONFIG_SGI_TIOCX=y
+CONFIG_SGI_MBCS=m
#
# Serial drivers
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 017c9ab5fc1b2..6a8fcba7a8531 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1,9 +1,9 @@
/*
** IA64 System Bus Adapter (SBA) I/O MMU manager
**
-** (c) Copyright 2002-2004 Alex Williamson
+** (c) Copyright 2002-2005 Alex Williamson
** (c) Copyright 2002-2003 Grant Grundler
-** (c) Copyright 2002-2004 Hewlett-Packard Company
+** (c) Copyright 2002-2005 Hewlett-Packard Company
**
** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
@@ -459,21 +459,32 @@ get_iovp_order (unsigned long size)
* sba_search_bitmap - find free space in IO PDIR resource bitmap
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @bits_wanted: number of entries we need.
+ * @use_hint: use res_hint to indicate where to start looking
*
* Find consecutive free bits in resource bitmap.
* Each bit represents one entry in the IO Pdir.
* Cool perf optimization: search for log2(size) bits at a time.
*/
static SBA_INLINE unsigned long
-sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
+sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
{
- unsigned long *res_ptr = ioc->res_hint;
+ unsigned long *res_ptr;
unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
- unsigned long pide = ~0UL;
+ unsigned long flags, pide = ~0UL;
ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
ASSERT(res_ptr < res_end);
+ spin_lock_irqsave(&ioc->res_lock, flags);
+
+ /* Allow caller to force a search through the entire resource space */
+ if (likely(use_hint)) {
+ res_ptr = ioc->res_hint;
+ } else {
+ res_ptr = (ulong *)ioc->res_map;
+ ioc->res_bitshift = 0;
+ }
+
/*
* N.B. REO/Grande defect AR2305 can cause TLB fetch timeouts
* if a TLB entry is purged while in use. sba_mark_invalid()
@@ -570,10 +581,12 @@ not_found:
prefetch(ioc->res_map);
ioc->res_hint = (unsigned long *) ioc->res_map;
ioc->res_bitshift = 0;
+ spin_unlock_irqrestore(&ioc->res_lock, flags);
return (pide);
found_it:
ioc->res_hint = res_ptr;
+ spin_unlock_irqrestore(&ioc->res_lock, flags);
return (pide);
}
@@ -594,36 +607,36 @@ sba_alloc_range(struct ioc *ioc, size_t size)
unsigned long itc_start;
#endif
unsigned long pide;
- unsigned long flags;
ASSERT(pages_needed);
ASSERT(0 == (size & ~iovp_mask));
- spin_lock_irqsave(&ioc->res_lock, flags);
-
#ifdef PDIR_SEARCH_TIMING
itc_start = ia64_get_itc();
#endif
/*
** "seek and ye shall find"...praying never hurts either...
*/
- pide = sba_search_bitmap(ioc, pages_needed);
+ pide = sba_search_bitmap(ioc, pages_needed, 1);
if (unlikely(pide >= (ioc->res_size << 3))) {
- pide = sba_search_bitmap(ioc, pages_needed);
+ pide = sba_search_bitmap(ioc, pages_needed, 0);
if (unlikely(pide >= (ioc->res_size << 3))) {
#if DELAYED_RESOURCE_CNT > 0
+ unsigned long flags;
+
/*
** With delayed resource freeing, we can give this one more shot. We're
** getting close to being in trouble here, so do what we can to make this
** one count.
*/
- spin_lock(&ioc->saved_lock);
+ spin_lock_irqsave(&ioc->saved_lock, flags);
if (ioc->saved_cnt > 0) {
struct sba_dma_pair *d;
int cnt = ioc->saved_cnt;
- d = &(ioc->saved[ioc->saved_cnt]);
+ d = &(ioc->saved[ioc->saved_cnt - 1]);
+ spin_lock(&ioc->res_lock);
while (cnt--) {
sba_mark_invalid(ioc, d->iova, d->size);
sba_free_range(ioc, d->iova, d->size);
@@ -631,10 +644,11 @@ sba_alloc_range(struct ioc *ioc, size_t size)
}
ioc->saved_cnt = 0;
READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
+ spin_unlock(&ioc->res_lock);
}
- spin_unlock(&ioc->saved_lock);
+ spin_unlock_irqrestore(&ioc->saved_lock, flags);
- pide = sba_search_bitmap(ioc, pages_needed);
+ pide = sba_search_bitmap(ioc, pages_needed, 0);
if (unlikely(pide >= (ioc->res_size << 3)))
panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
ioc->ioc_hpa);
@@ -664,8 +678,6 @@ sba_alloc_range(struct ioc *ioc, size_t size)
(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
ioc->res_bitshift );
- spin_unlock_irqrestore(&ioc->res_lock, flags);
-
return (pide);
}
@@ -950,6 +962,30 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
return SBA_IOVA(ioc, iovp, offset);
}
+#ifdef ENABLE_MARK_CLEAN
+static SBA_INLINE void
+sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
+{
+ u32 iovp = (u32) SBA_IOVP(ioc,iova);
+ int off = PDIR_INDEX(iovp);
+ void *addr;
+
+ if (size <= iovp_size) {
+ addr = phys_to_virt(ioc->pdir_base[off] &
+ ~0xE000000000000FFFULL);
+ mark_clean(addr, size);
+ } else {
+ do {
+ addr = phys_to_virt(ioc->pdir_base[off] &
+ ~0xE000000000000FFFULL);
+ mark_clean(addr, min(size, iovp_size));
+ off++;
+ size -= iovp_size;
+ } while (size > 0);
+ }
+}
+#endif
+
/**
* sba_unmap_single - unmap one IOVA and free resources
* @dev: instance of PCI owned by the driver that's asking.
@@ -995,6 +1031,10 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
size += offset;
size = ROUNDUP(size, iovp_size);
+#ifdef ENABLE_MARK_CLEAN
+ if (dir == DMA_FROM_DEVICE)
+ sba_mark_clean(ioc, iova, size);
+#endif
#if DELAYED_RESOURCE_CNT > 0
spin_lock_irqsave(&ioc->saved_lock, flags);
@@ -1021,30 +1061,6 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif /* DELAYED_RESOURCE_CNT == 0 */
-#ifdef ENABLE_MARK_CLEAN
- if (dir == DMA_FROM_DEVICE) {
- u32 iovp = (u32) SBA_IOVP(ioc,iova);
- int off = PDIR_INDEX(iovp);
- void *addr;
-
- if (size <= iovp_size) {
- addr = phys_to_virt(ioc->pdir_base[off] &
- ~0xE000000000000FFFULL);
- mark_clean(addr, size);
- } else {
- size_t byte_cnt = size;
-
- do {
- addr = phys_to_virt(ioc->pdir_base[off] &
- ~0xE000000000000FFFULL);
- mark_clean(addr, min(byte_cnt, iovp_size));
- off++;
- byte_cnt -= iovp_size;
-
- } while (byte_cnt > 0);
- }
- }
-#endif
}
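
The sba_search_bitmap() changes above move the res_lock acquisition into the search itself and add a use_hint argument, so sba_alloc_range() can first search from the hint and then force a second pass over the whole map. Below is a minimal stand-alone sketch of that hint-based search, simplified to single-bit granularity with no locking and no log2(size) batching; the names (struct res_map, find_free_run) are illustrative, not from this patch.

    /* Sketch of the hint-based bitmap search used by sba_search_bitmap(). */
    #include <stddef.h>
    #include <stdio.h>

    #define NBITS 128

    struct res_map {
        unsigned long map[NBITS / (8 * sizeof(unsigned long))];
        size_t hint;                    /* where the last search left off */
    };

    static int bit_busy(struct res_map *m, size_t b)
    {
        return (m->map[b / (8 * sizeof(long))] >> (b % (8 * sizeof(long)))) & 1;
    }

    static void bit_claim(struct res_map *m, size_t b)
    {
        m->map[b / (8 * sizeof(long))] |= 1UL << (b % (8 * sizeof(long)));
    }

    /* Find and claim 'want' consecutive free bits; start at the hint unless
     * use_hint is 0.  Returns the first bit index, or -1 on failure -- the
     * caller retries without the hint, as sba_alloc_range() does above. */
    static long find_free_run(struct res_map *m, size_t want, int use_hint)
    {
        for (size_t base = use_hint ? m->hint : 0; base + want <= NBITS; base++) {
            size_t i;
            for (i = 0; i < want && !bit_busy(m, base + i); i++)
                ;
            if (i == want) {
                for (i = 0; i < want; i++)
                    bit_claim(m, base + i);
                m->hint = base + want;  /* next search resumes here */
                return (long)base;
            }
            base += i;                  /* skip past the busy bit */
        }
        return -1;
    }

    int main(void)
    {
        struct res_map m = { .hint = 0 };
        printf("run1 at %ld\n", find_free_run(&m, 4, 1));      /* 0 */
        printf("run2 at %ld\n", find_free_run(&m, 4, 1));      /* 4 */
        printf("full scan at %ld\n", find_free_run(&m, 4, 0)); /* 8 */
        return 0;
    }

Keeping the hint at the end of the last allocation makes successive allocations nearly O(1), while the unhinted retry preserves completeness when the tail of the map is exhausted.
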
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 0272c010a3ba4..bd86fea49a0c4 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -728,12 +728,8 @@ ENTRY(ia64_leave_syscall)
mov f8=f0 // clear f8
;;
ld8 r30=[r2],16 // M0|1 load cr.ifs
- mov.m ar.ssd=r0 // M2 clear ar.ssd
- cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs
- ;;
ld8 r25=[r3],16 // M0|1 load ar.unat
- mov.m ar.csd=r0 // M2 clear ar.csd
- mov r22=r0 // clear r22
+ cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs
;;
ld8 r26=[r2],PT(B0)-PT(AR_PFS) // M0|1 load ar.pfs
(pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled
@@ -756,11 +752,15 @@ ENTRY(ia64_leave_syscall)
mov f7=f0 // clear f7
;;
ld8.fill r12=[r2] // restore r12 (sp)
+ mov.m ar.ssd=r0 // M2 clear ar.ssd
+ mov r22=r0 // clear r22
+
ld8.fill r15=[r3] // restore r15
+(pUStk) st1 [r14]=r17
addl r3=THIS_CPU(ia64_phys_stacked_size_p8),r0
;;
-(pUStk) ld4 r3=[r3] // r3 = cpu_data->phys_stacked_size_p8
-(pUStk) st1 [r14]=r17
+(pUStk) ld4 r17=[r3] // r17 = cpu_data->phys_stacked_size_p8
+ mov.m ar.csd=r0 // M2 clear ar.csd
mov b6=r18 // I0 restore b6
;;
mov r14=r0 // clear r14
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index c15be5c38f560..88b014381df5f 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -79,6 +79,7 @@
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/string.h>
+#include <linux/bootmem.h>
#include <asm/delay.h>
#include <asm/hw_irq.h>
@@ -98,19 +99,30 @@
#define DBG(fmt...)
#endif
+#define NR_PREALLOCATE_RTE_ENTRIES (PAGE_SIZE / sizeof(struct iosapic_rte_info))
+#define RTE_PREALLOCATED (1)
+
static DEFINE_SPINLOCK(iosapic_lock);
/* These tables map IA-64 vectors to the IOSAPIC pin that generates this vector. */
-static struct iosapic_intr_info {
+struct iosapic_rte_info {
+ struct list_head rte_list; /* node in list of RTEs sharing the same vector */
char __iomem *addr; /* base address of IOSAPIC */
- u32 low32; /* current value of low word of Redirection table entry */
unsigned int gsi_base; /* first GSI assigned to this IOSAPIC */
- char rte_index; /* IOSAPIC RTE index (-1 => not an IOSAPIC interrupt) */
+ char rte_index; /* IOSAPIC RTE index */
+ int refcnt; /* reference counter */
+ unsigned int flags; /* flags */
+} ____cacheline_aligned;
+
+static struct iosapic_intr_info {
+ struct list_head rtes; /* RTEs using this vector (empty => not an IOSAPIC interrupt) */
+ int count; /* # of RTEs that share this vector */
+ u32 low32; /* current value of low word of Redirection table entry */
+ unsigned int dest; /* destination CPU physical ID */
unsigned char dmode : 3; /* delivery mode (see iosapic.h) */
unsigned char polarity: 1; /* interrupt polarity (see iosapic.h) */
unsigned char trigger : 1; /* trigger mode (see iosapic.h) */
- int refcnt; /* reference counter */
} iosapic_intr_info[IA64_NUM_VECTORS];
static struct iosapic {
@@ -126,6 +138,8 @@ static int num_iosapic;
static unsigned char pcat_compat __initdata; /* 8259 compatibility flag */
+static int iosapic_kmalloc_ok;
+static LIST_HEAD(free_rte_list);
/*
* Find an IOSAPIC associated with a GSI
@@ -147,10 +161,12 @@ static inline int
_gsi_to_vector (unsigned int gsi)
{
struct iosapic_intr_info *info;
+ struct iosapic_rte_info *rte;
for (info = iosapic_intr_info; info < iosapic_intr_info + IA64_NUM_VECTORS; ++info)
- if (info->gsi_base + info->rte_index == gsi)
- return info - iosapic_intr_info;
+ list_for_each_entry(rte, &info->rtes, rte_list)
+ if (rte->gsi_base + rte->rte_index == gsi)
+ return info - iosapic_intr_info;
return -1;
}
@@ -167,33 +183,52 @@ gsi_to_vector (unsigned int gsi)
int
gsi_to_irq (unsigned int gsi)
{
+ unsigned long flags;
+ int irq;
/*
* XXX fix me: this assumes an identity mapping between IA-64 vector and Linux irq
* numbers...
*/
- return _gsi_to_vector(gsi);
+ spin_lock_irqsave(&iosapic_lock, flags);
+ {
+ irq = _gsi_to_vector(gsi);
+ }
+ spin_unlock_irqrestore(&iosapic_lock, flags);
+
+ return irq;
+}
+
+static struct iosapic_rte_info *gsi_vector_to_rte(unsigned int gsi, unsigned int vec)
+{
+ struct iosapic_rte_info *rte;
+
+ list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list)
+ if (rte->gsi_base + rte->rte_index == gsi)
+ return rte;
+ return NULL;
}
static void
-set_rte (unsigned int vector, unsigned int dest, int mask)
+set_rte (unsigned int gsi, unsigned int vector, unsigned int dest, int mask)
{
unsigned long pol, trigger, dmode;
u32 low32, high32;
char __iomem *addr;
int rte_index;
char redir;
+ struct iosapic_rte_info *rte;
DBG(KERN_DEBUG"IOSAPIC: routing vector %d to 0x%x\n", vector, dest);
- rte_index = iosapic_intr_info[vector].rte_index;
- if (rte_index < 0)
+ rte = gsi_vector_to_rte(gsi, vector);
+ if (!rte)
return; /* not an IOSAPIC interrupt */
- addr = iosapic_intr_info[vector].addr;
+ rte_index = rte->rte_index;
+ addr = rte->addr;
pol = iosapic_intr_info[vector].polarity;
trigger = iosapic_intr_info[vector].trigger;
dmode = iosapic_intr_info[vector].dmode;
- vector &= (~IA64_IRQ_REDIRECTED);
redir = (dmode == IOSAPIC_LOWEST_PRIORITY) ? 1 : 0;
@@ -221,6 +256,7 @@ set_rte (unsigned int vector, unsigned int dest, int mask)
iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32);
iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
iosapic_intr_info[vector].low32 = low32;
+ iosapic_intr_info[vector].dest = dest;
}
static void
@@ -237,18 +273,20 @@ mask_irq (unsigned int irq)
u32 low32;
int rte_index;
ia64_vector vec = irq_to_vector(irq);
+ struct iosapic_rte_info *rte;
- addr = iosapic_intr_info[vec].addr;
- rte_index = iosapic_intr_info[vec].rte_index;
-
- if (rte_index < 0)
+ if (list_empty(&iosapic_intr_info[vec].rtes))
return; /* not an IOSAPIC interrupt! */
spin_lock_irqsave(&iosapic_lock, flags);
{
/* set only the mask bit */
low32 = iosapic_intr_info[vec].low32 |= IOSAPIC_MASK;
- iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
+ list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) {
+ addr = rte->addr;
+ rte_index = rte->rte_index;
+ iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
+ }
}
spin_unlock_irqrestore(&iosapic_lock, flags);
}
@@ -261,16 +299,19 @@ unmask_irq (unsigned int irq)
u32 low32;
int rte_index;
ia64_vector vec = irq_to_vector(irq);
+ struct iosapic_rte_info *rte;
- addr = iosapic_intr_info[vec].addr;
- rte_index = iosapic_intr_info[vec].rte_index;
- if (rte_index < 0)
+ if (list_empty(&iosapic_intr_info[vec].rtes))
return; /* not an IOSAPIC interrupt! */
spin_lock_irqsave(&iosapic_lock, flags);
{
low32 = iosapic_intr_info[vec].low32 &= ~IOSAPIC_MASK;
- iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
+ list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) {
+ addr = rte->addr;
+ rte_index = rte->rte_index;
+ iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
+ }
}
spin_unlock_irqrestore(&iosapic_lock, flags);
}
@@ -286,6 +327,7 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
char __iomem *addr;
int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
ia64_vector vec;
+ struct iosapic_rte_info *rte;
irq &= (~IA64_IRQ_REDIRECTED);
vec = irq_to_vector(irq);
@@ -295,10 +337,7 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
dest = cpu_physical_id(first_cpu(mask));
- rte_index = iosapic_intr_info[vec].rte_index;
- addr = iosapic_intr_info[vec].addr;
-
- if (rte_index < 0)
+ if (list_empty(&iosapic_intr_info[vec].rtes))
return; /* not an IOSAPIC interrupt */
set_irq_affinity_info(irq, dest, redir);
@@ -318,8 +357,13 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT);
iosapic_intr_info[vec].low32 = low32;
- iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32);
- iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
+ iosapic_intr_info[vec].dest = dest;
+ list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) {
+ addr = rte->addr;
+ rte_index = rte->rte_index;
+ iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32);
+ iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
+ }
}
spin_unlock_irqrestore(&iosapic_lock, flags);
#endif
@@ -340,9 +384,11 @@ static void
iosapic_end_level_irq (unsigned int irq)
{
ia64_vector vec = irq_to_vector(irq);
+ struct iosapic_rte_info *rte;
move_irq(irq);
- iosapic_eoi(iosapic_intr_info[vec].addr, vec);
+ list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list)
+ iosapic_eoi(rte->addr, vec);
}
#define iosapic_shutdown_level_irq mask_irq
@@ -422,6 +468,34 @@ iosapic_version (char __iomem *addr)
return iosapic_read(addr, IOSAPIC_VERSION);
}
+static int iosapic_find_sharable_vector (unsigned long trigger, unsigned long pol)
+{
+ int i, vector = -1, min_count = -1;
+ struct iosapic_intr_info *info;
+
+ /*
+ * shared vectors for edge-triggered interrupts are not
+ * supported yet
+ */
+ if (trigger == IOSAPIC_EDGE)
+ return -1;
+
+ for (i = IA64_FIRST_DEVICE_VECTOR; i <= IA64_LAST_DEVICE_VECTOR; i++) {
+ info = &iosapic_intr_info[i];
+ if (info->trigger == trigger && info->polarity == pol &&
+ (info->dmode == IOSAPIC_FIXED || info->dmode == IOSAPIC_LOWEST_PRIORITY)) {
+ if (min_count == -1 || info->count < min_count) {
+ vector = i;
+ min_count = info->count;
+ }
+ }
+ }
+ if (vector < 0)
+ panic("%s: out of interrupt vectors!\n", __FUNCTION__);
+
+ return vector;
+}
+
/*
* if the given vector is already owned by other,
* assign a new vector for the other and make the vector available
@@ -431,19 +505,63 @@ iosapic_reassign_vector (int vector)
{
int new_vector;
- if (iosapic_intr_info[vector].rte_index >= 0 || iosapic_intr_info[vector].addr
- || iosapic_intr_info[vector].gsi_base || iosapic_intr_info[vector].dmode
- || iosapic_intr_info[vector].polarity || iosapic_intr_info[vector].trigger)
- {
+ if (!list_empty(&iosapic_intr_info[vector].rtes)) {
new_vector = assign_irq_vector(AUTO_ASSIGN);
printk(KERN_INFO "Reassigning vector %d to %d\n", vector, new_vector);
memcpy(&iosapic_intr_info[new_vector], &iosapic_intr_info[vector],
sizeof(struct iosapic_intr_info));
+ INIT_LIST_HEAD(&iosapic_intr_info[new_vector].rtes);
+ list_move(iosapic_intr_info[vector].rtes.next, &iosapic_intr_info[new_vector].rtes);
memset(&iosapic_intr_info[vector], 0, sizeof(struct iosapic_intr_info));
- iosapic_intr_info[vector].rte_index = -1;
+ iosapic_intr_info[vector].low32 = IOSAPIC_MASK;
+ INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes);
}
}
+static struct iosapic_rte_info *iosapic_alloc_rte (void)
+{
+ int i;
+ struct iosapic_rte_info *rte;
+ int preallocated = 0;
+
+ if (!iosapic_kmalloc_ok && list_empty(&free_rte_list)) {
+ rte = alloc_bootmem(sizeof(struct iosapic_rte_info) * NR_PREALLOCATE_RTE_ENTRIES);
+ if (!rte)
+ return NULL;
+ for (i = 0; i < NR_PREALLOCATE_RTE_ENTRIES; i++, rte++)
+ list_add(&rte->rte_list, &free_rte_list);
+ }
+
+ if (!list_empty(&free_rte_list)) {
+ rte = list_entry(free_rte_list.next, struct iosapic_rte_info, rte_list);
+ list_del(&rte->rte_list);
+ preallocated++;
+ } else {
+ rte = kmalloc(sizeof(struct iosapic_rte_info), GFP_ATOMIC);
+ if (!rte)
+ return NULL;
+ }
+
+ memset(rte, 0, sizeof(struct iosapic_rte_info));
+ if (preallocated)
+ rte->flags |= RTE_PREALLOCATED;
+
+ return rte;
+}
+
+static void iosapic_free_rte (struct iosapic_rte_info *rte)
+{
+ if (rte->flags & RTE_PREALLOCATED)
+ list_add_tail(&rte->rte_list, &free_rte_list);
+ else
+ kfree(rte);
+}
+
+static inline int vector_is_shared (int vector)
+{
+ return (iosapic_intr_info[vector].count > 1);
+}
+
static void
register_intr (unsigned int gsi, int vector, unsigned char delivery,
unsigned long polarity, unsigned long trigger)
@@ -454,6 +572,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
int index;
unsigned long gsi_base;
void __iomem *iosapic_address;
+ struct iosapic_rte_info *rte;
index = find_iosapic(gsi);
if (index < 0) {
@@ -464,14 +583,33 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
iosapic_address = iosapic_lists[index].addr;
gsi_base = iosapic_lists[index].gsi_base;
- rte_index = gsi - gsi_base;
- iosapic_intr_info[vector].rte_index = rte_index;
+ rte = gsi_vector_to_rte(gsi, vector);
+ if (!rte) {
+ rte = iosapic_alloc_rte();
+ if (!rte) {
+ printk(KERN_WARNING "%s: cannot allocate memory\n", __FUNCTION__);
+ return;
+ }
+
+ rte_index = gsi - gsi_base;
+ rte->rte_index = rte_index;
+ rte->addr = iosapic_address;
+ rte->gsi_base = gsi_base;
+ rte->refcnt++;
+ list_add_tail(&rte->rte_list, &iosapic_intr_info[vector].rtes);
+ iosapic_intr_info[vector].count++;
+ }
+ else if (vector_is_shared(vector)) {
+ struct iosapic_intr_info *info = &iosapic_intr_info[vector];
+ if (info->trigger != trigger || info->polarity != polarity) {
+ printk (KERN_WARNING "%s: cannot override the interrupt\n", __FUNCTION__);
+ return;
+ }
+ }
+
iosapic_intr_info[vector].polarity = polarity;
iosapic_intr_info[vector].dmode = delivery;
- iosapic_intr_info[vector].addr = iosapic_address;
- iosapic_intr_info[vector].gsi_base = gsi_base;
iosapic_intr_info[vector].trigger = trigger;
- iosapic_intr_info[vector].refcnt++;
if (trigger == IOSAPIC_EDGE)
irq_type = &irq_type_iosapic_edge;
@@ -494,6 +632,13 @@ get_target_cpu (unsigned int gsi, int vector)
static int cpu = -1;
/*
+ * In case of vector shared by multiple RTEs, all RTEs that
+ * share the vector need to use the same destination CPU.
+ */
+ if (!list_empty(&iosapic_intr_info[vector].rtes))
+ return iosapic_intr_info[vector].dest;
+
+ /*
* If the platform supports redirection via XTP, let it
* distribute interrupts.
*/
@@ -565,10 +710,12 @@ int
iosapic_register_intr (unsigned int gsi,
unsigned long polarity, unsigned long trigger)
{
- int vector;
+ int vector, mask = 1;
unsigned int dest;
unsigned long flags;
-
+ struct iosapic_rte_info *rte;
+ u32 low32;
+again:
/*
* If this GSI has already been registered (i.e., it's a
* shared interrupt, or we lost a race to register it),
@@ -578,19 +725,45 @@ iosapic_register_intr (unsigned int gsi,
{
vector = gsi_to_vector(gsi);
if (vector > 0) {
- iosapic_intr_info[vector].refcnt++;
+ rte = gsi_vector_to_rte(gsi, vector);
+ rte->refcnt++;
spin_unlock_irqrestore(&iosapic_lock, flags);
return vector;
}
+ }
+ spin_unlock_irqrestore(&iosapic_lock, flags);
+
+ /* If vectors are running out, try to find a sharable vector */
+ vector = assign_irq_vector_nopanic(AUTO_ASSIGN);
+ if (vector < 0)
+ vector = iosapic_find_sharable_vector(trigger, polarity);
+
+ spin_lock_irqsave(&irq_descp(vector)->lock, flags);
+ spin_lock(&iosapic_lock);
+ {
+ if (gsi_to_vector(gsi) > 0) {
+ if (list_empty(&iosapic_intr_info[vector].rtes))
+ free_irq_vector(vector);
+ spin_unlock(&iosapic_lock);
+ spin_unlock_irqrestore(&irq_descp(vector)->lock, flags);
+ goto again;
+ }
- vector = assign_irq_vector(AUTO_ASSIGN);
dest = get_target_cpu(gsi, vector);
register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY,
- polarity, trigger);
+ polarity, trigger);
- set_rte(vector, dest, 1);
+ /*
+ * If the vector is shared and already unmasked for
+ * other interrupt sources, don't mask it.
+ */
+ low32 = iosapic_intr_info[vector].low32;
+ if (vector_is_shared(vector) && !(low32 & IOSAPIC_MASK))
+ mask = 0;
+ set_rte(gsi, vector, dest, mask);
}
- spin_unlock_irqrestore(&iosapic_lock, flags);
+ spin_unlock(&iosapic_lock);
+ spin_unlock_irqrestore(&irq_descp(vector)->lock, flags);
printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n",
gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
@@ -607,8 +780,10 @@ iosapic_unregister_intr (unsigned int gsi)
unsigned long flags;
int irq, vector;
irq_desc_t *idesc;
- int rte_index;
+ u32 low32;
unsigned long trigger, polarity;
+ unsigned int dest;
+ struct iosapic_rte_info *rte;
/*
* If the irq associated with the gsi is not found,
@@ -627,54 +802,56 @@ iosapic_unregister_intr (unsigned int gsi)
spin_lock_irqsave(&idesc->lock, flags);
spin_lock(&iosapic_lock);
{
- rte_index = iosapic_intr_info[vector].rte_index;
- if (rte_index < 0) {
- spin_unlock(&iosapic_lock);
- spin_unlock_irqrestore(&idesc->lock, flags);
+ if ((rte = gsi_vector_to_rte(gsi, vector)) == NULL) {
printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n", gsi);
WARN_ON(1);
- return;
+ goto out;
}
- if (--iosapic_intr_info[vector].refcnt > 0) {
- spin_unlock(&iosapic_lock);
- spin_unlock_irqrestore(&idesc->lock, flags);
- return;
- }
+ if (--rte->refcnt > 0)
+ goto out;
- /*
- * If interrupt handlers still exist on the irq
- * associated with the gsi, don't unregister the
- * interrupt.
- */
- if (idesc->action) {
- iosapic_intr_info[vector].refcnt++;
- spin_unlock(&iosapic_lock);
- spin_unlock_irqrestore(&idesc->lock, flags);
- printk(KERN_WARNING "Cannot unregister GSI. IRQ %u is still in use.\n", irq);
- return;
- }
+ /* Mask the interrupt */
+ low32 = iosapic_intr_info[vector].low32 | IOSAPIC_MASK;
+ iosapic_write(rte->addr, IOSAPIC_RTE_LOW(rte->rte_index), low32);
- /* Clear the interrupt controller descriptor. */
- idesc->handler = &no_irq_type;
+ /* Remove the rte entry from the list */
+ list_del(&rte->rte_list);
+ iosapic_intr_info[vector].count--;
+ iosapic_free_rte(rte);
- trigger = iosapic_intr_info[vector].trigger;
+ trigger = iosapic_intr_info[vector].trigger;
polarity = iosapic_intr_info[vector].polarity;
+ dest = iosapic_intr_info[vector].dest;
+ printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d unregistered\n",
+ gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
+ (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
+ cpu_logical_id(dest), dest, vector);
+
+ if (list_empty(&iosapic_intr_info[vector].rtes)) {
+ /* Sanity check */
+ BUG_ON(iosapic_intr_info[vector].count);
+
+ /* Clear the interrupt controller descriptor */
+ idesc->handler = &no_irq_type;
+
+ /* Clear the interrupt information */
+ memset(&iosapic_intr_info[vector], 0, sizeof(struct iosapic_intr_info));
+ iosapic_intr_info[vector].low32 |= IOSAPIC_MASK;
+ INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes);
+
+ if (idesc->action) {
+ printk(KERN_ERR "interrupt handlers still exist on IRQ %u\n", irq);
+ WARN_ON(1);
+ }
- /* Clear the interrupt information. */
- memset(&iosapic_intr_info[vector], 0, sizeof(struct iosapic_intr_info));
- iosapic_intr_info[vector].rte_index = -1; /* mark as unused */
+ /* Free the interrupt vector */
+ free_irq_vector(vector);
+ }
}
+ out:
spin_unlock(&iosapic_lock);
spin_unlock_irqrestore(&idesc->lock, flags);
-
- /* Free the interrupt vector */
- free_irq_vector(vector);
-
- printk(KERN_INFO "GSI %u (%s, %s) -> vector %d unregistered.\n",
- gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
- (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
- vector);
}
#endif /* CONFIG_ACPI_DEALLOCATE_IRQ */
@@ -724,7 +901,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
(polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
cpu_logical_id(dest), dest, vector);
- set_rte(vector, dest, mask);
+ set_rte(gsi, vector, dest, mask);
return vector;
}
@@ -750,7 +927,7 @@ iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
polarity == IOSAPIC_POL_HIGH ? "high" : "low",
cpu_logical_id(dest), dest, vector);
- set_rte(vector, dest, 1);
+ set_rte(gsi, vector, dest, 1);
}
void __init
@@ -758,8 +935,10 @@ iosapic_system_init (int system_pcat_compat)
{
int vector;
- for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
- iosapic_intr_info[vector].rte_index = -1; /* mark as unused */
+ for (vector = 0; vector < IA64_NUM_VECTORS; ++vector) {
+ iosapic_intr_info[vector].low32 = IOSAPIC_MASK;
+ INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes); /* mark as unused */
+ }
pcat_compat = system_pcat_compat;
if (pcat_compat) {
@@ -825,3 +1004,10 @@ map_iosapic_to_node(unsigned int gsi_base, int node)
return;
}
#endif
+
+static int __init iosapic_enable_kmalloc (void)
+{
+ iosapic_kmalloc_ok = 1;
+ return 0;
+}
+core_initcall (iosapic_enable_kmalloc);
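
iosapic_alloc_rte() above illustrates a two-phase allocation pattern: RTEs requested before the slab allocator is up are carved out of bootmem and kept on a free list, and a core_initcall() flips iosapic_kmalloc_ok so later requests fall through to kmalloc(). A hedged sketch of the same pattern follows; struct obj, the pool size, and the function names are illustrative, not part of this patch.

    /* Sketch: early-boot pool that hands out objects before kmalloc() works,
     * then uses kmalloc() once the slab allocator is initialized. */
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/bootmem.h>
    #include <linux/init.h>

    #define POOL_OBJS        64
    #define OBJ_PREALLOCATED 0x1

    struct obj {
        struct list_head list;
        unsigned int flags;
        /* payload ... */
    };

    static int obj_kmalloc_ok;          /* set once slab is initialized */
    static LIST_HEAD(free_pool);

    static struct obj *obj_alloc(void)
    {
        struct obj *o;
        int i;

        if (!obj_kmalloc_ok && list_empty(&free_pool)) {
            /* carve a batch out of bootmem and chain it onto the pool */
            o = alloc_bootmem(sizeof(*o) * POOL_OBJS);
            for (i = 0; i < POOL_OBJS; i++, o++)
                list_add(&o->list, &free_pool);
        }
        if (!list_empty(&free_pool)) {
            o = list_entry(free_pool.next, struct obj, list);
            list_del(&o->list);
            memset(o, 0, sizeof(*o));
            o->flags |= OBJ_PREALLOCATED;    /* return to pool, never kfree() */
            return o;
        }
        o = kmalloc(sizeof(*o), GFP_ATOMIC); /* may be called with irqs off */
        if (o)
            memset(o, 0, sizeof(*o));
        return o;
    }

    static void obj_free(struct obj *o)
    {
        if (o->flags & OBJ_PREALLOCATED)
            list_add_tail(&o->list, &free_pool);
        else
            kfree(o);
    }

    static int __init obj_enable_kmalloc(void)
    {
        obj_kmalloc_ok = 1;
        return 0;
    }
    core_initcall(obj_enable_kmalloc);

The RTE_PREALLOCATED flag matters because bootmem objects can never be handed to kfree(); they must go back onto the free list.
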
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 5ba06ebe355be..4fe60c7a2e906 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -63,20 +63,30 @@ EXPORT_SYMBOL(isa_irq_to_vector_map);
static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_NUM_DEVICE_VECTORS)];
int
-assign_irq_vector (int irq)
+assign_irq_vector_nopanic (int irq)
{
int pos, vector;
again:
pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
vector = IA64_FIRST_DEVICE_VECTOR + pos;
if (vector > IA64_LAST_DEVICE_VECTOR)
- /* XXX could look for sharable vectors instead of panic'ing... */
- panic("assign_irq_vector: out of interrupt vectors!");
+ return -1;
if (test_and_set_bit(pos, ia64_vector_mask))
goto again;
return vector;
}
+int
+assign_irq_vector (int irq)
+{
+ int vector = assign_irq_vector_nopanic(irq);
+
+ if (vector < 0)
+ panic("assign_irq_vector: out of interrupt vectors!");
+
+ return vector;
+}
+
void
free_irq_vector (int vector)
{
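
The irq_ia64.c hunk above splits vector allocation into assign_irq_vector_nopanic(), which returns -1 on exhaustion, and a thin assign_irq_vector() wrapper that keeps the historical panic for existing callers; iosapic_register_intr() uses the nopanic variant so it can fall back to iosapic_find_sharable_vector(). A small user-space sketch of that refactor shape (the slot bitmap and names are illustrative):

    /* Sketch: fallible core allocator plus a legacy fail-hard wrapper. */
    #include <stdio.h>
    #include <stdlib.h>

    #define NSLOTS 16
    static unsigned int slot_mask;      /* bit i set: slot i in use */

    static int alloc_slot_nopanic(void)
    {
        for (int i = 0; i < NSLOTS; i++)
            if (!(slot_mask & (1u << i))) {
                slot_mask |= 1u << i;
                return i;
            }
        return -1;                      /* exhausted: let the caller decide */
    }

    static int alloc_slot(void)
    {
        int slot = alloc_slot_nopanic();

        if (slot < 0) {                 /* legacy fail-hard behavior */
            fprintf(stderr, "alloc_slot: out of slots!\n");
            abort();
        }
        return slot;
    }

    int main(void)
    {
        printf("first slot: %d\n", alloc_slot());   /* 0 */
        return 0;
    }
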
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 71147be3279ce..376fcbc3f8da8 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -480,14 +480,6 @@ typedef struct {
#define PFM_CMD_ARG_MANY -1 /* cannot be zero */
typedef struct {
- int debug; /* turn on/off debugging via syslog */
- int debug_ovfl; /* turn on/off debug printk in overflow handler */
- int fastctxsw; /* turn on/off fast (unsecure) ctxsw */
- int expert_mode; /* turn on/off value checking */
- int debug_pfm_read;
-} pfm_sysctl_t;
-
-typedef struct {
unsigned long pfm_spurious_ovfl_intr_count; /* keep track of spurious ovfl interrupts */
unsigned long pfm_replay_ovfl_intr_count; /* keep track of replayed ovfl interrupts */
unsigned long pfm_ovfl_intr_count; /* keep track of ovfl interrupts */
@@ -514,8 +506,8 @@ static LIST_HEAD(pfm_buffer_fmt_list);
static pmu_config_t *pmu_conf;
/* sysctl() controls */
-static pfm_sysctl_t pfm_sysctl;
-int pfm_debug_var;
+pfm_sysctl_t pfm_sysctl;
+EXPORT_SYMBOL(pfm_sysctl);
static ctl_table pfm_ctl_table[]={
{1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
@@ -1576,7 +1568,7 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
goto abort_locked;
}
- DPRINT(("[%d] fd=%d type=%d\n", current->pid, msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
+ DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
ret = -EFAULT;
if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);
@@ -3695,8 +3687,6 @@ pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
pfm_sysctl.debug = m == 0 ? 0 : 1;
- pfm_debug_var = pfm_sysctl.debug;
-
printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
if (m == 0) {
@@ -4996,13 +4986,21 @@ pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
}
static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
-
+ /*
+ * pfm_handle_work() can be called with interrupts enabled
+ * (TIF_NEED_RESCHED) or disabled. The down_interruptible
+ * call may sleep, therefore we must re-enable interrupts
+ * to avoid deadlocks. It is safe to do so because this function
+ * is called ONLY when returning to user level (PUStk=1), in which case
+ * there is no risk of kernel stack overflow due to deep
+ * interrupt nesting.
+ */
void
pfm_handle_work(void)
{
pfm_context_t *ctx;
struct pt_regs *regs;
- unsigned long flags;
+ unsigned long flags, dummy_flags;
unsigned long ovfl_regs;
unsigned int reason;
int ret;
@@ -5039,18 +5037,15 @@ pfm_handle_work(void)
//if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking;
+ /*
+ * restore interrupt mask to what it was on entry.
+ * Could be enabled/disabled.
+ */
UNPROTECT_CTX(ctx, flags);
- /*
- * pfm_handle_work() is currently called with interrupts disabled.
- * The down_interruptible call may sleep, therefore we
- * must re-enable interrupts to avoid deadlocks. It is
- * safe to do so because this function is called ONLY
- * when returning to user level (PUStk=1), in which case
- * there is no risk of kernel stack overflow due to deep
- * interrupt nesting.
- */
- BUG_ON(flags & IA64_PSR_I);
+ /*
+ * force interrupt enable because of down_interruptible()
+ */
local_irq_enable();
DPRINT(("before block sleeping\n"));
@@ -5064,12 +5059,12 @@ pfm_handle_work(void)
DPRINT(("after block sleeping ret=%d\n", ret));
/*
- * disable interrupts to restore state we had upon entering
- * this function
+ * lock context and mask interrupts again
+ * We save flags into a dummy because we may have
+ * altered interrupts mask compared to entry in this
+ * function.
*/
- local_irq_disable();
-
- PROTECT_CTX(ctx, flags);
+ PROTECT_CTX(ctx, dummy_flags);
/*
* we need to read the ovfl_regs only after wake-up
@@ -5095,7 +5090,9 @@ skip_blocking:
ctx->ctx_ovfl_regs[0] = 0UL;
nothing_to_do:
-
+ /*
+ * restore flags as they were upon entry
+ */
UNPROTECT_CTX(ctx, flags);
}
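
The pfm_handle_work() rework above hinges on a small flag discipline: the interrupt state captured at entry is kept in flags, the re-lock after sleeping saves into a throwaway dummy_flags, and the final UNPROTECT_CTX() restores the entry state. A toy user-space mock of that discipline (the lock/irq helpers here are stand-ins, not kernel APIs):

    /* Mock of the flags/dummy_flags pattern in pfm_handle_work() above. */
    #include <stdio.h>

    static int irqs_enabled = 0;        /* pretend we enter with irqs off */

    static void lock_irqsave(unsigned long *f) { *f = irqs_enabled; irqs_enabled = 0; }
    static void unlock_irqrestore(unsigned long f) { irqs_enabled = (int)f; }

    int main(void)
    {
        unsigned long flags, dummy_flags;

        lock_irqsave(&flags);           /* PROTECT_CTX: remember entry state */
        unlock_irqrestore(flags);       /* UNPROTECT_CTX before sleeping */
        irqs_enabled = 1;               /* local_irq_enable() for down_interruptible() */
        /* ... sleep ... */
        lock_irqsave(&dummy_flags);     /* re-lock; entry state stays in 'flags' */
        /* ... finish work ... */
        unlock_irqrestore(flags);       /* leave with the caller's irq state */
        printf("irqs_enabled on exit: %d\n", irqs_enabled);  /* 0 */
        return 0;
    }

Restoring flags rather than dummy_flags is the whole point: the interrupt mask may have been altered while sleeping, and the caller must get back exactly what it arrived with.
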
diff --git a/arch/ia64/kernel/perfmon_default_smpl.c b/arch/ia64/kernel/perfmon_default_smpl.c
index 965d29004555a..344941db0a9ed 100644
--- a/arch/ia64/kernel/perfmon_default_smpl.c
+++ b/arch/ia64/kernel/perfmon_default_smpl.c
@@ -20,24 +20,17 @@ MODULE_AUTHOR("Stephane Eranian <eranian@hpl.hp.com>");
MODULE_DESCRIPTION("perfmon default sampling format");
MODULE_LICENSE("GPL");
-MODULE_PARM(debug, "i");
-MODULE_PARM_DESC(debug, "debug");
-
-MODULE_PARM(debug_ovfl, "i");
-MODULE_PARM_DESC(debug_ovfl, "debug ovfl");
-
-
#define DEFAULT_DEBUG 1
#ifdef DEFAULT_DEBUG
#define DPRINT(a) \
do { \
- if (unlikely(debug >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
+ if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
} while (0)
#define DPRINT_ovfl(a) \
do { \
- if (unlikely(debug_ovfl >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
+ if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
} while (0)
#else
@@ -45,8 +38,6 @@ MODULE_PARM_DESC(debug_ovfl, "debug ovfl");
#define DPRINT_ovfl(a)
#endif
-static int debug, debug_ovfl;
-
static int
default_validate(struct task_struct *task, unsigned int flags, int cpu, void *data)
{
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index f05650c801d2a..b7e6b4cb374b8 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -4,10 +4,15 @@
* Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
- * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
+ * Copyright (C) 2000, 2004 Intel Corp
+ * Rohit Seth <rohit.seth@intel.com>
+ * Suresh Siddha <suresh.b.siddha@intel.com>
+ * Gordon Jin <gordon.jin@intel.com>
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
*
+ * 12/26/04 S.Siddha, G.Jin, R.Seth
+ * Add multi-threading and multi-core detection
* 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo().
* 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map
* 03/31/00 R.Seth cpu_initialized and current->processor fixes
@@ -296,6 +301,34 @@ mark_bsp_online (void)
#endif
}
+#ifdef CONFIG_SMP
+static void
+check_for_logical_procs (void)
+{
+ pal_logical_to_physical_t info;
+ s64 status;
+
+ status = ia64_pal_logical_to_phys(0, &info);
+ if (status == -1) {
+ printk(KERN_INFO "No logical to physical processor mapping "
+ "available\n");
+ return;
+ }
+ if (status) {
+ printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
+ status);
+ return;
+ }
+ /*
+ * Total number of siblings that BSP has. Though not all of them
+ * may have booted successfully. The correct number of siblings
+ * booted is in info.overview_num_log.
+ */
+ smp_num_siblings = info.overview_tpc;
+ smp_num_cpucores = info.overview_cpp;
+}
+#endif
+
void __init
setup_arch (char **cmdline_p)
{
@@ -356,6 +389,19 @@ setup_arch (char **cmdline_p)
#ifdef CONFIG_SMP
cpu_physical_id(0) = hard_smp_processor_id();
+
+ cpu_set(0, cpu_sibling_map[0]);
+ cpu_set(0, cpu_core_map[0]);
+
+ check_for_logical_procs();
+ if (smp_num_cpucores > 1)
+ printk(KERN_INFO
+ "cpu package is Multi-Core capable: number of cores=%d\n",
+ smp_num_cpucores);
+ if (smp_num_siblings > 1)
+ printk(KERN_INFO
+ "cpu package is Multi-Threading capable: number of siblings=%d\n",
+ smp_num_siblings);
#endif
cpu_init(); /* initialize the bootstrap CPU */
@@ -459,12 +505,23 @@ show_cpuinfo (struct seq_file *m, void *v)
"cpu regs : %u\n"
"cpu MHz : %lu.%06lu\n"
"itc MHz : %lu.%06lu\n"
- "BogoMIPS : %lu.%02lu\n\n",
+ "BogoMIPS : %lu.%02lu\n",
cpunum, c->vendor, family, c->model, c->revision, c->archrev,
features, c->ppn, c->number,
c->proc_freq / 1000000, c->proc_freq % 1000000,
c->itc_freq / 1000000, c->itc_freq % 1000000,
lpj*HZ/500000, (lpj*HZ/5000) % 100);
+#ifdef CONFIG_SMP
+ seq_printf(m, "siblings : %u\n", c->num_log);
+ if (c->threads_per_core > 1 || c->cores_per_socket > 1)
+ seq_printf(m,
+ "physical id: %u\n"
+ "core id : %u\n"
+ "thread id : %u\n",
+ c->socket_id, c->core_id, c->thread_id);
+#endif
+ seq_printf(m,"\n");
+
return 0;
}
@@ -533,6 +590,14 @@ identify_cpu (struct cpuinfo_ia64 *c)
memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
c->cpu = smp_processor_id();
+
+ /* below default values will be overwritten by identify_siblings()
+ * for Multi-Threading/Multi-Core capable cpu's
+ */
+ c->threads_per_core = c->cores_per_socket = c->num_log = 1;
+ c->socket_id = -1;
+
+ identify_siblings(c);
#endif
c->ppn = cpuid.field.ppn;
c->number = cpuid.field.number;
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index ca1536db33940..0d5ee57c9865c 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -1,8 +1,13 @@
/*
* SMP boot-related support
*
- * Copyright (C) 1998-2003 Hewlett-Packard Co
+ * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 2001, 2004-2005 Intel Corp
+ * Rohit Seth <rohit.seth@intel.com>
+ * Suresh Siddha <suresh.b.siddha@intel.com>
+ * Gordon Jin <gordon.jin@intel.com>
+ * Ashok Raj <ashok.raj@intel.com>
*
* 01/05/16 Rohit Seth <rohit.seth@intel.com> Moved SMP booting functions from smp.c to here.
* 01/04/27 David Mosberger <davidm@hpl.hp.com> Added ITC synching code.
@@ -10,6 +15,11 @@
* smp_boot_cpus()/smp_commence() is replaced by
* smp_prepare_cpus()/__cpu_up()/smp_cpus_done().
* 04/06/21 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support
+ * 04/12/26 Jin Gordon <gordon.jin@intel.com>
+ * 04/12/26 Rohit Seth <rohit.seth@intel.com>
+ * Add multi-threading and multi-core detection
+ * 05/01/30 Suresh Siddha <suresh.b.siddha@intel.com>
+ * Setup cpu_sibling_map and cpu_core_map
*/
#include <linux/config.h>
@@ -122,6 +132,11 @@ EXPORT_SYMBOL(cpu_online_map);
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);
+cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
+cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
+int smp_num_siblings = 1;
+int smp_num_cpucores = 1;
+
/* which logical CPU number maps to which CPU (physical APIC ID) */
volatile int ia64_cpu_to_sapicid[NR_CPUS];
EXPORT_SYMBOL(ia64_cpu_to_sapicid);
@@ -156,7 +171,8 @@ sync_master (void *arg)
local_irq_save(flags);
{
for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
- while (!go[MASTER]);
+ while (!go[MASTER])
+ cpu_relax();
go[MASTER] = 0;
go[SLAVE] = ia64_get_itc();
}
@@ -179,7 +195,8 @@ get_delta (long *rt, long *master)
for (i = 0; i < NUM_ITERS; ++i) {
t0 = ia64_get_itc();
go[MASTER] = 1;
- while (!(tm = go[SLAVE]));
+ while (!(tm = go[SLAVE]))
+ cpu_relax();
go[SLAVE] = 0;
t1 = ia64_get_itc();
@@ -258,7 +275,8 @@ ia64_sync_itc (unsigned int master)
return;
}
- while (go[MASTER]); /* wait for master to be ready */
+ while (go[MASTER])
+ cpu_relax(); /* wait for master to be ready */
spin_lock_irqsave(&itc_sync_lock, flags);
{
@@ -595,7 +613,68 @@ void __devinit smp_prepare_boot_cpu(void)
cpu_set(smp_processor_id(), cpu_callin_map);
}
+/*
+ * mt_info[] is a temporary store for all info returned by
+ * PAL_LOGICAL_TO_PHYSICAL, to be copied into cpuinfo_ia64 when the
+ * specific cpu comes up.
+ */
+static struct {
+ __u32 socket_id;
+ __u16 core_id;
+ __u16 thread_id;
+ __u16 proc_fixed_addr;
+ __u8 valid;
+} mt_info[NR_CPUS] __devinit;
+
#ifdef CONFIG_HOTPLUG_CPU
+static inline void
+remove_from_mtinfo(int cpu)
+{
+ int i;
+
+ for_each_cpu(i)
+ if (mt_info[i].valid && mt_info[i].socket_id ==
+ cpu_data(cpu)->socket_id)
+ mt_info[i].valid = 0;
+}
+
+static inline void
+clear_cpu_sibling_map(int cpu)
+{
+ int i;
+
+ for_each_cpu_mask(i, cpu_sibling_map[cpu])
+ cpu_clear(cpu, cpu_sibling_map[i]);
+ for_each_cpu_mask(i, cpu_core_map[cpu])
+ cpu_clear(cpu, cpu_core_map[i]);
+
+ cpu_sibling_map[cpu] = cpu_core_map[cpu] = CPU_MASK_NONE;
+}
+
+static void
+remove_siblinginfo(int cpu)
+{
+ int last = 0;
+
+ if (cpu_data(cpu)->threads_per_core == 1 &&
+ cpu_data(cpu)->cores_per_socket == 1) {
+ cpu_clear(cpu, cpu_core_map[cpu]);
+ cpu_clear(cpu, cpu_sibling_map[cpu]);
+ return;
+ }
+
+ last = (cpus_weight(cpu_core_map[cpu]) == 1 ? 1 : 0);
+
+ /* remove it from all sibling map's */
+ clear_cpu_sibling_map(cpu);
+
+ /* if this cpu is the last in the core group, remove all its info
+ * from mt_info structure
+ */
+ if (last)
+ remove_from_mtinfo(cpu);
+}
+
extern void fixup_irqs(void);
/* must be called with cpucontrol mutex held */
int __cpu_disable(void)
@@ -608,6 +687,7 @@ int __cpu_disable(void)
if (cpu == 0)
return -EBUSY;
+ remove_siblinginfo(cpu);
fixup_irqs();
local_flush_tlb_all();
cpu_clear(cpu, cpu_callin_map);
@@ -660,6 +740,23 @@ smp_cpus_done (unsigned int dummy)
(int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
}
+static inline void __devinit
+set_cpu_sibling_map(int cpu)
+{
+ int i;
+
+ for_each_online_cpu(i) {
+ if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) {
+ cpu_set(i, cpu_core_map[cpu]);
+ cpu_set(cpu, cpu_core_map[i]);
+ if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
+ cpu_set(i, cpu_sibling_map[cpu]);
+ cpu_set(cpu, cpu_sibling_map[i]);
+ }
+ }
+ }
+}
+
int __devinit
__cpu_up (unsigned int cpu)
{
@@ -682,6 +779,15 @@ __cpu_up (unsigned int cpu)
if (ret < 0)
return ret;
+ if (cpu_data(cpu)->threads_per_core == 1 &&
+ cpu_data(cpu)->cores_per_socket == 1) {
+ cpu_set(cpu, cpu_sibling_map[cpu]);
+ cpu_set(cpu, cpu_core_map[cpu]);
+ return 0;
+ }
+
+ set_cpu_sibling_map(cpu);
+
return 0;
}
@@ -709,3 +815,106 @@ init_smp_config(void)
ia64_sal_strerror(sal_ret));
}
+static inline int __devinit
+check_for_mtinfo_index(void)
+{
+ int i;
+
+ for_each_cpu(i)
+ if (!mt_info[i].valid)
+ return i;
+
+ return -1;
+}
+
+/*
+ * Search the mt_info to find out if this socket's cid/tid information is
+ * cached or not. If the socket exists, fill in the core_id and thread_id
+ * in cpuinfo
+ */
+static int __devinit
+check_for_new_socket(__u16 logical_address, struct cpuinfo_ia64 *c)
+{
+ int i;
+ __u32 sid = c->socket_id;
+
+ for_each_cpu(i) {
+ if (mt_info[i].valid && mt_info[i].proc_fixed_addr == logical_address
+ && mt_info[i].socket_id == sid) {
+ c->core_id = mt_info[i].core_id;
+ c->thread_id = mt_info[i].thread_id;
+ return 1; /* not a new socket */
+ }
+ }
+ return 0;
+}
+
+/*
+ * identify_siblings(cpu) gets called from identify_cpu. This populates the
+ * information related to logical execution units in per_cpu_data structure.
+ */
+void __devinit
+identify_siblings(struct cpuinfo_ia64 *c)
+{
+ s64 status;
+ u16 pltid;
+ u64 proc_fixed_addr;
+ int count, i;
+ pal_logical_to_physical_t info;
+
+ if (smp_num_cpucores == 1 && smp_num_siblings == 1)
+ return;
+
+ if ((status = ia64_pal_logical_to_phys(0, &info)) != PAL_STATUS_SUCCESS) {
+ printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
+ status);
+ return;
+ }
+ if ((status = ia64_sal_physical_id_info(&pltid)) != PAL_STATUS_SUCCESS) {
+ printk(KERN_ERR "ia64_sal_pltid failed with %ld\n", status);
+ return;
+ }
+ if ((status = ia64_pal_fixed_addr(&proc_fixed_addr)) != PAL_STATUS_SUCCESS) {
+ printk(KERN_ERR "ia64_pal_fixed_addr failed with %ld\n", status);
+ return;
+ }
+
+ c->socket_id = (pltid << 8) | info.overview_ppid;
+ c->cores_per_socket = info.overview_cpp;
+ c->threads_per_core = info.overview_tpc;
+ count = c->num_log = info.overview_num_log;
+
+ /* If the thread and core id information is already cached, then
+ * we will simply update cpu_info and return. Otherwise, we will
+ * do the PAL calls and cache core and thread id's of all the siblings.
+ */
+ if (check_for_new_socket(proc_fixed_addr, c))
+ return;
+
+ for (i = 0; i < count; i++) {
+ int index;
+
+ if (i && (status = ia64_pal_logical_to_phys(i, &info))
+ != PAL_STATUS_SUCCESS) {
+ printk(KERN_ERR "ia64_pal_logical_to_phys failed"
+ " with %ld\n", status);
+ return;
+ }
+ if (info.log2_la == proc_fixed_addr) {
+ c->core_id = info.log1_cid;
+ c->thread_id = info.log1_tid;
+ }
+
+ index = check_for_mtinfo_index();
+ /* We will not do the mt_info caching optimization in this case.
+ */
+ if (index < 0)
+ continue;
+
+ mt_info[index].valid = 1;
+ mt_info[index].socket_id = c->socket_id;
+ mt_info[index].core_id = info.log1_cid;
+ mt_info[index].thread_id = info.log1_tid;
+ mt_info[index].proc_fixed_addr = info.log2_la;
+ }
+}
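
identify_siblings() above composes socket_id as (pltid << 8) | info.overview_ppid and caches per-socket core/thread ids in mt_info[]; set_cpu_sibling_map() then marks two CPUs as core-mates when their socket_ids match, and additionally as thread siblings when their core_ids match. A stand-alone sketch of just that map-building step, with a made-up eight-CPU topology (struct cpu_topo and the bitmask arrays are illustrative):

    /* Sketch: building core/sibling masks from (socket_id, core_id) pairs,
     * mirroring set_cpu_sibling_map() in the hunk above. */
    #include <stdio.h>

    #define NCPUS 8

    struct cpu_topo { int socket_id, core_id; };

    static struct cpu_topo topo[NCPUS] = {
        {0, 0}, {0, 0}, {0, 1}, {0, 1},   /* socket 0: 2 cores x 2 threads */
        {1, 0}, {1, 0}, {1, 1}, {1, 1},   /* socket 1: 2 cores x 2 threads */
    };

    static unsigned core_map[NCPUS];      /* bit j set: j shares cpu's socket */
    static unsigned sibling_map[NCPUS];   /* bit j set: j shares cpu's core */

    int main(void)
    {
        for (int cpu = 0; cpu < NCPUS; cpu++)
            for (int i = 0; i < NCPUS; i++)
                if (topo[cpu].socket_id == topo[i].socket_id) {
                    core_map[cpu] |= 1u << i;
                    if (topo[cpu].core_id == topo[i].core_id)
                        sibling_map[cpu] |= 1u << i;
                }

        for (int cpu = 0; cpu < NCPUS; cpu++)
            printf("cpu%d: core_map=0x%02x sibling_map=0x%02x\n",
                   cpu, core_map[cpu], sibling_map[cpu]);
        return 0;
    }

For cpu0 this prints core_map=0x0f (all of socket 0) and sibling_map=0x03 (the two threads of core 0), which is exactly the relationship the hotplug path tears down in clear_cpu_sibling_map().
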
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c
index d494ff647caca..2776a074c6f10 100644
--- a/arch/ia64/kernel/unwind.c
+++ b/arch/ia64/kernel/unwind.c
@@ -1943,23 +1943,30 @@ EXPORT_SYMBOL(unw_unwind);
int
unw_unwind_to_user (struct unw_frame_info *info)
{
- unsigned long ip, sp;
+ unsigned long ip, sp, pr = 0;
while (unw_unwind(info) >= 0) {
- if (unw_get_rp(info, &ip) < 0) {
- unw_get_ip(info, &ip);
- UNW_DPRINT(0, "unwind.%s: failed to read return pointer (ip=0x%lx)\n",
- __FUNCTION__, ip);
- return -1;
- }
unw_get_sp(info, &sp);
- if (sp >= (unsigned long)info->task + IA64_STK_OFFSET)
+ if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp)
+ < IA64_PT_REGS_SIZE) {
+ UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n",
+ __FUNCTION__);
break;
- if (ip < FIXADDR_USER_END)
+ }
+ if (unw_is_intr_frame(info) &&
+ (pr & (1UL << PRED_USER_STACK)))
return 0;
+ if (unw_get_pr (info, &pr) < 0) {
+ unw_get_rp(info, &ip);
+ UNW_DPRINT(0, "unwind.%s: failed to read "
+ "predicate register (ip=0x%lx)\n",
+ __FUNCTION__, ip);
+ return -1;
+ }
}
unw_get_ip(info, &ip);
- UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n", __FUNCTION__, ip);
+ UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
+ __FUNCTION__, ip);
return -1;
}
EXPORT_SYMBOL(unw_unwind_to_user);
diff --git a/arch/ia64/lib/memcpy_mck.S b/arch/ia64/lib/memcpy_mck.S
index 6f26ef7cc2367..3c2cd2f04db96 100644
--- a/arch/ia64/lib/memcpy_mck.S
+++ b/arch/ia64/lib/memcpy_mck.S
@@ -300,7 +300,7 @@ EK(.ex_handler, (p[D]) st8 [dst1] = t15, 4*8)
add src_pre_mem=0,src0 // prefetch src pointer
add dst_pre_mem=0,dst0 // prefetch dest pointer
and src0=-8,src0 // 1st src pointer
-(p7) mov ar.lc = r21
+(p7) mov ar.lc = cnt
(p8) mov ar.lc = r0
;;
TEXT_ALIGN(32)
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 6daf15ac89405..91a055f5731f4 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -61,7 +61,8 @@ show_mem (void)
printk("%d reserved pages\n", reserved);
printk("%d pages shared\n", shared);
printk("%d pages swap cached\n", cached);
- printk("%ld pages in page table cache\n", pgtable_cache_size);
+ printk("%ld pages in page table cache\n",
+ pgtable_quicklist_total_size());
}
/* physical address where the bootmem map is located */
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 3456a9b6971ec..c00710929390d 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -582,7 +582,8 @@ void show_mem(void)
printk("%d reserved pages\n", total_reserved);
printk("%d pages shared\n", total_shared);
printk("%d pages swap cached\n", total_cached);
- printk("Total of %ld pages in page table cache\n", pgtable_cache_size);
+ printk("Total of %ld pages in page table cache\n",
+ pgtable_quicklist_total_size());
printk("%d free buffer pages\n", nr_free_buffer_pages());
}
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index da859125aaeff..4174ec999dde6 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -209,10 +209,13 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
}
no_context:
- if (isr & IA64_ISR_SP) {
+ if ((isr & IA64_ISR_SP)
+ || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
+ {
/*
- * This fault was due to a speculative load set the "ed" bit in the psr to
- * ensure forward progress (target register will get a NaT).
+ * This fault was due to a speculative load or lfetch.fault, set the "ed"
+ * bit in the psr to ensure forward progress. (Target register will get a
+ * NaT for ld.s, lfetch will be canceled.)
*/
ia64_psr(regs)->ed = 1;
return;
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 65cf839573ea8..547785e3cba2f 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -39,6 +39,9 @@
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+DEFINE_PER_CPU(unsigned long *, __pgtable_quicklist);
+DEFINE_PER_CPU(long, __pgtable_quicklist_size);
+
extern void ia64_tlb_init (void);
unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
@@ -50,27 +53,53 @@ struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif
-static int pgt_cache_water[2] = { 25, 50 };
-
-struct page *zero_page_memmap_ptr; /* map entry for zero page */
+struct page *zero_page_memmap_ptr; /* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);
+#define MIN_PGT_PAGES 25UL
+#define MAX_PGT_FREES_PER_PASS 16L
+#define PGT_FRACTION_OF_NODE_MEM 16
+
+static inline long
+max_pgt_pages(void)
+{
+ u64 node_free_pages, max_pgt_pages;
+
+#ifndef CONFIG_NUMA
+ node_free_pages = nr_free_pages();
+#else
+ node_free_pages = nr_free_pages_pgdat(NODE_DATA(numa_node_id()));
+#endif
+ max_pgt_pages = node_free_pages / PGT_FRACTION_OF_NODE_MEM;
+ max_pgt_pages = max(max_pgt_pages, MIN_PGT_PAGES);
+ return max_pgt_pages;
+}
+
+static inline long
+min_pages_to_free(void)
+{
+ long pages_to_free;
+
+ pages_to_free = pgtable_quicklist_size - max_pgt_pages();
+ pages_to_free = min(pages_to_free, MAX_PGT_FREES_PER_PASS);
+ return pages_to_free;
+}
+
void
-check_pgt_cache (void)
+check_pgt_cache(void)
{
- int low, high;
+ long pages_to_free;
- low = pgt_cache_water[0];
- high = pgt_cache_water[1];
+ if (unlikely(pgtable_quicklist_size <= MIN_PGT_PAGES))
+ return;
preempt_disable();
- if (pgtable_cache_size > (u64) high) {
- do {
- if (pgd_quicklist)
- free_page((unsigned long)pgd_alloc_one_fast(NULL));
- if (pmd_quicklist)
- free_page((unsigned long)pmd_alloc_one_fast(NULL, 0));
- } while (pgtable_cache_size > (u64) low);
+ while (unlikely((pages_to_free = min_pages_to_free()) > 0)) {
+ while (pages_to_free--) {
+ free_page((unsigned long)pgtable_quicklist_alloc());
+ }
+ preempt_enable();
+ preempt_disable();
}
preempt_enable();
}
@@ -523,11 +552,14 @@ void
mem_init (void)
{
long reserved_pages, codesize, datasize, initsize;
- unsigned long num_pgt_pages;
pg_data_t *pgdat;
int i;
static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;
+ BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
+ BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
+ BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);
+
#ifdef CONFIG_PCI
/*
* This needs to be called _after_ the command line has been parsed but _before_
@@ -564,18 +596,6 @@ mem_init (void)
num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);
- /*
- * Allow for enough (cached) page table pages so that we can map the entire memory
- * at least once. Each task also needs a couple of page tables pages, so add in a
- * fudge factor for that (don't use "threads-max" here; that would be wrong!).
- * Don't allow the cache to be more than 10% of total memory, though.
- */
-# define NUM_TASKS 500 /* typical number of tasks */
- num_pgt_pages = nr_free_pages() / PTRS_PER_PGD + NUM_TASKS;
- if (num_pgt_pages > nr_free_pages() / 10)
- num_pgt_pages = nr_free_pages() / 10;
- if (num_pgt_pages > (u64) pgt_cache_water[1])
- pgt_cache_water[1] = num_pgt_pages;
/*
* For fsyscall entrypoints with no light-weight handler, use the ordinary
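
The new check_pgt_cache() above replaces the fixed 25/50 watermarks with a cap of max(node_free_pages / PGT_FRACTION_OF_NODE_MEM, MIN_PGT_PAGES) and frees at most MAX_PGT_FREES_PER_PASS pages between preemption points. A stand-alone sketch of that sizing arithmetic, with made-up numbers:

    /* Sketch: the quicklist trimming policy from check_pgt_cache() above. */
    #include <stdio.h>

    #define MIN_PGT_PAGES            25L
    #define MAX_PGT_FREES_PER_PASS   16L
    #define PGT_FRACTION_OF_NODE_MEM 16

    static long max_pgt_pages(long node_free_pages)
    {
        long cap = node_free_pages / PGT_FRACTION_OF_NODE_MEM;
        return cap > MIN_PGT_PAGES ? cap : MIN_PGT_PAGES;
    }

    int main(void)
    {
        long quicklist = 1000;              /* pages currently cached */
        long node_free = 4096;              /* cap = 4096/16 = 256 pages */
        long passes = 0;

        while (quicklist > max_pgt_pages(node_free)) {
            long n = quicklist - max_pgt_pages(node_free);
            if (n > MAX_PGT_FREES_PER_PASS)
                n = MAX_PGT_FREES_PER_PASS; /* bounded work per pass */
            quicklist -= n;
            passes++;                       /* kernel re-enables preemption here */
        }
        printf("trimmed to %ld pages in %ld passes\n", quicklist, passes);
        return 0;
    }

Bounding each pass at 16 frees and toggling preemption between passes keeps latency low while still letting the cache shrink toward the node-relative cap.
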
diff --git a/arch/ia64/sn/include/pci/pcibr_provider.h b/arch/ia64/sn/include/pci/pcibr_provider.h
index b1f05ffec70be..1cd291d8badd0 100644
--- a/arch/ia64/sn/include/pci/pcibr_provider.h
+++ b/arch/ia64/sn/include/pci/pcibr_provider.h
@@ -123,9 +123,11 @@ pcibr_lock(struct pcibus_info *pcibus_info)
}
#define pcibr_unlock(pcibus_info, flag) spin_unlock_irqrestore(&pcibus_info->pbi_lock, flag)
+extern int pcibr_init_provider(void);
extern void *pcibr_bus_fixup(struct pcibus_bussoft *);
-extern uint64_t pcibr_dma_map(struct pcidev_info *, unsigned long, size_t, unsigned int);
-extern void pcibr_dma_unmap(struct pcidev_info *, dma_addr_t, int);
+extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t);
+extern dma_addr_t pcibr_dma_map_consistent(struct pci_dev *, unsigned long, size_t);
+extern void pcibr_dma_unmap(struct pci_dev *, dma_addr_t, int);
/*
* prototypes for the bridge asic register access routines in pcibr_reg.c
diff --git a/arch/ia64/sn/include/pci/pcibus_provider_defs.h b/arch/ia64/sn/include/pci/pcibus_provider_defs.h
deleted file mode 100644
index 07065615bbea7..0000000000000
--- a/arch/ia64/sn/include/pci/pcibus_provider_defs.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
-#define _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
-
-/*
- * SN pci asic types. Do not ever renumber these or reuse values. The
- * values must agree with what prom thinks they are.
- */
-
-#define PCIIO_ASIC_TYPE_UNKNOWN 0
-#define PCIIO_ASIC_TYPE_PPB 1
-#define PCIIO_ASIC_TYPE_PIC 2
-#define PCIIO_ASIC_TYPE_TIOCP 3
-
-/*
- * Common pciio bus provider data. There should be one of these as the
- * first field in any pciio based provider soft structure (e.g. pcibr_soft
- * tioca_soft, etc).
- */
-
-struct pcibus_bussoft {
- uint32_t bs_asic_type; /* chipset type */
- uint32_t bs_xid; /* xwidget id */
- uint64_t bs_persist_busnum; /* Persistent Bus Number */
- uint64_t bs_legacy_io; /* legacy io pio addr */
- uint64_t bs_legacy_mem; /* legacy mem pio addr */
- uint64_t bs_base; /* widget base */
- struct xwidget_info *bs_xwidget_info;
-};
-
-/*
- * DMA mapping flags
- */
-
-#define SN_PCIDMA_CONSISTENT 0x0001
-
-#endif /* _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H */
diff --git a/arch/ia64/sn/include/pci/pcidev.h b/arch/ia64/sn/include/pci/pcidev.h
deleted file mode 100644
index 81eb95d3bf47d..0000000000000
--- a/arch/ia64/sn/include/pci/pcidev.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_PCI_PCIDEV_H
-#define _ASM_IA64_SN_PCI_PCIDEV_H
-
-#include <linux/pci.h>
-
-extern struct sn_irq_info **sn_irq;
-
-#define SN_PCIDEV_INFO(pci_dev) \
- ((struct pcidev_info *)(pci_dev)->sysdata)
-
-/*
- * Given a pci_bus, return the sn pcibus_bussoft struct. Note that
- * this only works for root busses, not for busses represented by PPB's.
- */
-
-#define SN_PCIBUS_BUSSOFT(pci_bus) \
- ((struct pcibus_bussoft *)(PCI_CONTROLLER((pci_bus))->platform_data))
-
-/*
- * Given a struct pci_dev, return the sn pcibus_bussoft struct. Note
- * that this is not equivalent to SN_PCIBUS_BUSSOFT(pci_dev->bus) due
- * due to possible PPB's in the path.
- */
-
-#define SN_PCIDEV_BUSSOFT(pci_dev) \
- (SN_PCIDEV_INFO(pci_dev)->pdi_host_pcidev_info->pdi_pcibus_info)
-
-#define PCIIO_BUS_NONE 255 /* bus 255 reserved */
-#define PCIIO_SLOT_NONE 255
-#define PCIIO_FUNC_NONE 255
-#define PCIIO_VENDOR_ID_NONE (-1)
-
-struct pcidev_info {
- uint64_t pdi_pio_mapped_addr[7]; /* 6 BARs PLUS 1 ROM */
- uint64_t pdi_slot_host_handle; /* Bus and devfn Host pci_dev */
-
- struct pcibus_bussoft *pdi_pcibus_info; /* Kernel common bus soft */
- struct pcidev_info *pdi_host_pcidev_info; /* Kernel Host pci_dev */
- struct pci_dev *pdi_linux_pcidev; /* Kernel pci_dev */
-
- struct sn_irq_info *pdi_sn_irq_info;
-};
-
-extern void sn_irq_fixup(struct pci_dev *pci_dev,
- struct sn_irq_info *sn_irq_info);
-
-#endif /* _ASM_IA64_SN_PCI_PCIDEV_H */
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile
index 6c7f4d9e8ea07..4f381fb25049b 100644
--- a/arch/ia64/sn/kernel/Makefile
+++ b/arch/ia64/sn/kernel/Makefile
@@ -10,3 +10,4 @@
obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
huberror.o io_init.o iomv.o klconflib.o sn2/
obj-$(CONFIG_IA64_GENERIC) += machvec.o
+obj-$(CONFIG_SGI_TIOCX) += tiocx.o
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c
index ce0bc4085eae1..647deae9bfcd5 100644
--- a/arch/ia64/sn/kernel/bte.c
+++ b/arch/ia64/sn/kernel/bte.c
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/config.h>
@@ -170,10 +170,6 @@ retry_bteop:
/* Initialize the notification to a known value. */
*bte->most_rcnt_na = BTE_WORD_BUSY;
- /* Set the status reg busy bit and transfer length */
- BTE_PRINTKV(("IBLS = 0x%lx\n", IBLS_BUSY | transfer_size));
- BTE_LNSTAT_STORE(bte, IBLS_BUSY | transfer_size);
-
/* Set the source and destination registers */
BTE_PRINTKV(("IBSA = 0x%lx)\n", (TO_PHYS(src))));
BTE_SRC_STORE(bte, TO_PHYS(src));
@@ -188,7 +184,7 @@ retry_bteop:
/* Initiate the transfer */
BTE_PRINTK(("IBCT = 0x%lx)\n", BTE_VALID_MODE(mode)));
- BTE_CTRL_STORE(bte, BTE_VALID_MODE(mode));
+ BTE_START_TRANSFER(bte, transfer_size, BTE_VALID_MODE(mode));
itc_end = ia64_get_itc() + (40000000 * local_cpu_data->cyc_per_usec);
@@ -429,10 +425,16 @@ void bte_init_node(nodepda_t * mynodepda, cnodeid_t cnode)
mynodepda->bte_recovery_timer.data = (unsigned long)mynodepda;
for (i = 0; i < BTES_PER_NODE; i++) {
+ u64 *base_addr;
+
/* Which link status register should we use? */
- unsigned long link_status = (i == 0 ? IIO_IBLS0 : IIO_IBLS1);
- mynodepda->bte_if[i].bte_base_addr = (u64 *)
- REMOTE_HUB_ADDR(cnodeid_to_nasid(cnode), link_status);
+ base_addr = (u64 *)
+ REMOTE_HUB_ADDR(cnodeid_to_nasid(cnode), BTE_BASE_ADDR(i));
+ mynodepda->bte_if[i].bte_base_addr = base_addr;
+ mynodepda->bte_if[i].bte_source_addr = BTE_SOURCE_ADDR(base_addr);
+ mynodepda->bte_if[i].bte_destination_addr = BTE_DEST_ADDR(base_addr);
+ mynodepda->bte_if[i].bte_control_addr = BTE_CTRL_ADDR(base_addr);
+ mynodepda->bte_if[i].bte_notify_addr = BTE_NOTIF_ADDR(base_addr);
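/*
 * Note: BTE_BASE_ADDR(), BTE_SOURCE_ADDR(), BTE_DEST_ADDR(),
 * BTE_CTRL_ADDR() and BTE_NOTIF_ADDR() are assumed to be macros from the
 * matching bte.h update, computing the per-interface register addresses
 * so that one code path serves both shub1 and shub2 register layouts.
 */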
/*
* Initialize the notification and spinlock
diff --git a/arch/ia64/sn/kernel/bte_error.c b/arch/ia64/sn/kernel/bte_error.c
index fd104312c6bdd..fcbc748ae4337 100644
--- a/arch/ia64/sn/kernel/bte_error.c
+++ b/arch/ia64/sn/kernel/bte_error.c
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/types.h>
@@ -33,48 +33,28 @@ void bte_error_handler(unsigned long);
* Wait until all BTE related CRBs are completed
* and then reset the interfaces.
*/
-void bte_error_handler(unsigned long _nodepda)
+void shub1_bte_error_handler(unsigned long _nodepda)
{
struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
- spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;
nasid_t nasid;
int i;
int valid_crbs;
- unsigned long irq_flags;
- volatile u64 *notify;
- bte_result_t bh_error;
ii_imem_u_t imem; /* II IMEM Register */
ii_icrb0_d_u_t icrbd; /* II CRB Register D */
ii_ibcr_u_t ibcr;
ii_icmr_u_t icmr;
ii_ieclr_u_t ieclr;
- BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda,
+ BTE_PRINTK(("shub1_bte_error_handler(%p) - %d\n", err_nodepda,
smp_processor_id()));
- spin_lock_irqsave(recovery_lock, irq_flags);
-
if ((err_nodepda->bte_if[0].bh_error == BTE_SUCCESS) &&
(err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) {
BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda,
smp_processor_id()));
- spin_unlock_irqrestore(recovery_lock, irq_flags);
return;
}
- /*
- * Lock all interfaces on this node to prevent new transfers
- * from being queued.
- */
- for (i = 0; i < BTES_PER_NODE; i++) {
- if (err_nodepda->bte_if[i].cleanup_active) {
- continue;
- }
- spin_lock(&err_nodepda->bte_if[i].spinlock);
- BTE_PRINTK(("eh:%p:%d locked %d\n", err_nodepda,
- smp_processor_id(), i));
- err_nodepda->bte_if[i].cleanup_active = 1;
- }
/* Determine information about our hub */
nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);
@@ -101,7 +81,6 @@ void bte_error_handler(unsigned long _nodepda)
mod_timer(recovery_timer, HZ * 5);
BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
smp_processor_id()));
- spin_unlock_irqrestore(recovery_lock, irq_flags);
return;
}
if (icmr.ii_icmr_fld_s.i_crb_vld != 0) {
@@ -120,8 +99,6 @@ void bte_error_handler(unsigned long _nodepda)
BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n",
err_nodepda, smp_processor_id(),
i));
- spin_unlock_irqrestore(recovery_lock,
- irq_flags);
return;
}
}
@@ -146,6 +123,51 @@ void bte_error_handler(unsigned long _nodepda)
ibcr.ii_ibcr_fld_s.i_soft_reset = 1;
REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval);
+ del_timer(recovery_timer);
+}
+
+/*
+ * Wait until all BTE related CRBs are completed
+ * and then reset the interfaces.
+ */
+void bte_error_handler(unsigned long _nodepda)
+{
+ struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
+ spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
+ int i;
+ nasid_t nasid;
+ unsigned long irq_flags;
+ volatile u64 *notify;
+ bte_result_t bh_error;
+
+ BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda,
+ smp_processor_id()));
+
+ spin_lock_irqsave(recovery_lock, irq_flags);
+
+ /*
+ * Lock all interfaces on this node to prevent new transfers
+ * from being queued.
+ */
+ for (i = 0; i < BTES_PER_NODE; i++) {
+ if (err_nodepda->bte_if[i].cleanup_active) {
+ continue;
+ }
+ spin_lock(&err_nodepda->bte_if[i].spinlock);
+ BTE_PRINTK(("eh:%p:%d locked %d\n", err_nodepda,
+ smp_processor_id(), i));
+ err_nodepda->bte_if[i].cleanup_active = 1;
+ }
+
+ if (is_shub1()) {
+ shub1_bte_error_handler(_nodepda);
+ } else {
+ nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);
+
+ if (ia64_sn_bte_recovery(nasid))
+ panic("bte_error_handler(): Fatal BTE Error");
+ }
+
for (i = 0; i < BTES_PER_NODE; i++) {
bh_error = err_nodepda->bte_if[i].bh_error;
if (bh_error != BTE_SUCCESS) {
@@ -165,8 +187,6 @@ void bte_error_handler(unsigned long _nodepda)
spin_unlock(&err_nodepda->bte_if[i].spinlock);
}
- del_timer(recovery_timer);
-
spin_unlock_irqrestore(recovery_lock, irq_flags);
}
diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c
index 2bdf684c50661..5c39b43ba3c02 100644
--- a/arch/ia64/sn/kernel/huberror.c
+++ b/arch/ia64/sn/kernel/huberror.c
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000,2002-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
@@ -38,8 +38,11 @@ static irqreturn_t hub_eint_handler(int irq, void *arg, struct pt_regs *ep)
if ((int)ret_stuff.v0)
panic("hubii_eint_handler(): Fatal TIO Error");
- if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
- (void)hubiio_crb_error_handler(hubdev_info);
+ if (is_shub1()) {
+ if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
+ (void)hubiio_crb_error_handler(hubdev_info);
+ } else
+ bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid)));
return IRQ_HANDLED;
}
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 001880812b7c8..18160a06a8c9e 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -11,14 +11,15 @@
#include <asm/sn/types.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/addrs.h>
-#include "pci/pcibus_provider_defs.h"
-#include "pci/pcidev.h"
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/pcidev.h>
#include "pci/pcibr_provider.h"
#include "xtalk/xwidgetdev.h"
#include <asm/sn/geo.h>
#include "xtalk/hubdev.h"
#include <asm/sn/io.h>
#include <asm/sn/simulator.h>
+#include <asm/sn/tioca_provider.h>
char master_baseio_wid;
nasid_t master_nasid = INVALID_NASID; /* Partition Master */
@@ -34,6 +35,37 @@ struct brick {
int sn_ioif_inited = 0; /* SN I/O infrastructure initialized? */
+struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */
+
+/*
+ * Hooks and struct for unsupported pci providers
+ */
+
+static dma_addr_t
+sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size)
+{
+ return 0;
+}
+
+static void
+sn_default_pci_unmap(struct pci_dev *pdev, dma_addr_t addr, int direction)
+{
+ return;
+}
+
+static void *
+sn_default_pci_bus_fixup(struct pcibus_bussoft *soft)
+{
+ return NULL;
+}
+
+static struct sn_pcibus_provider sn_pci_default_provider = {
+ .dma_map = sn_default_pci_map,
+ .dma_map_consistent = sn_default_pci_map,
+ .dma_unmap = sn_default_pci_unmap,
+ .bus_fixup = sn_default_pci_bus_fixup,
+};
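For orientation, every pci_dma.c entry point later in this patch reaches its
bus-specific implementation through this table. A minimal sketch of the
dispatch, where example_map() is a hypothetical illustration, not part of
the patch:

static dma_addr_t example_map(struct pci_dev *pdev, unsigned long paddr,
			      size_t size)
{
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	/* asics with no registered provider fall back to the default
	 * provider above, whose dma_map simply returns 0 (failure) */
	return provider->dma_map(pdev, paddr, size);
}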
+
/*
* Retrieve the DMA Flush List given nasid. This list is needed
* to implement the WAR - Flush DMA data on PIO Reads.
@@ -201,6 +233,7 @@ static void sn_pci_fixup_slot(struct pci_dev *dev)
struct sn_irq_info *sn_irq_info;
struct pci_dev *host_pci_dev;
int status = 0;
+ struct pcibus_bussoft *bs;
dev->sysdata = kmalloc(sizeof(struct pcidev_info), GFP_KERNEL);
if (SN_PCIDEV_INFO(dev) <= 0)
@@ -241,6 +274,7 @@ static void sn_pci_fixup_slot(struct pci_dev *dev)
}
/* set up host bus linkages */
+ bs = SN_PCIBUS_BUSSOFT(dev->bus);
host_pci_dev =
pci_find_slot(SN_PCIDEV_INFO(dev)->pdi_slot_host_handle >> 32,
SN_PCIDEV_INFO(dev)->
@@ -248,10 +282,16 @@ static void sn_pci_fixup_slot(struct pci_dev *dev)
SN_PCIDEV_INFO(dev)->pdi_host_pcidev_info =
SN_PCIDEV_INFO(host_pci_dev);
SN_PCIDEV_INFO(dev)->pdi_linux_pcidev = dev;
- SN_PCIDEV_INFO(dev)->pdi_pcibus_info = SN_PCIBUS_BUSSOFT(dev->bus);
+ SN_PCIDEV_INFO(dev)->pdi_pcibus_info = bs;
+
+ if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) {
+ SN_PCIDEV_BUSPROVIDER(dev) = sn_pci_provider[bs->bs_asic_type];
+ } else {
+ SN_PCIDEV_BUSPROVIDER(dev) = &sn_pci_default_provider;
+ }
/* Only set up IRQ stuff if this device has a host bus context */
- if (SN_PCIDEV_BUSSOFT(dev) && sn_irq_info->irq_irq) {
+ if (bs && sn_irq_info->irq_irq) {
SN_PCIDEV_INFO(dev)->pdi_sn_irq_info = sn_irq_info;
dev->irq = SN_PCIDEV_INFO(dev)->pdi_sn_irq_info->irq_irq;
sn_irq_fixup(dev, sn_irq_info);
@@ -271,6 +311,7 @@ static void sn_pci_controller_fixup(int segment, int busnum)
struct pcibus_bussoft *prom_bussoft_ptr;
struct hubdev_info *hubdev_info;
void *provider_soft;
+ struct sn_pcibus_provider *provider;
status =
sal_get_pcibus_info((u64) segment, (u64) busnum,
@@ -291,16 +332,22 @@ static void sn_pci_controller_fixup(int segment, int busnum)
/*
* Per-provider fixup. Copies the contents from prom to local
* area and links SN_PCIBUS_BUSSOFT().
- *
- * Note: Provider is responsible for ensuring that prom_bussoft_ptr
- * represents an asic-type that it can handle.
*/
- if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB) {
- return; /* no further fixup necessary */
+ if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES) {
+ return; /* unsupported asic type */
+ }
+
+ provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type];
+ if (provider == NULL) {
+		return; /* no provider registered for this asic */
+ }
+
+ provider_soft = NULL;
+ if (provider->bus_fixup) {
+ provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr);
}
- provider_soft = pcibr_bus_fixup(prom_bussoft_ptr);
if (provider_soft == NULL) {
return; /* fixup failed or not applicable */
}
@@ -339,6 +386,17 @@ static int __init sn_pci_init(void)
return 0;
/*
+	 * prime sn_pci_provider[]. Individual provider init routines will
+ * override their respective default entries.
+ */
+
+ for (i = 0; i < PCIIO_ASIC_MAX_TYPES; i++)
+ sn_pci_provider[i] = &sn_pci_default_provider;
+
+ pcibr_init_provider();
+ tioca_init_provider();
+
+ /*
* This is needed to avoid bounce limit checks in the blk layer
*/
ia64_max_iommu_merge_mask = ~PAGE_MASK;
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 3be44724f6c8f..0f4e8138658f2 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -13,8 +13,8 @@
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include "xtalk/xwidgetdev.h"
-#include "pci/pcibus_provider_defs.h"
-#include "pci/pcidev.h"
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/pcidev.h>
#include "pci/pcibr_provider.h"
#include <asm/sn/shub_mmr.h>
#include <asm/sn/sn_sal.h>
@@ -82,20 +82,9 @@ static void sn_ack_irq(unsigned int irq)
nasid = get_nasid();
event_occurred =
HUB_L((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED));
- if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
- mask |= (1 << SH_EVENT_OCCURRED_UART_INT_SHFT);
- }
- if (event_occurred & SH_EVENT_OCCURRED_IPI_INT_MASK) {
- mask |= (1 << SH_EVENT_OCCURRED_IPI_INT_SHFT);
- }
- if (event_occurred & SH_EVENT_OCCURRED_II_INT0_MASK) {
- mask |= (1 << SH_EVENT_OCCURRED_II_INT0_SHFT);
- }
- if (event_occurred & SH_EVENT_OCCURRED_II_INT1_MASK) {
- mask |= (1 << SH_EVENT_OCCURRED_II_INT1_SHFT);
- }
+ mask = event_occurred & SH_ALL_INT_MASK;
HUB_S((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS),
- mask);
+ mask);
__set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
move_irq(irq);
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index f0306b516afba..d35f2a6f9c948 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -29,6 +29,7 @@
#include <linux/sched.h>
#include <linux/root_dev.h>
#include <linux/nodemask.h>
+#include <linux/pm.h>
#include <asm/io.h>
#include <asm/sal.h>
@@ -353,6 +354,14 @@ void __init sn_setup(char **cmdline_p)
screen_info = sn_screen_info;
sn_timer_init();
+
+ /*
+ * set pm_power_off to a SAL call to allow
+ * sn machines to power off. The SAL call can be replaced
+ * by an ACPI interface call when ACPI is fully implemented
+ * for sn.
+ */
+ pm_power_off = ia64_sn_power_down;
}
/**
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 197356460ee15..833e700fdac93 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -28,6 +28,7 @@
#include <linux/vmalloc.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
+#include <linux/utsname.h>
#include <linux/cpumask.h>
#include <linux/smp_lock.h>
#include <linux/nodemask.h>
@@ -43,6 +44,7 @@
#include <asm/sn/module.h>
#include <asm/sn/geo.h>
#include <asm/sn/sn2/sn_hwperf.h>
+#include <asm/sn/addrs.h>
static void *sn_hwperf_salheap = NULL;
static int sn_hwperf_obj_cnt = 0;
@@ -81,26 +83,45 @@ out:
return e;
}
+static int sn_hwperf_location_to_bpos(char *location,
+ int *rack, int *bay, int *slot, int *slab)
+{
+ char type;
+
+ /* first scan for an old style geoid string */
+ if (sscanf(location, "%03d%c%02d#%d",
+ rack, &type, bay, slab) == 4)
+ *slot = 0;
+ else /* scan for a new bladed geoid string */
+ if (sscanf(location, "%03d%c%02d^%02d#%d",
+ rack, &type, bay, slot, slab) != 5)
+ return -1;
+ /* success */
+ return 0;
+}
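A usage sketch of the two accepted geoid forms; the location strings are
hypothetical examples, not taken from real hardware:

static void example_bpos_usage(void)
{
	int rack, bay, slot, slab;
	char loc[] = "001c14^02#0";	/* hypothetical bladed geoid */

	/* old style "001c14#0"   -> rack 1, bay 14, slab 0, slot forced to 0 */
	/* bladed    "001c14^02#0" -> rack 1, bay 14, slot 2, slab 0 */
	if (sn_hwperf_location_to_bpos(loc, &rack, &bay, &slot, &slab) == 0)
		printk(KERN_DEBUG "rack %d bay %d slot %d slab %d\n",
		       rack, bay, slot, slab);
}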
+
static int sn_hwperf_geoid_to_cnode(char *location)
{
int cnode;
geoid_t geoid;
moduleid_t module_id;
- char type;
- int rack, slot, slab;
- int this_rack, this_slot, this_slab;
+ int rack, bay, slot, slab;
+ int this_rack, this_bay, this_slot, this_slab;
- if (sscanf(location, "%03d%c%02d#%d", &rack, &type, &slot, &slab) != 4)
+ if (sn_hwperf_location_to_bpos(location, &rack, &bay, &slot, &slab))
return -1;
for (cnode = 0; cnode < numionodes; cnode++) {
geoid = cnodeid_get_geoid(cnode);
module_id = geo_module(geoid);
this_rack = MODULE_GET_RACK(module_id);
- this_slot = MODULE_GET_BPOS(module_id);
+ this_bay = MODULE_GET_BPOS(module_id);
+ this_slot = geo_slot(geoid);
this_slab = geo_slab(geoid);
- if (rack == this_rack && slot == this_slot && slab == this_slab)
+ if (rack == this_rack && bay == this_bay &&
+ slot == this_slot && slab == this_slab) {
break;
+ }
}
return cnode < numionodes ? cnode : -1;
@@ -153,11 +174,36 @@ static const char *sn_hwperf_get_slabname(struct sn_hwperf_object_info *obj,
return slabname;
}
+static void print_pci_topology(struct seq_file *s,
+ struct sn_hwperf_object_info *obj, int *ordinal,
+ u64 rack, u64 bay, u64 slot, u64 slab)
+{
+ char *p1;
+ char *p2;
+ char *pg;
+
+ if (!(pg = (char *)get_zeroed_page(GFP_KERNEL)))
+ return; /* ignore */
+ if (ia64_sn_ioif_get_pci_topology(rack, bay, slot, slab,
+ __pa(pg), PAGE_SIZE) == SN_HWPERF_OP_OK) {
+ for (p1=pg; *p1 && p1 < pg + PAGE_SIZE;) {
+ if (!(p2 = strchr(p1, '\n')))
+ break;
+ *p2 = '\0';
+ seq_printf(s, "pcibus %d %s-%s\n",
+ *ordinal, obj->location, p1);
+ (*ordinal)++;
+ p1 = p2 + 1;
+ }
+ }
+ free_page((unsigned long)pg);
+}
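Assuming SAL fills the page with one newline-terminated description per bus
(the exact format is SAL-defined; the buffer below is hypothetical), a node
at location 001c14 whose page holds "0:1 PIC\n" would emit:

	pcibus 0 001c14-0:1 PIC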
+
static int sn_topology_show(struct seq_file *s, void *d)
{
int sz;
int pt;
- int e;
+ int e = 0;
int i;
int j;
const char *slabname;
@@ -169,11 +215,44 @@ static int sn_topology_show(struct seq_file *s, void *d)
struct sn_hwperf_object_info *p;
struct sn_hwperf_object_info *obj = d; /* this object */
struct sn_hwperf_object_info *objs = s->private; /* all objects */
+ int rack, bay, slot, slab;
+ u8 shubtype;
+ u8 system_size;
+ u8 sharing_size;
+ u8 partid;
+ u8 coher;
+ u8 nasid_shift;
+ u8 region_size;
+ u16 nasid_mask;
+ int nasid_msb;
+ int pci_bus_ordinal = 0;
if (obj == objs) {
- seq_printf(s, "# sn_topology version 1\n");
+ seq_printf(s, "# sn_topology version 2\n");
seq_printf(s, "# objtype ordinal location partition"
" [attribute value [, ...]]\n");
+
+ if (ia64_sn_get_sn_info(0,
+ &shubtype, &nasid_mask, &nasid_shift, &system_size,
+ &sharing_size, &partid, &coher, &region_size))
+ BUG();
+ for (nasid_msb=63; nasid_msb > 0; nasid_msb--) {
+ if (((u64)nasid_mask << nasid_shift) & (1ULL << nasid_msb))
+ break;
+ }
+ seq_printf(s, "partition %u %s local "
+ "shubtype %s, "
+ "nasid_mask 0x%016lx, "
+ "nasid_bits %d:%d, "
+ "system_size %d, "
+ "sharing_size %d, "
+ "coherency_domain %d, "
+ "region_size %d\n",
+ partid, system_utsname.nodename,
+ shubtype ? "shub2" : "shub1",
+ (u64)nasid_mask << nasid_shift, nasid_msb, nasid_shift,
+ system_size, sharing_size, coher, region_size);
}
if (SN_HWPERF_FOREIGN(obj)) {
@@ -181,7 +260,7 @@ static int sn_topology_show(struct seq_file *s, void *d)
return 0;
}
- for (i = 0; obj->name[i]; i++) {
+ for (i = 0; i < SN_HWPERF_MAXSTRING && obj->name[i]; i++) {
if (obj->name[i] == ' ')
obj->name[i] = '_';
}
@@ -221,6 +300,17 @@ static int sn_topology_show(struct seq_file *s, void *d)
seq_putc(s, '\n');
}
}
+
+ /*
+ * PCI busses attached to this node, if any
+ */
+	if (sn_hwperf_location_to_bpos(obj->location,
+		&rack, &bay, &slot, &slab) == 0) {
+		/* export pci bus info */
+		print_pci_topology(s, obj, &pci_bus_ordinal,
+			rack, bay, slot, slab);
+	}
}
if (obj->ports) {
@@ -397,6 +487,9 @@ static int sn_hwperf_map_err(int hwperf_err)
break;
case SN_HWPERF_OP_BUSY:
+ e = -EBUSY;
+ break;
+
case SN_HWPERF_OP_RECONFIGURE:
e = -EAGAIN;
break;
@@ -549,6 +642,7 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg)
r = sn_hwperf_op_cpu(&op_info);
if (r) {
r = sn_hwperf_map_err(r);
+ a.v0 = v0;
goto error;
}
break;
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c
new file mode 100644
index 0000000000000..66190d7e492d6
--- /dev/null
+++ b/arch/ia64/sn/kernel/tiocx.c
@@ -0,0 +1,548 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/proc_fs.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <asm/uaccess.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/io.h>
+#include <asm/sn/types.h>
+#include <asm/sn/shubio.h>
+#include <asm/sn/tiocx.h>
+#include "tio.h"
+#include "xtalk/xwidgetdev.h"
+#include "xtalk/hubdev.h"
+
+#define CX_DEV_NONE 0
+#define DEVICE_NAME "tiocx"
+#define WIDGET_ID 0
+#define TIOCX_DEBUG 0
+
+#if TIOCX_DEBUG
+#define DBG(fmt...) printk(KERN_ALERT fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+struct device_attribute dev_attr_cxdev_control;
+
+/**
+ * tiocx_match - Try to match driver id list with device.
+ * @dev: device pointer
+ * @drv: driver pointer
+ *
+ * Returns 1 if match, 0 otherwise.
+ */
+static int tiocx_match(struct device *dev, struct device_driver *drv)
+{
+ struct cx_dev *cx_dev = to_cx_dev(dev);
+ struct cx_drv *cx_drv = to_cx_driver(drv);
+ const struct cx_device_id *ids = cx_drv->id_table;
+
+ if (!ids)
+ return 0;
+
+ while (ids->part_num) {
+ if (ids->part_num == cx_dev->cx_id.part_num)
+ return 1;
+ ids++;
+ }
+ return 0;
+
+}
+
+static int tiocx_hotplug(struct device *dev, char **envp, int num_envp,
+ char *buffer, int buffer_size)
+{
+ return -ENODEV;
+}
+
+static void tiocx_bus_release(struct device *dev)
+{
+ kfree(to_cx_dev(dev));
+}
+
+struct bus_type tiocx_bus_type = {
+ .name = "tiocx",
+ .match = tiocx_match,
+ .hotplug = tiocx_hotplug,
+};
+
+/**
+ * cx_device_match - Find cx_device in the id table.
+ * @ids: id table from driver
+ * @cx_device: part/mfg id for the device
+ *
+ */
+static const struct cx_device_id *cx_device_match(const struct cx_device_id
+ *ids,
+ struct cx_dev *cx_device)
+{
+ /*
+ * NOTES: We may want to check for CX_ANY_ID too.
+ * Do we want to match against nasid too?
+ * CX_DEV_NONE == 0, if the driver tries to register for
+ * part/mfg == 0 we should return no-match (NULL) here.
+ */
+ while (ids->part_num && ids->mfg_num) {
+ if (ids->part_num == cx_device->cx_id.part_num &&
+ ids->mfg_num == cx_device->cx_id.mfg_num)
+ return ids;
+ ids++;
+ }
+
+ return NULL;
+}
+
+/**
+ * cx_device_probe - Look for matching device; call driver probe routine
+ *		     if found.
+ * @dev: device to probe
+ */
+static int cx_device_probe(struct device *dev)
+{
+ const struct cx_device_id *id;
+ struct cx_drv *cx_drv = to_cx_driver(dev->driver);
+ struct cx_dev *cx_dev = to_cx_dev(dev);
+ int error = 0;
+
+ if (!cx_dev->driver && cx_drv->probe) {
+ id = cx_device_match(cx_drv->id_table, cx_dev);
+ if (id) {
+ if ((error = cx_drv->probe(cx_dev, id)) < 0)
+ return error;
+ else
+ cx_dev->driver = cx_drv;
+ }
+ }
+
+ return error;
+}
+
+/**
+ * cx_driver_remove - Remove driver from device struct.
+ * @dev: device
+ */
+static int cx_driver_remove(struct device *dev)
+{
+ struct cx_dev *cx_dev = to_cx_dev(dev);
+ struct cx_drv *cx_drv = cx_dev->driver;
+ if (cx_drv->remove)
+ cx_drv->remove(cx_dev);
+ cx_dev->driver = NULL;
+ return 0;
+}
+
+/**
+ * cx_driver_register - Register the driver.
+ * @cx_driver: driver table (cx_drv struct) from driver
+ *
+ * Called from the driver init routine to register a driver.
+ * The cx_drv struct contains the driver name, a pointer to
+ * a table of part/mfg numbers and a pointer to the driver's
+ * probe/attach routine.
+ */
+int cx_driver_register(struct cx_drv *cx_driver)
+{
+ cx_driver->driver.name = cx_driver->name;
+ cx_driver->driver.bus = &tiocx_bus_type;
+ cx_driver->driver.probe = cx_device_probe;
+ cx_driver->driver.remove = cx_driver_remove;
+
+ return driver_register(&cx_driver->driver);
+}
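A minimal sketch of a client driver registering against this bus, assuming
the probe/remove signatures implied by cx_device_probe() and
cx_driver_remove() above; the driver name and part/mfg numbers are
illustrative only:

static int mydrv_probe(struct cx_dev *dev, const struct cx_device_id *id)
{
	return 0;	/* claim the device */
}

static void mydrv_remove(struct cx_dev *dev)
{
}

static const struct cx_device_id mydrv_ids[] = {
	{ .part_num = 0xd002, .mfg_num = 0x24 },	/* hypothetical ids */
	{ 0 }	/* tables are terminated by part_num == 0 */
};

static struct cx_drv mydrv = {
	.name = "mydrv",
	.id_table = mydrv_ids,
	.probe = mydrv_probe,
	.remove = mydrv_remove,
};

/* from the client module's init routine: cx_driver_register(&mydrv); */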
+
+/**
+ * cx_driver_unregister - Unregister the driver.
+ * @cx_driver: driver table (cx_drv struct) from driver
+ */
+int cx_driver_unregister(struct cx_drv *cx_driver)
+{
+ driver_unregister(&cx_driver->driver);
+ return 0;
+}
+
+/**
+ * cx_device_register - Register a device.
+ * @nasid: device's nasid
+ * @part_num: device's part number
+ * @mfg_num: device's manufacturer number
+ * @hubdev: hub info associated with this device
+ *
+ */
+int
+cx_device_register(nasid_t nasid, int part_num, int mfg_num,
+ struct hubdev_info *hubdev)
+{
+ struct cx_dev *cx_dev;
+
+ cx_dev = kcalloc(1, sizeof(struct cx_dev), GFP_KERNEL);
+ DBG("cx_dev= 0x%p\n", cx_dev);
+ if (cx_dev == NULL)
+ return -ENOMEM;
+
+ cx_dev->cx_id.part_num = part_num;
+ cx_dev->cx_id.mfg_num = mfg_num;
+ cx_dev->cx_id.nasid = nasid;
+ cx_dev->hubdev = hubdev;
+
+ cx_dev->dev.parent = NULL;
+ cx_dev->dev.bus = &tiocx_bus_type;
+ cx_dev->dev.release = tiocx_bus_release;
+ snprintf(cx_dev->dev.bus_id, BUS_ID_SIZE, "%d.0x%x",
+ cx_dev->cx_id.nasid, cx_dev->cx_id.part_num);
+ device_register(&cx_dev->dev);
+ get_device(&cx_dev->dev);
+
+ device_create_file(&cx_dev->dev, &dev_attr_cxdev_control);
+
+ return 0;
+}
+
+/**
+ * cx_device_unregister - Unregister a device.
+ * @cx_dev: cx device to unregister
+ */
+int cx_device_unregister(struct cx_dev *cx_dev)
+{
+ put_device(&cx_dev->dev);
+ device_unregister(&cx_dev->dev);
+ return 0;
+}
+
+/**
+ * cx_device_reload - Reload the device.
+ * @cx_dev: device to reload
+ *
+ * Unregister the device and then re-register it with its current
+ * nasid and part/mfg numbers.
+ */
+static int cx_device_reload(struct cx_dev *cx_dev)
+{
+ device_remove_file(&cx_dev->dev, &dev_attr_cxdev_control);
+ cx_device_unregister(cx_dev);
+ return cx_device_register(cx_dev->cx_id.nasid, cx_dev->cx_id.part_num,
+ cx_dev->cx_id.mfg_num, cx_dev->hubdev);
+}
+
+static inline uint64_t tiocx_intr_alloc(nasid_t nasid, int widget,
+ u64 sn_irq_info,
+ int req_irq, nasid_t req_nasid,
+ int req_slice)
+{
+ struct ia64_sal_retval rv;
+ rv.status = 0;
+ rv.v0 = 0;
+
+ ia64_sal_oemcall_nolock(&rv, SN_SAL_IOIF_INTERRUPT,
+ SAL_INTR_ALLOC, nasid,
+ widget, sn_irq_info, req_irq,
+ req_nasid, req_slice);
+ return rv.status;
+}
+
+static inline void tiocx_intr_free(nasid_t nasid, int widget,
+ struct sn_irq_info *sn_irq_info)
+{
+ struct ia64_sal_retval rv;
+ rv.status = 0;
+ rv.v0 = 0;
+
+ ia64_sal_oemcall_nolock(&rv, SN_SAL_IOIF_INTERRUPT,
+ SAL_INTR_FREE, nasid,
+ widget, sn_irq_info->irq_irq,
+ sn_irq_info->irq_cookie, 0, 0);
+}
+
+struct sn_irq_info *tiocx_irq_alloc(nasid_t nasid, int widget, int irq,
+ nasid_t req_nasid, int slice)
+{
+ struct sn_irq_info *sn_irq_info;
+ int status;
+ int sn_irq_size = sizeof(struct sn_irq_info);
+
+ if ((nasid & 1) == 0)
+ return NULL;
+
+ sn_irq_info = kmalloc(sn_irq_size, GFP_KERNEL);
+ if (sn_irq_info == NULL)
+ return NULL;
+
+ memset(sn_irq_info, 0x0, sn_irq_size);
+
+ status = tiocx_intr_alloc(nasid, widget, __pa(sn_irq_info), irq,
+ req_nasid, slice);
+ if (status) {
+ kfree(sn_irq_info);
+ return NULL;
+ } else {
+ return sn_irq_info;
+ }
+}
+
+void tiocx_irq_free(struct sn_irq_info *sn_irq_info)
+{
+ uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge;
+ nasid_t nasid = NASID_GET(bridge);
+ int widget;
+
+ if (nasid & 1) {
+ widget = TIO_SWIN_WIDGETNUM(bridge);
+ tiocx_intr_free(nasid, widget, sn_irq_info);
+ kfree(sn_irq_info);
+ }
+}
+
+uint64_t
+tiocx_dma_addr(uint64_t addr)
+{
+ return PHYS_TO_TIODMA(addr);
+}
+
+uint64_t
+tiocx_swin_base(int nasid)
+{
+ return TIO_SWIN_BASE(nasid, TIOCX_CORELET);
+}
+
+EXPORT_SYMBOL(cx_driver_register);
+EXPORT_SYMBOL(cx_driver_unregister);
+EXPORT_SYMBOL(cx_device_register);
+EXPORT_SYMBOL(cx_device_unregister);
+EXPORT_SYMBOL(tiocx_irq_alloc);
+EXPORT_SYMBOL(tiocx_irq_free);
+EXPORT_SYMBOL(tiocx_bus_type);
+EXPORT_SYMBOL(tiocx_dma_addr);
+EXPORT_SYMBOL(tiocx_swin_base);
+
+static uint64_t tiocx_get_hubdev_info(u64 handle, u64 address)
+{
+
+ struct ia64_sal_retval ret_stuff;
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+
+ ia64_sal_oemcall_nolock(&ret_stuff,
+ SN_SAL_IOIF_GET_HUBDEV_INFO,
+ handle, address, 0, 0, 0, 0, 0);
+ return ret_stuff.v0;
+}
+
+static void tio_conveyor_set(nasid_t nasid, int enable_flag)
+{
+ uint64_t ice_frz;
+ uint64_t disable_cb = (1ull << 61);
+
+ if (!(nasid & 1))
+ return;
+
+ ice_frz = REMOTE_HUB_L(nasid, TIO_ICE_FRZ_CFG);
+ if (enable_flag) {
+ if (!(ice_frz & disable_cb)) /* already enabled */
+ return;
+ ice_frz &= ~disable_cb;
+ } else {
+ if (ice_frz & disable_cb) /* already disabled */
+ return;
+ ice_frz |= disable_cb;
+ }
+	DBG("TIO_ICE_FRZ_CFG= 0x%lx\n", ice_frz);
+ REMOTE_HUB_S(nasid, TIO_ICE_FRZ_CFG, ice_frz);
+}
+
+#define tio_conveyor_enable(nasid) tio_conveyor_set(nasid, 1)
+#define tio_conveyor_disable(nasid) tio_conveyor_set(nasid, 0)
+
+static void tio_corelet_reset(nasid_t nasid, int corelet)
+{
+ if (!(nasid & 1))
+ return;
+
+ REMOTE_HUB_S(nasid, TIO_ICE_PMI_TX_CFG, 1 << corelet);
+ udelay(2000);
+ REMOTE_HUB_S(nasid, TIO_ICE_PMI_TX_CFG, 0);
+ udelay(2000);
+}
+
+static int fpga_attached(nasid_t nasid)
+{
+ uint64_t cx_credits;
+
+ cx_credits = REMOTE_HUB_L(nasid, TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3);
+ cx_credits &= TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3_CREDIT_CNT_MASK;
+ DBG("cx_credits= 0x%lx\n", cx_credits);
+
+ return (cx_credits == 0xf) ? 1 : 0;
+}
+
+static int tiocx_reload(struct cx_dev *cx_dev)
+{
+ int part_num = CX_DEV_NONE;
+ int mfg_num = CX_DEV_NONE;
+ nasid_t nasid = cx_dev->cx_id.nasid;
+
+ if (fpga_attached(nasid)) {
+ uint64_t cx_id;
+
+ cx_id =
+ *(volatile int32_t *)(TIO_SWIN_BASE(nasid, TIOCX_CORELET) +
+ WIDGET_ID);
+ part_num = XWIDGET_PART_NUM(cx_id);
+ mfg_num = XWIDGET_MFG_NUM(cx_id);
+ DBG("part= 0x%x, mfg= 0x%x\n", part_num, mfg_num);
+ /* just ignore it if it's a CE */
+ if (part_num == TIO_CE_ASIC_PARTNUM)
+ return 0;
+ }
+
+ cx_dev->cx_id.part_num = part_num;
+ cx_dev->cx_id.mfg_num = mfg_num;
+
+ /*
+ * Delete old device and register the new one. It's ok if
+ * part_num/mfg_num == CX_DEV_NONE. We want to register
+ * devices in the table even if a bitstream isn't loaded.
+	 * That allows us to see that a bitstream isn't loaded via
+ * TIOCX_IOCTL_DEV_LIST.
+ */
+ return cx_device_reload(cx_dev);
+}
+
+static ssize_t show_cxdev_control(struct device *dev, char *buf)
+{
+ struct cx_dev *cx_dev = to_cx_dev(dev);
+
+ return sprintf(buf, "0x%x 0x%x 0x%x\n",
+ cx_dev->cx_id.nasid,
+ cx_dev->cx_id.part_num, cx_dev->cx_id.mfg_num);
+}
+
+static ssize_t store_cxdev_control(struct device *dev, const char *buf,
+ size_t count)
+{
+ int n;
+ struct cx_dev *cx_dev = to_cx_dev(dev);
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (count <= 0)
+ return 0;
+
+ n = simple_strtoul(buf, NULL, 0);
+
+ switch (n) {
+ case 1:
+ tiocx_reload(cx_dev);
+ break;
+ case 3:
+ tio_corelet_reset(cx_dev->cx_id.nasid, TIOCX_CORELET);
+ break;
+ default:
+ break;
+ }
+
+ return count;
+}
+
+DEVICE_ATTR(cxdev_control, 0644, show_cxdev_control, store_cxdev_control);
+
+static int __init tiocx_init(void)
+{
+ cnodeid_t cnodeid;
+ int found_tiocx_device = 0;
+
+ bus_register(&tiocx_bus_type);
+
+ for (cnodeid = 0; cnodeid < MAX_COMPACT_NODES; cnodeid++) {
+ nasid_t nasid;
+
+ if ((nasid = cnodeid_to_nasid(cnodeid)) < 0)
+ break; /* No more nasids .. bail out of loop */
+
+ if (nasid & 0x1) { /* TIO's are always odd */
+ struct hubdev_info *hubdev;
+ uint64_t status;
+ struct xwidget_info *widgetp;
+
+ DBG("Found TIO at nasid 0x%x\n", nasid);
+
+ hubdev =
+ (struct hubdev_info *)(NODEPDA(cnodeid)->pdinfo);
+ status =
+ tiocx_get_hubdev_info(nasid,
+ (uint64_t) __pa(hubdev));
+ if (status)
+ continue;
+
+ widgetp = &hubdev->hdi_xwidget_info[TIOCX_CORELET];
+
+ /* The CE hangs off of the CX port but is not an FPGA */
+ if (widgetp->xwi_hwid.part_num == TIO_CE_ASIC_PARTNUM)
+ continue;
+
+ tio_corelet_reset(nasid, TIOCX_CORELET);
+ tio_conveyor_enable(nasid);
+
+ if (cx_device_register
+ (nasid, widgetp->xwi_hwid.part_num,
+ widgetp->xwi_hwid.mfg_num, hubdev) < 0)
+ return -ENXIO;
+ else
+ found_tiocx_device++;
+ }
+ }
+
+ /* It's ok if we find zero devices. */
+ DBG("found_tiocx_device= %d\n", found_tiocx_device);
+
+ return 0;
+}
+
+static void __exit tiocx_exit(void)
+{
+ struct device *dev;
+ struct device *tdev;
+
+ DBG("tiocx_exit\n");
+
+ /*
+ * Unregister devices.
+ */
+ list_for_each_entry_safe(dev, tdev, &tiocx_bus_type.devices.list,
+ bus_list) {
+ if (dev) {
+ struct cx_dev *cx_dev = to_cx_dev(dev);
+ device_remove_file(dev, &dev_attr_cxdev_control);
+ cx_device_unregister(cx_dev);
+ }
+ }
+
+ bus_unregister(&tiocx_bus_type);
+}
+
+module_init(tiocx_init);
+module_exit(tiocx_exit);
+
+/************************************************************************
+ * Module licensing and description
+ ************************************************************************/
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Bruce Losure <blosure@sgi.com>");
+MODULE_DESCRIPTION("TIOCX module");
+MODULE_SUPPORTED_DEVICE(DEVICE_NAME);
diff --git a/arch/ia64/sn/pci/Makefile b/arch/ia64/sn/pci/Makefile
index b5dca0097a8e7..2f915bce25f9e 100644
--- a/arch/ia64/sn/pci/Makefile
+++ b/arch/ia64/sn/pci/Makefile
@@ -7,4 +7,4 @@
#
# Makefile for the sn pci general routines.
-obj-y := pci_dma.o pcibr/
+obj-y := pci_dma.o tioca_provider.o pcibr/
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index f680824f819d5..5da9bdbde7cb8 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -12,9 +12,8 @@
#include <linux/module.h>
#include <asm/dma.h>
#include <asm/sn/sn_sal.h>
-#include "pci/pcibus_provider_defs.h"
-#include "pci/pcidev.h"
-#include "pci/pcibr_provider.h"
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/pcidev.h>
#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
@@ -79,7 +78,8 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
{
void *cpuaddr;
unsigned long phys_addr;
- struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
BUG_ON(dev->bus != &pci_bus_type);
@@ -102,8 +102,7 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
* resources.
*/
- *dma_handle = pcibr_dma_map(pcidev_info, phys_addr, size,
- SN_PCIDMA_CONSISTENT);
+ *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size);
if (!*dma_handle) {
printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
free_pages((unsigned long)cpuaddr, get_order(size));
@@ -127,11 +126,12 @@ EXPORT_SYMBOL(sn_dma_alloc_coherent);
void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle)
{
- struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
BUG_ON(dev->bus != &pci_bus_type);
- pcibr_dma_unmap(pcidev_info, dma_handle, 0);
+ provider->dma_unmap(pdev, dma_handle, 0);
free_pages((unsigned long)cpu_addr, get_order(size));
}
EXPORT_SYMBOL(sn_dma_free_coherent);
@@ -159,12 +159,13 @@ dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
{
dma_addr_t dma_addr;
unsigned long phys_addr;
- struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
BUG_ON(dev->bus != &pci_bus_type);
phys_addr = __pa(cpu_addr);
- dma_addr = pcibr_dma_map(pcidev_info, phys_addr, size, 0);
+ dma_addr = provider->dma_map(pdev, phys_addr, size);
if (!dma_addr) {
printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
return 0;
@@ -187,10 +188,12 @@ EXPORT_SYMBOL(sn_dma_map_single);
void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
int direction)
{
- struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
BUG_ON(dev->bus != &pci_bus_type);
- pcibr_dma_unmap(pcidev_info, dma_addr, direction);
+
+ provider->dma_unmap(pdev, dma_addr, direction);
}
EXPORT_SYMBOL(sn_dma_unmap_single);
@@ -207,12 +210,13 @@ void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nhwentries, int direction)
{
int i;
- struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
BUG_ON(dev->bus != &pci_bus_type);
for (i = 0; i < nhwentries; i++, sg++) {
- pcibr_dma_unmap(pcidev_info, sg->dma_address, direction);
+ provider->dma_unmap(pdev, sg->dma_address, direction);
sg->dma_address = (dma_addr_t) NULL;
sg->dma_length = 0;
}
@@ -233,7 +237,8 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
{
unsigned long phys_addr;
struct scatterlist *saved_sg = sg;
- struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
int i;
BUG_ON(dev->bus != &pci_bus_type);
@@ -243,8 +248,8 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
*/
for (i = 0; i < nhwentries; i++, sg++) {
phys_addr = SG_ENT_PHYS_ADDRESS(sg);
- sg->dma_address = pcibr_dma_map(pcidev_info, phys_addr,
- sg->length, 0);
+ sg->dma_address = provider->dma_map(pdev,
+ phys_addr, sg->length);
if (!sg->dma_address) {
printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
index 9d6854666f9b5..0e47bce85f2da 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_ate.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
@@ -8,8 +8,8 @@
#include <linux/types.h>
#include <asm/sn/sn_sal.h>
-#include "pci/pcibus_provider_defs.h"
-#include "pci/pcidev.h"
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/pcidev.h>
#include "pci/pcibr_provider.h"
int pcibr_invalidate_ate = 0; /* by default don't invalidate ATE on free */
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
index b1d66ac065c88..c90685985d815 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
@@ -12,8 +12,8 @@
#include <asm/sn/geo.h>
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"
-#include "pci/pcibus_provider_defs.h"
-#include "pci/pcidev.h"
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/pcidev.h>
#include "pci/tiocp.h"
#include "pci/pic.h"
#include "pci/pcibr_provider.h"
@@ -40,7 +40,7 @@ extern int sn_ioif_inited;
* we do not have to allocate entries in the PMU.
*/
-static uint64_t
+static dma_addr_t
pcibr_dmamap_ate32(struct pcidev_info *info,
uint64_t paddr, size_t req_size, uint64_t flags)
{
@@ -109,7 +109,7 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
return pci_addr;
}
-static uint64_t
+static dma_addr_t
pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr,
uint64_t dma_attributes)
{
@@ -141,7 +141,7 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr,
}
-static uint64_t
+static dma_addr_t
pcibr_dmatrans_direct32(struct pcidev_info * info,
uint64_t paddr, size_t req_size, uint64_t flags)
{
@@ -180,11 +180,11 @@ pcibr_dmatrans_direct32(struct pcidev_info * info,
* DMA mappings for Direct 64 and 32 do not have any DMA maps.
*/
void
-pcibr_dma_unmap(struct pcidev_info *pcidev_info, dma_addr_t dma_handle,
- int direction)
+pcibr_dma_unmap(struct pci_dev *hwdev, dma_addr_t dma_handle, int direction)
{
- struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
- pdi_pcibus_info;
+ struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
+ struct pcibus_info *pcibus_info =
+ (struct pcibus_info *)pcidev_info->pdi_pcibus_info;
if (IS_PCI32_MAPPED(dma_handle)) {
int ate_index;
@@ -316,64 +316,63 @@ void sn_dma_flush(uint64_t addr)
}
/*
- * Wrapper DMA interface. Called from pci_dma.c routines.
+ * DMA interfaces. Called from pci_dma.c routines.
*/
-uint64_t
-pcibr_dma_map(struct pcidev_info * pcidev_info, unsigned long phys_addr,
- size_t size, unsigned int flags)
+dma_addr_t
+pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
{
dma_addr_t dma_handle;
- struct pci_dev *pcidev = pcidev_info->pdi_linux_pcidev;
-
- if (flags & SN_PCIDMA_CONSISTENT) {
- /* sn_pci_alloc_consistent interfaces */
- if (pcidev->dev.coherent_dma_mask == ~0UL) {
- dma_handle =
- pcibr_dmatrans_direct64(pcidev_info, phys_addr,
- PCI64_ATTR_BAR);
- } else {
- dma_handle =
- (dma_addr_t) pcibr_dmamap_ate32(pcidev_info,
- phys_addr, size,
- PCI32_ATE_BAR);
- }
- } else {
- /* map_sg/map_single interfaces */
+ struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
- /* SN cannot support DMA addresses smaller than 32 bits. */
- if (pcidev->dma_mask < 0x7fffffff) {
- return 0;
- }
+ /* SN cannot support DMA addresses smaller than 32 bits. */
+ if (hwdev->dma_mask < 0x7fffffff) {
+ return 0;
+ }
- if (pcidev->dma_mask == ~0UL) {
+ if (hwdev->dma_mask == ~0UL) {
+ /*
+ * Handle the most common case: 64 bit cards. This
+ * call should always succeed.
+ */
+
+ dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
+ PCI64_ATTR_PREF);
+ } else {
+ /* Handle 32-63 bit cards via direct mapping */
+ dma_handle = pcibr_dmatrans_direct32(pcidev_info, phys_addr,
+ size, 0);
+ if (!dma_handle) {
/*
- * Handle the most common case: 64 bit cards. This
- * call should always succeed.
+ * It is a 32 bit card and we cannot do direct mapping,
+ * so we use an ATE.
*/
- dma_handle =
- pcibr_dmatrans_direct64(pcidev_info, phys_addr,
- PCI64_ATTR_PREF);
- } else {
- /* Handle 32-63 bit cards via direct mapping */
- dma_handle =
- pcibr_dmatrans_direct32(pcidev_info, phys_addr,
- size, 0);
- if (!dma_handle) {
- /*
- * It is a 32 bit card and we cannot do direct mapping,
- * so we use an ATE.
- */
-
- dma_handle =
- pcibr_dmamap_ate32(pcidev_info, phys_addr,
- size, PCI32_ATE_PREF);
- }
+ dma_handle = pcibr_dmamap_ate32(pcidev_info, phys_addr,
+ size, PCI32_ATE_PREF);
}
}
return dma_handle;
}
+dma_addr_t
+pcibr_dma_map_consistent(struct pci_dev * hwdev, unsigned long phys_addr,
+ size_t size)
+{
+ dma_addr_t dma_handle;
+ struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
+
+ if (hwdev->dev.coherent_dma_mask == ~0UL) {
+ dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
+ PCI64_ATTR_BAR);
+ } else {
+ dma_handle = (dma_addr_t) pcibr_dmamap_ate32(pcidev_info,
+ phys_addr, size,
+ PCI32_ATE_BAR);
+ }
+
+ return dma_handle;
+}
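/*
 * Note: the consistent variant selects the barrier attributes
 * (PCI64_ATTR_BAR / PCI32_ATE_BAR) where pcibr_dma_map() uses the
 * prefetch attributes, trading streaming throughput for the stricter
 * ordering that coherent allocations require.
 */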
+
EXPORT_SYMBOL(sn_dma_flush);
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
index 92bd278cf7ff2..3893999d23d88 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -13,8 +13,8 @@
#include "xtalk/xwidgetdev.h"
#include <asm/sn/geo.h>
#include "xtalk/hubdev.h"
-#include "pci/pcibus_provider_defs.h"
-#include "pci/pcidev.h"
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/pcidev.h>
#include "pci/pcibr_provider.h"
#include <asm/sn/addrs.h>
@@ -168,3 +168,23 @@ void pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info)
pcibr_force_interrupt(sn_irq_info);
}
}
+
+/*
+ * Provider entries for PIC/TIOCP
+ */
+
+struct sn_pcibus_provider pcibr_provider = {
+ .dma_map = pcibr_dma_map,
+ .dma_map_consistent = pcibr_dma_map_consistent,
+ .dma_unmap = pcibr_dma_unmap,
+ .bus_fixup = pcibr_bus_fixup,
+};
+
+int
+pcibr_init_provider(void)
+{
+ sn_pci_provider[PCIIO_ASIC_TYPE_PIC] = &pcibr_provider;
+ sn_pci_provider[PCIIO_ASIC_TYPE_TIOCP] = &pcibr_provider;
+
+ return 0;
+}
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_reg.c b/arch/ia64/sn/pci/pcibr/pcibr_reg.c
index 74a74a7d2a135..865c11c3b50a0 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_reg.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_reg.c
@@ -8,8 +8,8 @@
#include <linux/types.h>
#include <linux/interrupt.h>
-#include "pci/pcibus_provider_defs.h"
-#include "pci/pcidev.h"
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/pcidev.h>
#include "pci/tiocp.h"
#include "pci/pic.h"
#include "pci/pcibr_provider.h"
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
new file mode 100644
index 0000000000000..54a0dd447e76a
--- /dev/null
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -0,0 +1,668 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003-2005 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/pcidev.h>
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/tioca_provider.h>
+
+uint32_t tioca_gart_found;
+EXPORT_SYMBOL(tioca_gart_found); /* used by agp-sgi */
+
+LIST_HEAD(tioca_list);
+EXPORT_SYMBOL(tioca_list); /* used by agp-sgi */
+
+static int tioca_gart_init(struct tioca_kernel *);
+
+/**
+ * tioca_gart_init - Initialize SGI TIOCA GART
+ * @tioca_kern: ptr to the kernel struct for the CA to set up
+ *
+ * If the indicated tioca has devices present, initialize its associated
+ * GART MMR's and kernel memory.
+ */
+static int
+tioca_gart_init(struct tioca_kernel *tioca_kern)
+{
+ uint64_t ap_reg;
+ uint64_t offset;
+ struct page *tmp;
+ struct tioca_common *tioca_common;
+ volatile struct tioca *ca_base;
+
+ tioca_common = tioca_kern->ca_common;
+ ca_base = (struct tioca *)tioca_common->ca_common.bs_base;
+
+ if (list_empty(tioca_kern->ca_devices))
+ return 0;
+
+ ap_reg = 0;
+
+ /*
+	 * Validate aperture size
+ */
+
+ switch (CA_APERATURE_SIZE >> 20) {
+ case 4:
+ ap_reg |= (0x3ff << CA_GART_AP_SIZE_SHFT); /* 4MB */
+ break;
+ case 8:
+ ap_reg |= (0x3fe << CA_GART_AP_SIZE_SHFT); /* 8MB */
+ break;
+ case 16:
+ ap_reg |= (0x3fc << CA_GART_AP_SIZE_SHFT); /* 16MB */
+ break;
+ case 32:
+ ap_reg |= (0x3f8 << CA_GART_AP_SIZE_SHFT); /* 32 MB */
+ break;
+ case 64:
+ ap_reg |= (0x3f0 << CA_GART_AP_SIZE_SHFT); /* 64 MB */
+ break;
+ case 128:
+ ap_reg |= (0x3e0 << CA_GART_AP_SIZE_SHFT); /* 128 MB */
+ break;
+ case 256:
+ ap_reg |= (0x3c0 << CA_GART_AP_SIZE_SHFT); /* 256 MB */
+ break;
+ case 512:
+ ap_reg |= (0x380 << CA_GART_AP_SIZE_SHFT); /* 512 MB */
+ break;
+ case 1024:
+ ap_reg |= (0x300 << CA_GART_AP_SIZE_SHFT); /* 1GB */
+ break;
+ case 2048:
+ ap_reg |= (0x200 << CA_GART_AP_SIZE_SHFT); /* 2GB */
+ break;
+ case 4096:
+ ap_reg |= (0x000 << CA_GART_AP_SIZE_SHFT); /* 4 GB */
+ break;
+ default:
+ printk(KERN_ERR "%s: Invalid CA_APERATURE_SIZE "
+ "0x%lx\n", __FUNCTION__, (ulong) CA_APERATURE_SIZE);
+ return -1;
+ }
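/*
 * The case table above is equivalent to the closed form
 *   ap_reg |= (0x400 - (CA_APERATURE_SIZE >> 22)) << CA_GART_AP_SIZE_SHFT;
 * e.g. 64MB: 0x400 - 0x10 = 0x3f0.
 */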
+
+ /*
+	 * Set up other aperture parameters
+ */
+
+ if (PAGE_SIZE >= 16384) {
+ tioca_kern->ca_ap_pagesize = 16384;
+ ap_reg |= CA_GART_PAGE_SIZE;
+ } else {
+ tioca_kern->ca_ap_pagesize = 4096;
+ }
+
+ tioca_kern->ca_ap_size = CA_APERATURE_SIZE;
+ tioca_kern->ca_ap_bus_base = CA_APERATURE_BASE;
+ tioca_kern->ca_gart_entries =
+ tioca_kern->ca_ap_size / tioca_kern->ca_ap_pagesize;
+
+ ap_reg |= (CA_GART_AP_ENB_AGP | CA_GART_AP_ENB_PCI);
+ ap_reg |= tioca_kern->ca_ap_bus_base;
+
+ /*
+ * Allocate and set up the GART
+ */
+
+ tioca_kern->ca_gart_size = tioca_kern->ca_gart_entries * sizeof(u64);
+ tmp =
+ alloc_pages_node(tioca_kern->ca_closest_node,
+ GFP_KERNEL | __GFP_ZERO,
+ get_order(tioca_kern->ca_gart_size));
+
+ if (!tmp) {
+ printk(KERN_ERR "%s: Could not allocate "
+ "%lu bytes (order %d) for GART\n",
+ __FUNCTION__,
+ tioca_kern->ca_gart_size,
+ get_order(tioca_kern->ca_gart_size));
+ return -ENOMEM;
+ }
+
+ tioca_kern->ca_gart = page_address(tmp);
+ tioca_kern->ca_gart_coretalk_addr =
+ PHYS_TO_TIODMA(virt_to_phys(tioca_kern->ca_gart));
+
+ /*
+ * Compute PCI/AGP convenience fields
+ */
+
+ offset = CA_PCI32_MAPPED_BASE - CA_APERATURE_BASE;
+ tioca_kern->ca_pciap_base = CA_PCI32_MAPPED_BASE;
+ tioca_kern->ca_pciap_size = CA_PCI32_MAPPED_SIZE;
+ tioca_kern->ca_pcigart_start = offset / tioca_kern->ca_ap_pagesize;
+ tioca_kern->ca_pcigart_base =
+ tioca_kern->ca_gart_coretalk_addr + offset;
+ tioca_kern->ca_pcigart =
+ &tioca_kern->ca_gart[tioca_kern->ca_pcigart_start];
+ tioca_kern->ca_pcigart_entries =
+ tioca_kern->ca_pciap_size / tioca_kern->ca_ap_pagesize;
+ tioca_kern->ca_pcigart_pagemap =
+ kcalloc(1, tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL);
+ if (!tioca_kern->ca_pcigart_pagemap) {
+ free_pages((unsigned long)tioca_kern->ca_gart,
+ get_order(tioca_kern->ca_gart_size));
+ return -1;
+ }
+
+ offset = CA_AGP_MAPPED_BASE - CA_APERATURE_BASE;
+ tioca_kern->ca_gfxap_base = CA_AGP_MAPPED_BASE;
+ tioca_kern->ca_gfxap_size = CA_AGP_MAPPED_SIZE;
+ tioca_kern->ca_gfxgart_start = offset / tioca_kern->ca_ap_pagesize;
+ tioca_kern->ca_gfxgart_base =
+ tioca_kern->ca_gart_coretalk_addr + offset;
+ tioca_kern->ca_gfxgart =
+ &tioca_kern->ca_gart[tioca_kern->ca_gfxgart_start];
+ tioca_kern->ca_gfxgart_entries =
+ tioca_kern->ca_gfxap_size / tioca_kern->ca_ap_pagesize;
+
+ /*
+ * various control settings:
+ * use agp op-combining
+ * use GET semantics to fetch memory
+ * participate in coherency domain
+ * DISABLE GART PREFETCHING due to hw bug tracked in SGI PV930029
+ */
+
+ ca_base->ca_control1 |= CA_AGPDMA_OP_ENB_COMBDELAY; /* PV895469 ? */
+ ca_base->ca_control2 &= ~(CA_GART_MEM_PARAM);
+ ca_base->ca_control2 |= (0x2ull << CA_GART_MEM_PARAM_SHFT);
+ tioca_kern->ca_gart_iscoherent = 1;
+ ca_base->ca_control2 &=
+ ~(CA_GART_WR_PREFETCH_ENB | CA_GART_RD_PREFETCH_ENB);
+
+ /*
+ * Unmask GART fetch error interrupts. Clear residual errors first.
+ */
+
+ ca_base->ca_int_status_alias = CA_GART_FETCH_ERR;
+ ca_base->ca_mult_error_alias = CA_GART_FETCH_ERR;
+ ca_base->ca_int_mask &= ~CA_GART_FETCH_ERR;
+
+ /*
+	 * Program the aperture and gart registers in TIOCA
+ */
+
+ ca_base->ca_gart_aperature = ap_reg;
+ ca_base->ca_gart_ptr_table = tioca_kern->ca_gart_coretalk_addr | 1;
+
+ return 0;
+}
+
+/**
+ * tioca_fastwrite_enable - enable AGP FW for a tioca and its functions
+ * @tioca_kern: structure representing the CA
+ *
+ * Given a CA, scan all attached functions making sure they all support
+ * FastWrite. If so, enable FastWrite for all functions and the CA itself.
+ */
+
+void
+tioca_fastwrite_enable(struct tioca_kernel *tioca_kern)
+{
+ int cap_ptr;
+ uint64_t ca_control1;
+ uint32_t reg;
+ struct tioca *tioca_base;
+ struct pci_dev *pdev;
+ struct tioca_common *common;
+
+ common = tioca_kern->ca_common;
+
+ /*
+ * Scan all vga controllers on this bus making sure they all
+	 * support FW. If not, return.
+ */
+
+ list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
+ if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
+ continue;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return; /* no AGP CAP means no FW */
+
+ pci_read_config_dword(pdev, cap_ptr + PCI_AGP_STATUS, &reg);
+ if (!(reg & PCI_AGP_STATUS_FW))
+ return; /* function doesn't support FW */
+ }
+
+ /*
+ * Set fw for all vga fn's
+ */
+
+ list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
+ if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
+ continue;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ pci_read_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, &reg);
+ reg |= PCI_AGP_COMMAND_FW;
+ pci_write_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, reg);
+ }
+
+ /*
+ * Set ca's fw to match
+ */
+
+ tioca_base = (struct tioca *)common->ca_common.bs_base;
+ ca_control1 = tioca_base->ca_control1;
+ ca_control1 |= CA_AGP_FW_ENABLE;
+ tioca_base->ca_control1 = ca_control1;
+}
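/*
 * Note the two-pass structure above: every VGA function must advertise
 * FastWrite before any PCI_AGP_COMMAND bit is written, so the CA and its
 * functions are never left half-enabled.
 */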
+
+EXPORT_SYMBOL(tioca_fastwrite_enable); /* used by agp-sgi */
+
+/**
+ * tioca_dma_d64 - create a DMA mapping using 64-bit direct mode
+ * @paddr: system physical address
+ *
+ * Map @paddr into 64-bit CA bus space. No device context is necessary.
+ * Bits 53:0 come from the coretalk address. We just need to mask in the
+ * following optional bits of the 64-bit pci address:
+ *
+ * 63:60 - Coretalk Packet Type - 0x1 for Mem Get/Put (coherent)
+ * 0x2 for PIO (non-coherent)
+ * We will always use 0x1
+ * 55:55 - Swap bytes Currently unused
+ *	55:55 - Swap bytes (currently unused)
+static uint64_t
+tioca_dma_d64(unsigned long paddr)
+{
+ dma_addr_t bus_addr;
+
+ bus_addr = PHYS_TO_TIODMA(paddr);
+
+ BUG_ON(!bus_addr);
+ BUG_ON(bus_addr >> 54);
+
+ /* Set upper nibble to Cache Coherent Memory op */
+ bus_addr |= (1UL << 60);
+
+ return bus_addr;
+}
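/*
 * Worked example (address hypothetical): for a coretalk address of
 * 0x00c000001000, tioca_dma_d64() returns 0x100000c000001000 -- the
 * same address with 0x1 in bits 63:60 selecting coherent Mem Get/Put.
 */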
+
+/**
+ * tioca_dma_d48 - create a DMA mapping using 48-bit direct mode
+ * @pdev: linux pci_dev representing the function
+ * @paddr: system physical address
+ *
+ * Map @paddr into 64-bit bus space of the CA associated with @pdev.
+ *
+ * The CA agp 48 bit direct address falls out as follows:
+ *
+ * When direct mapping AGP addresses, the 48 bit AGP address is
+ * constructed as follows:
+ *
+ * [47:40] - Low 8 bits of the page Node ID extracted from coretalk
+ * address [47:40]. The upper 8 node bits are fixed
+ * and come from the xxx register bits [5:0]
+ * [39:38] - Chiplet ID extracted from coretalk address [39:38]
+ * [37:00] - node offset extracted from coretalk address [37:00]
+ *
+ * Since the node id in general will be non-zero, and the chiplet id
+ * will always be non-zero, it follows that the device must support
+ * a dma mask of at least 0xffffffffff (40 bits) to target node 0
+ * and in general should be 0xffffffffffff (48 bits) to target nodes
+ * up to 255. Nodes above 255 need the support of the xxx register,
+ * and so a given CA can only directly target nodes in the range
+ * xxx - xxx+255.
+ */
+static uint64_t
+tioca_dma_d48(struct pci_dev *pdev, uint64_t paddr)
+{
+ struct tioca_common *tioca_common;
+ struct tioca *ca_base;
+ uint64_t ct_addr;
+ dma_addr_t bus_addr;
+ uint32_t node_upper;
+ uint64_t agp_dma_extn;
+ struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
+
+ tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
+ ca_base = (struct tioca *)tioca_common->ca_common.bs_base;
+
+ ct_addr = PHYS_TO_TIODMA(paddr);
+ if (!ct_addr)
+ return 0;
+
+ bus_addr = (dma_addr_t) (ct_addr & 0xffffffffffff);
+ node_upper = ct_addr >> 48;
+
+ if (node_upper > 64) {
+ printk(KERN_ERR "%s: coretalk addr 0x%p node id out "
+ "of range\n", __FUNCTION__, (void *)ct_addr);
+ return 0;
+ }
+
+ agp_dma_extn = ca_base->ca_agp_dma_addr_extn;
+ if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) {
+ printk(KERN_ERR "%s: coretalk upper node (%u) "
+ "mismatch with ca_agp_dma_addr_extn (%lu)\n",
+ __FUNCTION__,
+ node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT));
+ return 0;
+ }
+
+ return bus_addr;
+}
+
+/**
+ * tioca_dma_mapped - create a DMA mapping using a CA GART
+ * @pdev: linux pci_dev representing the function
+ * @paddr: host physical address to map
+ * @req_size: len (bytes) to map
+ *
+ * Map @paddr into CA address space using the GART mechanism. The mapped
+ * dma_addr_t is guaranteed to be contiguous in CA bus space.
+ */
+static dma_addr_t
+tioca_dma_mapped(struct pci_dev *pdev, uint64_t paddr, size_t req_size)
+{
+ int i, ps, ps_shift, entry, entries, mapsize, last_entry;
+ uint64_t xio_addr, end_xio_addr;
+ struct tioca_common *tioca_common;
+ struct tioca_kernel *tioca_kern;
+ dma_addr_t bus_addr = 0;
+ struct tioca_dmamap *ca_dmamap;
+ void *map;
+ unsigned long flags;
+	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
+
+ tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
+ tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private;
+
+ xio_addr = PHYS_TO_TIODMA(paddr);
+ if (!xio_addr)
+ return 0;
+
+ spin_lock_irqsave(&tioca_kern->ca_lock, flags);
+
+ /*
+ * allocate a map struct
+ */
+
+ ca_dmamap = kcalloc(1, sizeof(struct tioca_dmamap), GFP_ATOMIC);
+ if (!ca_dmamap)
+ goto map_return;
+
+ /*
+ * Locate free entries that can hold req_size. Account for
+ * unaligned start/length when allocating.
+ */
+
+ ps = tioca_kern->ca_ap_pagesize; /* will be power of 2 */
+ ps_shift = ffs(ps) - 1;
+ end_xio_addr = xio_addr + req_size - 1;
+
+ entries = (end_xio_addr >> ps_shift) - (xio_addr >> ps_shift) + 1;
+
+ map = tioca_kern->ca_pcigart_pagemap;
+ mapsize = tioca_kern->ca_pcigart_entries;
+
+ entry = find_first_zero_bit(map, mapsize);
+ while (entry < mapsize) {
+ last_entry = find_next_bit(map, mapsize, entry);
+
+ if (last_entry - entry >= entries)
+ break;
+
+ entry = find_next_zero_bit(map, mapsize, last_entry);
+ }
+
+	if (entry >= mapsize) {
+		kfree(ca_dmamap);
+		goto map_return;
+	}
+
+ for (i = 0; i < entries; i++)
+ set_bit(entry + i, map);
+
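+	/* each GART entry maps one @ps sized page of PCI aperture space */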
+ bus_addr = tioca_kern->ca_pciap_base + (entry * ps);
+
+ ca_dmamap->cad_dma_addr = bus_addr;
+ ca_dmamap->cad_gart_size = entries;
+ ca_dmamap->cad_gart_entry = entry;
+ list_add(&ca_dmamap->cad_list, &tioca_kern->ca_list);
+
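+	/*
+	 * An unaligned start still occupies a whole GART entry; fold the
+	 * in-page offset into the returned bus address, then round
+	 * xio_addr up to the next page boundary before filling the
+	 * remaining entries below.
+	 */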
+ if (xio_addr % ps) {
+ tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
+ bus_addr += xio_addr & (ps - 1);
+ xio_addr &= ~(ps - 1);
+ xio_addr += ps;
+ entry++;
+ }
+
+ while (xio_addr < end_xio_addr) {
+ tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
+ xio_addr += ps;
+ entry++;
+ }
+
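+	/* make the new GART entries visible before any DMA targets them */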
+ tioca_tlbflush(tioca_kern);
+
+map_return:
+ spin_unlock_irqrestore(&tioca_kern->ca_lock, flags);
+ return bus_addr;
+}
+
+/**
+ * tioca_dma_unmap - release CA mapping resources
+ * @pdev: linux pci_dev representing the function
+ * @bus_addr: bus address returned by an earlier tioca_dma_map
+ * @dir: mapping direction (unused)
+ *
+ * Locate mapping resources associated with @bus_addr and release them.
+ * For mappings created using the direct modes (64 or 48) there are no
+ * resources to release.
+ */
+void
+tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
+{
+ int i, entry;
+ struct tioca_common *tioca_common;
+ struct tioca_kernel *tioca_kern;
+ struct tioca_dmamap *map;
+ struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
+ unsigned long flags;
+
+ tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
+ tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private;
+
+	/* return straight away if this isn't a GART-mapped address */
+
+ if (bus_addr < tioca_kern->ca_pciap_base ||
+ bus_addr >= (tioca_kern->ca_pciap_base + tioca_kern->ca_pciap_size))
+ return;
+
+ spin_lock_irqsave(&tioca_kern->ca_lock, flags);
+
+	list_for_each_entry(map, &tioca_kern->ca_dmamaps, cad_list)
+		if (map->cad_dma_addr == bus_addr)
+			break;
+
+	/*
+	 * list_for_each_entry() never leaves @map NULL, so catch the
+	 * no-match case by checking whether the walk reached the list
+	 * head instead.
+	 */
+	BUG_ON(&map->cad_list == &tioca_kern->ca_dmamaps);
+
+ entry = map->cad_gart_entry;
+
+ for (i = 0; i < map->cad_gart_size; i++, entry++) {
+ clear_bit(entry, tioca_kern->ca_pcigart_pagemap);
+ tioca_kern->ca_pcigart[entry] = 0;
+ }
+ tioca_tlbflush(tioca_kern);
+
+ list_del(&map->cad_list);
+ spin_unlock_irqrestore(&tioca_kern->ca_lock, flags);
+ kfree(map);
+}
+
+/**
+ * tioca_dma_map - map pages for PCI DMA
+ * @pdev: linux pci_dev representing the function
+ * @paddr: host physical address to map
+ * @byte_count: bytes to map
+ *
+ * This is the main wrapper for mapping host physical pages to CA PCI space.
+ * The mapping mode used is based on the device's dma_mask.  As a last
+ * resort, the GART mapped mode is used.
+ */
+uint64_t
+tioca_dma_map(struct pci_dev *pdev, uint64_t paddr, size_t byte_count)
+{
+ uint64_t mapaddr;
+
+ /*
+	 * If card is 64 or 48 bit addressable, use a direct mapping.  32
+ * bit direct is so restrictive w.r.t. where the memory resides that
+ * we don't use it even though CA has some support.
+ */
+
+ if (pdev->dma_mask == ~0UL)
+ mapaddr = tioca_dma_d64(paddr);
+ else if (pdev->dma_mask == 0xffffffffffffUL)
+ mapaddr = tioca_dma_d48(pdev, paddr);
+ else
+ mapaddr = 0;
+
+ /* Last resort ... use PCI portion of CA GART */
+
+ if (mapaddr == 0)
+ mapaddr = tioca_dma_mapped(pdev, paddr, byte_count);
+
+ return mapaddr;
+}
+
+/**
+ * tioca_error_intr_handler - SGI TIO CA error interrupt handler
+ * @irq: unused
+ * @arg: pointer to tioca_common struct for the given CA
+ * @pt: unused
+ *
+ * Handle a CA error interrupt. Simply a wrapper around a SAL call which
+ * defers processing to the SGI prom.
+ */
+static irqreturn_t
+tioca_error_intr_handler(int irq, void *arg, struct pt_regs *pt)
+{
+ struct tioca_common *soft = arg;
+ struct ia64_sal_retval ret_stuff;
+ uint64_t segment;
+ uint64_t busnum;
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+
+ segment = 0;
+ busnum = soft->ca_common.bs_persist_busnum;
+
+ SAL_CALL_NOLOCK(ret_stuff,
+ (u64) SN_SAL_IOIF_ERROR_INTERRUPT,
+ segment, busnum, 0, 0, 0, 0, 0);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * tioca_bus_fixup - perform final PCI fixup for a TIO CA bus
+ * @prom_bussoft: Common prom/kernel struct representing the bus
+ *
+ * Replicates the tioca_common pointed to by @prom_bussoft in kernel
+ * space. Allocates and initializes a kernel-only area for a given CA,
+ * and sets up an irq for handling CA error interrupts.
+ *
+ * On successful setup, returns the kernel version of tioca_common back to
+ * the caller.
+ */
+void *
+tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft)
+{
+ struct tioca_common *tioca_common;
+ struct tioca_kernel *tioca_kern;
+ struct pci_bus *bus;
+
+ /* sanity check prom rev */
+
+ if (sn_sal_rev_major() < 4 ||
+ (sn_sal_rev_major() == 4 && sn_sal_rev_minor() < 6)) {
+		printk(KERN_ERR "%s: SGI prom rev 4.06 or greater required "
+		       "for tioca support\n", __FUNCTION__);
+ return NULL;
+ }
+
+ /*
+ * Allocate kernel bus soft and copy from prom.
+ */
+
+ tioca_common = kcalloc(1, sizeof(struct tioca_common), GFP_KERNEL);
+ if (!tioca_common)
+ return NULL;
+
+ memcpy(tioca_common, prom_bussoft, sizeof(struct tioca_common));
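+
+	/*
+	 * The prom hands up a physical register base; fold in the
+	 * uncached offset so the kernel can reference CA registers
+	 * directly.
+	 */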
+ tioca_common->ca_common.bs_base |= __IA64_UNCACHED_OFFSET;
+
+ /* init kernel-private area */
+
+ tioca_kern = kcalloc(1, sizeof(struct tioca_kernel), GFP_KERNEL);
+ if (!tioca_kern) {
+ kfree(tioca_common);
+ return NULL;
+ }
+
+ tioca_kern->ca_common = tioca_common;
+ spin_lock_init(&tioca_kern->ca_lock);
+ INIT_LIST_HEAD(&tioca_kern->ca_dmamaps);
+ tioca_kern->ca_closest_node =
+ nasid_to_cnodeid(tioca_common->ca_closest_nasid);
+ tioca_common->ca_kernel_private = (uint64_t) tioca_kern;
+
+ bus = pci_find_bus(0, tioca_common->ca_common.bs_persist_busnum);
+ BUG_ON(!bus);
+ tioca_kern->ca_devices = &bus->devices;
+
+ /* init GART */
+
+ if (tioca_gart_init(tioca_kern) < 0) {
+ kfree(tioca_kern);
+ kfree(tioca_common);
+ return NULL;
+ }
+
+ tioca_gart_found++;
+ list_add(&tioca_kern->ca_list, &tioca_list);
+
+ if (request_irq(SGI_TIOCA_ERROR,
+ tioca_error_intr_handler,
+ SA_SHIRQ, "TIOCA error", (void *)tioca_common))
+ printk(KERN_WARNING
+ "%s: Unable to get irq %d. "
+ "Error interrupts won't be routed for TIOCA bus %d\n",
+ __FUNCTION__, SGI_TIOCA_ERROR,
+ (int)tioca_common->ca_common.bs_persist_busnum);
+
+ return tioca_common;
+}
+
+static struct sn_pcibus_provider tioca_pci_interfaces = {
+ .dma_map = tioca_dma_map,
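+	/* consistent (coherent) mappings take the same path as streaming */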
+ .dma_map_consistent = tioca_dma_map,
+ .dma_unmap = tioca_dma_unmap,
+ .bus_fixup = tioca_bus_fixup,
+};
+
+/**
+ * tioca_init_provider - init SN PCI provider ops for TIO CA
+ */
+int
+tioca_init_provider(void)
+{
+ sn_pci_provider[PCIIO_ASIC_TYPE_TIOCA] = &tioca_pci_interfaces;
+ return 0;
+}