author     Linus Torvalds <torvalds@g5.osdl.org>  2006-02-06 15:46:39 -0800
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-02-06 15:46:39 -0800
commit     c03296a868ae7c91aa2d8b372184763b18f16d7a (patch)
tree       57ff0b44a412b78fe598a49cc28b3cebf51f1154 /include
parent     e3f749c4af69c4344d89f11e2293e3790eb4eaca (diff)
parent     913e4a75572354995b330f57082d9a86250cd75f (diff)
download   linux-c03296a868ae7c91aa2d8b372184763b18f16d7a.tar.gz
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
Diffstat (limited to 'include')
-rw-r--r--  include/asm-ia64/processor.h  | 17
-rw-r--r--  include/asm-ia64/sal.h        | 10
-rw-r--r--  include/asm-ia64/sn/bte.h     | 23
-rw-r--r--  include/asm-ia64/sn/intr.h    | 38
-rw-r--r--  include/asm-ia64/system.h     | 25
5 files changed, 56 insertions(+), 57 deletions(-)
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 09b99029ac1ac..23c8e1be19118 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -559,6 +559,23 @@ ia64_eoi (void)
#define cpu_relax() ia64_hint(ia64_hint_pause)
+static inline int
+ia64_get_irr(unsigned int vector)
+{
+ unsigned int reg = vector / 64;
+ unsigned int bit = vector % 64;
+ u64 irr;
+
+ switch (reg) {
+ case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
+ case 1: irr = ia64_getreg(_IA64_REG_CR_IRR1); break;
+ case 2: irr = ia64_getreg(_IA64_REG_CR_IRR2); break;
+ case 3: irr = ia64_getreg(_IA64_REG_CR_IRR3); break;
+ }
+
+ return test_bit(bit, &irr);
+}
+
static inline void
ia64_set_lrr0 (unsigned long val)
{
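
Note: the new ia64_get_irr() helper selects one of the four 64-bit IRR control
registers with vector / 64 and tests bit vector % 64 in it; for example, vector
0xe7 (SGI_XPC_NOTIFY) lands in IRR3, bit 39. A minimal sketch of that index
math in plain C (illustrative only, no IA-64 intrinsics needed):

	#include <stdio.h>

	int main(void)
	{
		unsigned int vector = 0xe7;	/* illustrative vector number */
		/* same mapping as ia64_get_irr(): register = vector / 64, bit = vector % 64 */
		printf("IRR%u, bit %u\n", vector / 64, vector % 64);	/* prints "IRR3, bit 39" */
		return 0;
	}
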
diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h
index 313cad0628d07..0b210abbe0033 100644
--- a/include/asm-ia64/sal.h
+++ b/include/asm-ia64/sal.h
@@ -658,15 +658,7 @@ ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
return isrv.status;
}
-/* Flush all the processor and platform level instruction and/or data caches */
-static inline s64
-ia64_sal_cache_flush (u64 cache_type)
-{
- struct ia64_sal_retval isrv;
- SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type, 0, 0, 0, 0, 0, 0);
- return isrv.status;
-}
-
+extern s64 ia64_sal_cache_flush (u64 cache_type);
/* Initialize all the processor and platform level instruction and data caches */
static inline s64
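
Note: the cache-flush wrapper is no longer a header-local static inline;
callers now link against an out-of-line ia64_sal_cache_flush() declared extern
above. A sketch of the corresponding definition, reusing the SAL_CALL sequence
deleted from this header (which .c file it moves to, and anything extra it does
there, is not shown in this diff):

	/* Sketch only: out-of-line body matching the extern declaration above. */
	s64
	ia64_sal_cache_flush (u64 cache_type)
	{
		struct ia64_sal_retval isrv;

		SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type, 0, 0, 0, 0, 0, 0);
		return isrv.status;
	}
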
diff --git a/include/asm-ia64/sn/bte.h b/include/asm-ia64/sn/bte.h
index f50da3d91d07f..01e5b41032357 100644
--- a/include/asm-ia64/sn/bte.h
+++ b/include/asm-ia64/sn/bte.h
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc. All Rights Reserved.
*/
@@ -100,13 +100,28 @@
#define BTE_LNSTAT_STORE(_bte, _x) \
HUB_S(_bte->bte_base_addr, (_x))
#define BTE_SRC_STORE(_bte, _x) \
- HUB_S(_bte->bte_source_addr, (_x))
+({ \
+ u64 __addr = ((_x) & ~AS_MASK); \
+ if (is_shub2()) \
+ __addr = SH2_TIO_PHYS_TO_DMA(__addr); \
+ HUB_S(_bte->bte_source_addr, __addr); \
+})
#define BTE_DEST_STORE(_bte, _x) \
- HUB_S(_bte->bte_destination_addr, (_x))
+({ \
+ u64 __addr = ((_x) & ~AS_MASK); \
+ if (is_shub2()) \
+ __addr = SH2_TIO_PHYS_TO_DMA(__addr); \
+ HUB_S(_bte->bte_destination_addr, __addr); \
+})
#define BTE_CTRL_STORE(_bte, _x) \
HUB_S(_bte->bte_control_addr, (_x))
#define BTE_NOTIF_STORE(_bte, _x) \
- HUB_S(_bte->bte_notify_addr, (_x))
+({ \
+ u64 __addr = ia64_tpa((_x) & ~AS_MASK); \
+ if (is_shub2()) \
+ __addr = SH2_TIO_PHYS_TO_DMA(__addr); \
+ HUB_S(_bte->bte_notify_addr, __addr); \
+})
#define BTE_START_TRANSFER(_bte, _len, _mode) \
is_shub2() ? BTE_CTRL_STORE(_bte, IBLS_BUSY | (_mode << 24) | _len) \
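
Note: BTE_SRC_STORE, BTE_DEST_STORE and BTE_NOTIF_STORE now mask off the
address-space bits and, on SHub2, convert the physical address to a TIO DMA
address before writing it to the BTE MMIO register (BTE_NOTIF_STORE also runs
the notify pointer through ia64_tpa() first). The shared fix-up can be read as
the helper below; bte_fixup_addr() is a hypothetical name used only for
illustration, the macros above open-code it:

	/* Hypothetical helper equivalent to the address fix-up in the macros above. */
	static inline u64 bte_fixup_addr(u64 addr)
	{
		addr &= ~AS_MASK;		/* drop address-space attribute bits */
		if (is_shub2())			/* SHub2: translate phys addr to TIO DMA addr */
			addr = SH2_TIO_PHYS_TO_DMA(addr);
		return addr;
	}
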
diff --git a/include/asm-ia64/sn/intr.h b/include/asm-ia64/sn/intr.h
index a3431372c6e7b..60a51a406eec5 100644
--- a/include/asm-ia64/sn/intr.h
+++ b/include/asm-ia64/sn/intr.h
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_INTR_H
@@ -11,26 +11,26 @@
#include <linux/rcupdate.h>
-#define SGI_UART_VECTOR (0xe9)
+#define SGI_UART_VECTOR 0xe9
/* Reserved IRQs : Note, not to exceed IA64_SN2_FIRST_DEVICE_VECTOR */
-#define SGI_XPC_ACTIVATE (0x30)
-#define SGI_II_ERROR (0x31)
-#define SGI_XBOW_ERROR (0x32)
-#define SGI_PCIASIC_ERROR (0x33)
-#define SGI_ACPI_SCI_INT (0x34)
-#define SGI_TIOCA_ERROR (0x35)
-#define SGI_TIO_ERROR (0x36)
-#define SGI_TIOCX_ERROR (0x37)
-#define SGI_MMTIMER_VECTOR (0x38)
-#define SGI_XPC_NOTIFY (0xe7)
-
-#define IA64_SN2_FIRST_DEVICE_VECTOR (0x3c)
-#define IA64_SN2_LAST_DEVICE_VECTOR (0xe6)
-
-#define SN2_IRQ_RESERVED (0x1)
-#define SN2_IRQ_CONNECTED (0x2)
-#define SN2_IRQ_SHARED (0x4)
+#define SGI_XPC_ACTIVATE 0x30
+#define SGI_II_ERROR 0x31
+#define SGI_XBOW_ERROR 0x32
+#define SGI_PCIASIC_ERROR 0x33
+#define SGI_ACPI_SCI_INT 0x34
+#define SGI_TIOCA_ERROR 0x35
+#define SGI_TIO_ERROR 0x36
+#define SGI_TIOCX_ERROR 0x37
+#define SGI_MMTIMER_VECTOR 0x38
+#define SGI_XPC_NOTIFY 0xe7
+
+#define IA64_SN2_FIRST_DEVICE_VECTOR 0x3c
+#define IA64_SN2_LAST_DEVICE_VECTOR 0xe6
+
+#define SN2_IRQ_RESERVED 0x1
+#define SN2_IRQ_CONNECTED 0x2
+#define SN2_IRQ_SHARED 0x4
// The SN PROM irq struct
struct sn_irq_info {
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 80c5a234e2599..0625387156230 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -249,32 +249,7 @@ extern void ia64_load_extra (struct task_struct *task);
# define switch_to(prev,next,last) __switch_to(prev, next, last)
#endif
-/*
- * On IA-64, we don't want to hold the runqueue's lock during the low-level context-switch,
- * because that could cause a deadlock. Here is an example by Erich Focht:
- *
- * Example:
- * CPU#0:
- * schedule()
- * -> spin_lock_irq(&rq->lock)
- * -> context_switch()
- * -> wrap_mmu_context()
- * -> read_lock(&tasklist_lock)
- *
- * CPU#1:
- * sys_wait4() or release_task() or forget_original_parent()
- * -> write_lock(&tasklist_lock)
- * -> do_notify_parent()
- * -> wake_up_parent()
- * -> try_to_wake_up()
- * -> spin_lock_irq(&parent_rq->lock)
- *
- * If the parent's rq happens to be on CPU#0, we'll wait for the rq->lock
- * of that CPU which will not be released, because there we wait for the
- * tasklist_lock to become available.
- */
#define __ARCH_WANT_UNLOCKED_CTXSW
-
#define ARCH_HAS_PREFETCH_SWITCH_STACK
#define ia64_platform_is(x) (strcmp(x, platform_name) == 0)