From: "Andi Kleen" Work around Tyan BIOS MTRR initialization bug. Some Tyan AMD BIOS don't initialize the first fixed range MTRR, which causes it to contain random bogus values. When the MTRR tries to duplicate the MTRR state to other CPUs at startup it oopses because of this. This patch works around this by catching exception while setting MTRRs. It would be better to validate all fixed range MTRRs and fix them, but that would be very complicated code. This simple hack seems to work too (except that the first 64k of physical memory are likely uncached). A BIOS update fixes that. Signed-off-by: Andi Kleen Cc: Signed-off-by: Andrew Morton --- 25-akpm/arch/i386/kernel/cpu/mtrr/generic.c | 30 ++++++++++++++++++---------- 25-akpm/arch/i386/kernel/cpu/mtrr/mtrr.h | 1 25-akpm/arch/i386/kernel/cpu/mtrr/state.c | 4 +-- 25-akpm/include/asm-i386/msr.h | 15 ++++++++++++++ 25-akpm/include/asm-x86_64/msr.h | 8 ++++--- 5 files changed, 43 insertions(+), 15 deletions(-) diff -puN arch/i386/kernel/cpu/mtrr/generic.c~x86_64-work-around-tyan-bios-mtrr-initialization-bug arch/i386/kernel/cpu/mtrr/generic.c --- 25/arch/i386/kernel/cpu/mtrr/generic.c~x86_64-work-around-tyan-bios-mtrr-initialization-bug Wed Mar 23 15:38:43 2005 +++ 25-akpm/arch/i386/kernel/cpu/mtrr/generic.c Wed Mar 23 15:38:43 2005 @@ -92,6 +92,16 @@ void __init mtrr_state_warn(void) printk(KERN_INFO "mtrr: corrected configuration.\n"); } +/* Doesn't attempt to pass an error out to MTRR users + because it's quite complicated in some cases and probably not + worth it because the best error handling is to ignore it. */ +void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b) +{ + if (wrmsr_safe(msr, a, b) < 0) + printk(KERN_ERR + "MTRR: CPU %u: Writing MSR %x to %x:%x failed\n", + smp_processor_id(), msr, a, b); +} int generic_get_free_region(unsigned long base, unsigned long size) /* [SUMMARY] Get a free MTRR. 
@@ -150,14 +160,14 @@ static int set_fixed_ranges(mtrr_type *
 	rdmsr(MTRRfix64K_00000_MSR, lo, hi);
 	if (p[0] != lo || p[1] != hi) {
-		wrmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
+		mtrr_wrmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
 		changed = TRUE;
 	}
 
 	for (i = 0; i < 2; i++) {
 		rdmsr(MTRRfix16K_80000_MSR + i, lo, hi);
 		if (p[2 + i * 2] != lo || p[3 + i * 2] != hi) {
-			wrmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2],
+			mtrr_wrmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2],
 			      p[3 + i * 2]);
 			changed = TRUE;
 		}
@@ -166,7 +176,7 @@ static int set_fixed_ranges(mtrr_type *
 	for (i = 0; i < 8; i++) {
 		rdmsr(MTRRfix4K_C0000_MSR + i, lo, hi);
 		if (p[6 + i * 2] != lo || p[7 + i * 2] != hi) {
-			wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2],
+			mtrr_wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2],
 			      p[7 + i * 2]);
 			changed = TRUE;
 		}
@@ -184,7 +194,7 @@ static int set_mtrr_var_ranges(unsigned
 	rdmsr(MTRRphysBase_MSR(index), lo, hi);
 	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
 	    || (vr->base_hi & 0xfUL) != (hi & 0xfUL)) {
-		wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
+		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
 		changed = TRUE;
 	}
 
@@ -192,7 +202,7 @@ static int set_mtrr_var_ranges(unsigned
 	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
 	    || (vr->mask_hi & 0xfUL) != (hi & 0xfUL)) {
-		wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
+		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
 		changed = TRUE;
 	}
 	return changed;
@@ -267,7 +277,7 @@ static void prepare_set(void)
 	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
 
 	/*  Disable MTRRs, and set the default type to uncached  */
-	wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi);
+	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi);
 }
 
 static void post_set(void)
@@ -276,7 +286,7 @@ static void post_set(void)
 	__flush_tlb();
 
 	/* Intel (P6) standard MTRRs */
-	wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
+	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
 
 	/*  Enable caches  */
 	write_cr0(read_cr0() & 0xbfffffff);
@@ -330,11 +340,11 @@ static void generic_set_mtrr(unsigned in
 	if (size == 0) {
 		/* The invalid bit is kept in the mask, so we simply clear the
 		   relevant mask register to disable a range. */
-		wrmsr(MTRRphysMask_MSR(reg), 0, 0);
+		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
 	} else {
-		wrmsr(MTRRphysBase_MSR(reg), base << PAGE_SHIFT | type,
+		mtrr_wrmsr(MTRRphysBase_MSR(reg), base << PAGE_SHIFT | type,
 		      (base & size_and_mask) >> (32 - PAGE_SHIFT));
-		wrmsr(MTRRphysMask_MSR(reg), -size << PAGE_SHIFT | 0x800,
+		mtrr_wrmsr(MTRRphysMask_MSR(reg), -size << PAGE_SHIFT | 0x800,
 		      (-size & size_and_mask) >> (32 - PAGE_SHIFT));
 	}
 
diff -puN arch/i386/kernel/cpu/mtrr/mtrr.h~x86_64-work-around-tyan-bios-mtrr-initialization-bug arch/i386/kernel/cpu/mtrr/mtrr.h
--- 25/arch/i386/kernel/cpu/mtrr/mtrr.h~x86_64-work-around-tyan-bios-mtrr-initialization-bug	Wed Mar 23 15:38:43 2005
+++ 25-akpm/arch/i386/kernel/cpu/mtrr/mtrr.h	Wed Mar 23 15:38:43 2005
@@ -94,4 +94,5 @@ extern unsigned int num_var_ranges;
 void finalize_mtrr_state(void);
 void mtrr_state_warn(void);
 char *mtrr_attrib_to_str(int x);
+void mtrr_wrmsr(unsigned, unsigned, unsigned);

diff -puN arch/i386/kernel/cpu/mtrr/state.c~x86_64-work-around-tyan-bios-mtrr-initialization-bug arch/i386/kernel/cpu/mtrr/state.c
--- 25/arch/i386/kernel/cpu/mtrr/state.c~x86_64-work-around-tyan-bios-mtrr-initialization-bug	Wed Mar 23 15:38:43 2005
+++ 25-akpm/arch/i386/kernel/cpu/mtrr/state.c	Wed Mar 23 15:38:43 2005
@@ -42,7 +42,7 @@ void set_mtrr_cache_disable(struct set_m
 {
 	if (use_intel())
 		/*  Disable MTRRs, and set the default type to uncached  */
-		wrmsr(MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL,
+		mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL,
 		      ctxt->deftype_hi);
 	else if (is_cpu(CYRIX))
 		/* Cyrix ARRs - everything else were excluded at the top */
@@ -60,7 +60,7 @@ void set_mtrr_done(struct set_mtrr_conte
 	/* Restore MTRRdefType */
 	if (use_intel())
 		/* Intel (P6) standard MTRRs */
-		wrmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
+		mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
 	else
 		/* Cyrix ARRs - everything else was excluded at the top */
 		setCx86(CX86_CCR3, ctxt->ccr3);

diff -puN include/asm-i386/msr.h~x86_64-work-around-tyan-bios-mtrr-initialization-bug include/asm-i386/msr.h
--- 25/include/asm-i386/msr.h~x86_64-work-around-tyan-bios-mtrr-initialization-bug	Wed Mar 23 15:38:43 2005
+++ 25-akpm/include/asm-i386/msr.h	Wed Mar 23 15:38:43 2005
@@ -32,6 +32,21 @@ static inline void wrmsrl (unsigned long
 	wrmsr (msr, lo, hi);
 }
 
+/* wrmsr with exception handling */
+#define wrmsr_safe(msr,a,b) ({ int ret__;			\
+	asm volatile("2: wrmsr ; xorl %0,%0\n"			\
+		     "1:\n\t"					\
+		     ".section .fixup,\"ax\"\n\t"		\
+		     "3:  movl %4,%0 ; jmp 1b\n\t"		\
+		     ".previous\n\t"				\
+		     ".section __ex_table,\"a\"\n"		\
+		     "   .align 4\n\t"				\
+		     "   .long 2b,3b\n\t"			\
+		     ".previous"				\
+		     : "=a" (ret__)				\
+		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT));\
+	ret__; })
+
 #define rdtsc(low,high) \
      __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))

diff -puN include/asm-x86_64/msr.h~x86_64-work-around-tyan-bios-mtrr-initialization-bug include/asm-x86_64/msr.h
--- 25/include/asm-x86_64/msr.h~x86_64-work-around-tyan-bios-mtrr-initialization-bug	Wed Mar 23 15:38:43 2005
+++ 25-akpm/include/asm-x86_64/msr.h	Wed Mar 23 15:38:43 2005
@@ -28,8 +28,8 @@
 
 #define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
 
-/* wrmsrl with exception handling */
-#define checking_wrmsrl(msr,val) ({ int ret__;		\
+/* wrmsr with exception handling */
+#define wrmsr_safe(msr,a,b) ({ int ret__;		\
 	asm volatile("2: wrmsr ; xorl %0,%0\n"		\
 		     "1:\n\t"				\
 		     ".section .fixup,\"ax\"\n\t"	\
@@ -40,9 +40,11 @@
 		     " .quad 2b,3b\n\t"			\
 		     ".previous"			\
 		     : "=a" (ret__)			\
-		     : "c" (msr), "0" ((__u32)val), "d" ((val)>>32), "i" (-EFAULT));\
+		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT));\
 	ret__; })
 
+#define checking_wrmsrl(msr,val)  wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
+
 #define rdtsc(low,high) \
      __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
_
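
For reference, the workaround relies on the kernel's standard exception-table
fixup mechanism: the wrmsr at local label 2 raises a general protection fault
when the target MSR is bogus; the ".long 2b,3b" entry (".quad" on x86-64) in
the __ex_table section maps that faulting address to the fixup code at label 3,
which loads -EFAULT into the result register and jumps back to label 1.  The
caller therefore sees a negative return value instead of an oops.

A minimal caller sketch (illustrative only, not part of the patch;
probe_msr_write is a hypothetical helper):

/* Attempt an MSR write that may fault.  A #GP raised by the wrmsr
 * instruction is caught through the __ex_table fixup inside
 * wrmsr_safe() and comes back as -EFAULT instead of a crash. */
static int probe_msr_write(unsigned int msr, u32 lo, u32 hi)
{
	int err = wrmsr_safe(msr, lo, hi);

	if (err < 0)
		printk(KERN_WARNING "MSR %x not writable (%d)\n", msr, err);
	return err;
}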