author     Paul Gortmaker <paul.gortmaker@windriver.com>  2016-08-22 15:00:16 -0400
committer  Paul Gortmaker <paul.gortmaker@windriver.com>  2016-08-22 15:00:16 -0400
commit     8c04220c7fd8d2987715eb72131ce938d8230600 (patch)
tree       630b9e647e3ebb4b69011dbf6c0a142e97130030
parent     2df565b6b515c83441b979adae4bf7ca4b79552a (diff)
download   4.8-rt-patches-8c04220c7fd8d2987715eb72131ce938d8230600.tar.gz
-rw-r--r--  patches/series                                                                                         2
-rw-r--r--  patches/x86-Convert-mce-timer-to-hrtimer.patch (renamed from patches/x86-mce-timer-hrtimer.patch)     35
2 files changed, 20 insertions, 17 deletions
diff --git a/patches/series b/patches/series
index 32d3d339ca9dfd..680bb70af4c38f 100644
--- a/patches/series
+++ b/patches/series
@@ -404,7 +404,7 @@ fs-ntfs-disable-interrupt-non-rt.patch
fs-jbd2-pull-your-plug-when-waiting-for-space.patch
# X86
-x86-mce-timer-hrtimer.patch
+x86-Convert-mce-timer-to-hrtimer.patch
x86-mce-use-swait-queue-for-mce-wakeups.patch
x86-stackprot-no-random-on-rt.patch
x86-use-gen-rwsem-spinlocks-rt.patch
diff --git a/patches/x86-mce-timer-hrtimer.patch b/patches/x86-Convert-mce-timer-to-hrtimer.patch
index 97996cda925d8e..c249c45fcb8e32 100644
--- a/patches/x86-mce-timer-hrtimer.patch
+++ b/patches/x86-Convert-mce-timer-to-hrtimer.patch
@@ -1,6 +1,7 @@
+From 202a8c91b1357864167e7a23936f75bb529b60a4 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 13 Dec 2010 16:33:39 +0100
-Subject: x86: Convert mce timer to hrtimer
+Subject: [PATCH] x86: Convert mce timer to hrtimer
mce_timer is started in atomic contexts of cpu bringup. This results
in might_sleep() warnings on RT. Convert mce_timer to a hrtimer to
@@ -20,10 +21,9 @@ fold in:
|Signed-off-by: Mike Galbraith <bitbucket@online.de>
|[bigeasy: use ULL instead of u64 cast]
|Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/kernel/cpu/mcheck/mce.c | 52 +++++++++++++++------------------------
- 1 file changed, 20 insertions(+), 32 deletions(-)
+diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
+index b80a6361a9e1..afb8435087b3 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -41,6 +41,7 @@
@@ -34,7 +34,7 @@ fold in:
#include <asm/processor.h>
#include <asm/traps.h>
-@@ -1240,7 +1241,7 @@ void mce_log_therm_throt_event(__u64 sta
+@@ -1291,7 +1292,7 @@ void mce_log_therm_throt_event(__u64 status)
static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
@@ -43,7 +43,7 @@ fold in:
static unsigned long mce_adjust_timer_default(unsigned long interval)
{
-@@ -1249,32 +1250,18 @@ static unsigned long mce_adjust_timer_de
+@@ -1300,32 +1301,18 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
@@ -82,7 +82,7 @@ fold in:
iv = __this_cpu_read(mce_next_interval);
if (mce_available(this_cpu_ptr(&cpu_info))) {
-@@ -1297,7 +1284,7 @@ static void mce_timer_fn(unsigned long d
+@@ -1348,7 +1335,7 @@ static void mce_timer_fn(unsigned long data)
done:
__this_cpu_write(mce_next_interval, iv);
@@ -91,7 +91,7 @@ fold in:
}
/*
-@@ -1305,7 +1292,7 @@ static void mce_timer_fn(unsigned long d
+@@ -1356,7 +1343,7 @@ done:
*/
void mce_timer_kick(unsigned long interval)
{
@@ -100,7 +100,7 @@ fold in:
unsigned long iv = __this_cpu_read(mce_next_interval);
__restart_timer(t, interval);
-@@ -1320,7 +1307,7 @@ static void mce_timer_delete_all(void)
+@@ -1371,7 +1358,7 @@ static void mce_timer_delete_all(void)
int cpu;
for_each_online_cpu(cpu)
@@ -109,7 +109,7 @@ fold in:
}
static void mce_do_trigger(struct work_struct *work)
-@@ -1654,7 +1641,7 @@ static void __mcheck_cpu_clear_vendor(st
+@@ -1717,7 +1704,7 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
}
}
@@ -118,7 +118,7 @@ fold in:
{
unsigned long iv = check_interval * HZ;
-@@ -1663,16 +1650,17 @@ static void mce_start_timer(unsigned int
+@@ -1726,16 +1713,17 @@ static void mce_start_timer(unsigned int cpu, struct timer_list *t)
per_cpu(mce_next_interval, cpu) = iv;
@@ -140,7 +140,7 @@ fold in:
mce_start_timer(cpu, t);
}
-@@ -2393,6 +2381,8 @@ static void mce_disable_cpu(void *h)
+@@ -2459,6 +2447,8 @@ static void mce_disable_cpu(void *h)
if (!mce_available(raw_cpu_ptr(&cpu_info)))
return;
@@ -149,15 +149,15 @@ fold in:
if (!(action & CPU_TASKS_FROZEN))
cmci_clear();
-@@ -2415,6 +2405,7 @@ static void mce_reenable_cpu(void *h)
+@@ -2481,6 +2471,7 @@ static void mce_reenable_cpu(void *h)
if (b->init)
- wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
+ wrmsrl(msr_ops.ctl(i), b->ctl);
}
+ __mcheck_cpu_init_timer();
}
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
-@@ -2422,7 +2413,6 @@ static int
+@@ -2488,7 +2479,6 @@ static int
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -165,7 +165,7 @@ fold in:
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
-@@ -2442,11 +2432,9 @@ mce_cpu_callback(struct notifier_block *
+@@ -2508,11 +2498,9 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
break;
case CPU_DOWN_PREPARE:
smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
@@ -177,3 +177,6 @@ fold in:
break;
}
+--
+2.5.0
+
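
For readers unfamiliar with the pattern this patch applies, here is a minimal,
self-contained kernel-module sketch of the same timer_list -> hrtimer conversion:
the periodic callback becomes an hrtimer function that re-arms itself with
hrtimer_forward_now() and returns HRTIMER_RESTART. This is an illustrative sketch,
not the mce.c code; the names (poll_timer, poll_timer_fn, poll_interval_jiffies)
are made up, it assumes a 4.x-era hrtimer API, and the ULL-based nanosecond
conversion only mirrors the "[bigeasy: use ULL instead of u64 cast]" note above.

/*
 * Hypothetical sketch of the timer_list -> hrtimer conversion pattern:
 * a periodic timer callback becomes an hrtimer callback that reprograms
 * itself and returns HRTIMER_RESTART instead of calling mod_timer().
 */
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/jiffies.h>

static struct hrtimer poll_timer;
static unsigned long poll_interval_jiffies = HZ;	/* roughly once per second */

static enum hrtimer_restart poll_timer_fn(struct hrtimer *timer)
{
	/* work that previously ran in the timer_list callback */
	pr_info("poll tick\n");

	/* re-arm relative to now; ULL keeps the jiffies->ns math in 64 bit */
	hrtimer_forward_now(timer,
			    ns_to_ktime(jiffies_to_usecs(poll_interval_jiffies) * 1000ULL));
	return HRTIMER_RESTART;
}

static int __init poll_init(void)
{
	/* replaces setup_timer()/mod_timer() from the timer_list version */
	hrtimer_init(&poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	poll_timer.function = poll_timer_fn;
	hrtimer_start(&poll_timer,
		      ns_to_ktime(jiffies_to_usecs(poll_interval_jiffies) * 1000ULL),
		      HRTIMER_MODE_REL);
	return 0;
}

static void __exit poll_exit(void)
{
	/* replaces del_timer_sync() */
	hrtimer_cancel(&poll_timer);
}

module_init(poll_init);
module_exit(poll_exit);
MODULE_LICENSE("GPL");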