author      Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2016-04-15 19:35:35 +0200
committer   Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2016-04-15 19:35:35 +0200
commit      67e59dd0c366160e17d4fd773b848394173c30a2 (patch)
tree        c5cc25e2642d6be0c3b94244048eec5c8c7cad1c
parent      c948a38cab5a8c9761b676c31a723621d901f193 (diff)
download    4.9-rt-patches-67e59dd0c366160e17d4fd773b848394173c30a2.tar.gz
[ANNOUNCE] 4.4.7-rt16
Dear RT folks!

I'm pleased to announce the v4.4.7-rt16 patch set.

Changes since v4.4.7-rt15:

- Picked up a few of the upstream fixes for panic() re-entrance from NMI
  (sketched right after this announcement). On -RT we have the same problem
  without NMI, but with the soft/hard watchdog triggering panic().

- Don't take the port->lock if oops_in_progress is set. We had a trylock,
  but that trylock does not work if invoked with IRQs off (like from the
  panic() caller). I am not very happy about this, but if we keep it that
  way it would make sense to make a similar change to the other UART
  drivers…

- Rik van Riel and Clark Williams pointed out that a change made by
  Frederic Weisbecker in v4.5 could be backported, which lets us remove
  some locking around the vtime handling.

Known issues:

- CPU hotplug got a little better but can deadlock.

The delta patch against 4.4.7-rt15 is appended below and can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/incr/patch-4.4.7-rt15-rt16.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.4.7-rt16

The RT patch against 4.4.7 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/patch-4.4.7-rt16.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.7-rt16.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
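For reference, here is the core of the re-entrance guard that the panic-*
patches below backport from upstream. This is a sketch lifted from those
patches, not new code: panic() ownership is tracked in an atomic CPU id
(panic_cpu) instead of a spinlock, so a panic() raised from NMI (or, on -RT,
from the hard/soft watchdog) cannot deadlock against a panic() already
running on the same CPU.

    /* Guard shared by panic() and nmi_panic(): the first CPU to win the
     * cmpxchg owns the panic; re-entry on the owning CPU just returns. */
    atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);

    void nmi_panic(struct pt_regs *regs, const char *msg)
    {
            int old_cpu, cpu;

            cpu = raw_smp_processor_id();
            old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu);

            if (old_cpu == PANIC_CPU_INVALID)
                    panic("%s", msg);               /* first CPU to panic */
            else if (old_cpu != cpu)
                    nmi_panic_self_stop(regs);      /* spin; arch code may save regs */
            /* old_cpu == cpu: this CPU is already panicking, just return */
    }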
-rw-r--r--  patches/0003-KVM-Use-simple-waitqueue-for-vcpu-wq.patch  10
-rw-r--r--  patches/ARM-imx-always-use-TWD-on-IMX6Q.patch  7
-rw-r--r--  patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch  2
-rw-r--r--  patches/cgroups-use-simple-wait-in-css_release.patch  6
-rw-r--r--  patches/completion-use-simple-wait-queues.patch  6
-rw-r--r--  patches/cond-resched-softirq-rt.patch  2
-rw-r--r--  patches/cpu-rt-rework-cpu-down.patch  4
-rw-r--r--  patches/dm-make-rt-aware.patch  2
-rw-r--r--  patches/infiniband-mellanox-ib-use-nort-irq.patch  4
-rw-r--r--  patches/introduce_migrate_disable_cpu_light.patch  4
-rw-r--r--  patches/kgb-serial-hackaround.patch  14
-rw-r--r--  patches/latency-hist.patch  16
-rw-r--r--  patches/localversion.patch  4
-rw-r--r--  patches/md-raid5-percpu-handling-rt-aware.patch  6
-rw-r--r--  patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch  4
-rw-r--r--  patches/mm-memcontrol-do_not_disable_irq.patch  10
-rw-r--r--  patches/mm-page_alloc-reduce-lock-sections-further.patch  18
-rw-r--r--  patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch  28
-rw-r--r--  patches/mm-rt-kmap-atomic-scheduling.patch  2
-rw-r--r--  patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch  2
-rw-r--r--  patches/oleg-signal-rt-fix.patch  2
-rw-r--r--  patches/panic-change-nmi_panic-from-macro-to-function.patch  112
-rw-r--r--  patches/panic-disable-random-on-rt.patch  2
-rw-r--r--  patches/panic-x86-Allow-CPUs-to-save-registers-even-if-loopi.patch  245
-rw-r--r--  patches/panic-x86-Fix-re-entrance-problem-due-to-panic-on-NM.patch  188
-rw-r--r--  patches/posix-timers-thread-posix-cpu-timers-on-rt.patch  6
-rw-r--r--  patches/preempt-lazy-support.patch  2
-rw-r--r--  patches/printk-kill.patch  2
-rw-r--r--  patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch  2
-rw-r--r--  patches/sched-cputime-Clarify-vtime-symbols-and-document-the.patch  89
-rw-r--r--  patches/sched-cputime-Convert-vtime_seqlock-to-seqcount.patch (renamed from patches/vtime-split-lock-and-seqcount.patch)  139
-rw-r--r--  patches/sched-delay-put-task.patch  6
-rw-r--r--  patches/sched-might-sleep-do-not-account-rcu-depth.patch  2
-rw-r--r--  patches/sched-mmdrop-delayed.patch  4
-rw-r--r--  patches/sched-provide-a-tsk_nr_cpus_allowed-helper.patch  2
-rw-r--r--  patches/sched-rt-mutex-wakeup.patch  2
-rw-r--r--  patches/series  9
-rw-r--r--  patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch  2
-rw-r--r--  patches/softirq-split-locks.patch  4
-rw-r--r--  patches/suspend-prevernt-might-sleep-splats.patch  10
-rw-r--r--  patches/trace_Use_rcuidle_version_for_preemptoff_hist_trace_point.patch  6
-rw-r--r--  patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch  29
-rw-r--r--  patches/x86-kvm-require-const-tsc-for-rt.patch  2
43 files changed, 840 insertions, 178 deletions
diff --git a/patches/0003-KVM-Use-simple-waitqueue-for-vcpu-wq.patch b/patches/0003-KVM-Use-simple-waitqueue-for-vcpu-wq.patch
index 5833fd67b6497f..f54be20e69f248 100644
--- a/patches/0003-KVM-Use-simple-waitqueue-for-vcpu-wq.patch
+++ b/patches/0003-KVM-Use-simple-waitqueue-for-vcpu-wq.patch
@@ -367,7 +367,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kvm_async_pf_vcpu_init(vcpu);
vcpu->pre_pcpu = -1;
-@@ -2002,7 +2001,7 @@ static int kvm_vcpu_check_block(struct k
+@@ -2003,7 +2002,7 @@ static int kvm_vcpu_check_block(struct k
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
ktime_t start, cur;
@@ -376,7 +376,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
bool waited = false;
u64 block_ns;
-@@ -2027,7 +2026,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcp
+@@ -2028,7 +2027,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcp
kvm_arch_vcpu_blocking(vcpu);
for (;;) {
@@ -385,7 +385,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (kvm_vcpu_check_block(vcpu) < 0)
break;
-@@ -2036,7 +2035,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcp
+@@ -2037,7 +2036,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcp
schedule();
}
@@ -394,7 +394,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
cur = ktime_get();
kvm_arch_vcpu_unblocking(vcpu);
-@@ -2068,11 +2067,11 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu
+@@ -2069,11 +2068,11 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu
{
int me;
int cpu = vcpu->cpu;
@@ -409,7 +409,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
++vcpu->stat.halt_wakeup;
}
-@@ -2173,7 +2172,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *m
+@@ -2174,7 +2173,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *m
continue;
if (vcpu == me)
continue;
diff --git a/patches/ARM-imx-always-use-TWD-on-IMX6Q.patch b/patches/ARM-imx-always-use-TWD-on-IMX6Q.patch
index 68eb5d06c8f67d..22055a030d93ec 100644
--- a/patches/ARM-imx-always-use-TWD-on-IMX6Q.patch
+++ b/patches/ARM-imx-always-use-TWD-on-IMX6Q.patch
@@ -14,11 +14,9 @@ timer.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- arch/arm/mach-imx/Kconfig | 2 +-
+ arch/arm/mach-imx/Kconfig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
-diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
-index 8ceda2844c4f..08bcf8fb76f2 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -524,7 +524,7 @@ config SOC_IMX6Q
@@ -30,6 +28,3 @@ index 8ceda2844c4f..08bcf8fb76f2 100644
select PCI_DOMAINS if PCI
select PINCTRL_IMX6Q
select SOC_IMX6
---
-2.8.0.rc3
-
diff --git a/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch b/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch
index fef6f77b70d2f9..d1e4403850f3cc 100644
--- a/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch
+++ b/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -1937,14 +1937,17 @@ static void drain_local_stock(struct wor
+@@ -1938,14 +1938,17 @@ static void drain_local_stock(struct wor
*/
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
diff --git a/patches/cgroups-use-simple-wait-in-css_release.patch b/patches/cgroups-use-simple-wait-in-css_release.patch
index 22e5cd41497a24..9e9708de4ea242 100644
--- a/patches/cgroups-use-simple-wait-in-css_release.patch
+++ b/patches/cgroups-use-simple-wait-in-css_release.patch
@@ -52,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
-@@ -4725,10 +4725,10 @@ static void css_free_rcu_fn(struct rcu_h
+@@ -4733,10 +4733,10 @@ static void css_free_rcu_fn(struct rcu_h
queue_work(cgroup_destroy_wq, &css->destroy_work);
}
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct cgroup_subsys *ss = css->ss;
struct cgroup *cgrp = css->cgroup;
-@@ -4767,8 +4767,8 @@ static void css_release(struct percpu_re
+@@ -4775,8 +4775,8 @@ static void css_release(struct percpu_re
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
@@ -76,7 +76,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void init_and_link_css(struct cgroup_subsys_state *css,
-@@ -5376,6 +5376,7 @@ static int __init cgroup_wq_init(void)
+@@ -5392,6 +5392,7 @@ static int __init cgroup_wq_init(void)
*/
cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
BUG_ON(!cgroup_destroy_wq);
diff --git a/patches/completion-use-simple-wait-queues.patch b/patches/completion-use-simple-wait-queues.patch
index 17ef483be110dc..10c178ec64b5be 100644
--- a/patches/completion-use-simple-wait-queues.patch
+++ b/patches/completion-use-simple-wait-queues.patch
@@ -136,7 +136,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct mm_struct;
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
-@@ -648,6 +648,10 @@ static void power_down(void)
+@@ -649,6 +649,10 @@ static void power_down(void)
cpu_relax();
}
@@ -147,7 +147,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* hibernate - Carry out system hibernation, including saving the image.
*/
-@@ -660,6 +664,8 @@ int hibernate(void)
+@@ -661,6 +665,8 @@ int hibernate(void)
return -EPERM;
}
@@ -156,7 +156,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
lock_system_sleep();
/* The snapshot device should not be opened while we're running */
if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
-@@ -725,6 +731,7 @@ int hibernate(void)
+@@ -726,6 +732,7 @@ int hibernate(void)
atomic_inc(&snapshot_device_available);
Unlock:
unlock_system_sleep();
diff --git a/patches/cond-resched-softirq-rt.patch b/patches/cond-resched-softirq-rt.patch
index 60805ba7c7c588..85a937b884755b 100644
--- a/patches/cond-resched-softirq-rt.patch
+++ b/patches/cond-resched-softirq-rt.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2985,12 +2985,16 @@ extern int __cond_resched_lock(spinlock_
+@@ -2987,12 +2987,16 @@ extern int __cond_resched_lock(spinlock_
__cond_resched_lock(lock); \
})
diff --git a/patches/cpu-rt-rework-cpu-down.patch b/patches/cpu-rt-rework-cpu-down.patch
index 213252995707ca..7d3cca283425ad 100644
--- a/patches/cpu-rt-rework-cpu-down.patch
+++ b/patches/cpu-rt-rework-cpu-down.patch
@@ -56,7 +56,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2286,6 +2286,10 @@ extern void do_set_cpus_allowed(struct t
+@@ -2288,6 +2288,10 @@ extern void do_set_cpus_allowed(struct t
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
@@ -67,7 +67,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask)
-@@ -2298,6 +2302,9 @@ static inline int set_cpus_allowed_ptr(s
+@@ -2300,6 +2304,9 @@ static inline int set_cpus_allowed_ptr(s
return -EINVAL;
return 0;
}
diff --git a/patches/dm-make-rt-aware.patch b/patches/dm-make-rt-aware.patch
index f0b0a890aee1bb..7bf6cbb9bab796 100644
--- a/patches/dm-make-rt-aware.patch
+++ b/patches/dm-make-rt-aware.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
-@@ -2128,7 +2128,7 @@ static void dm_request_fn(struct request
+@@ -2127,7 +2127,7 @@ static void dm_request_fn(struct request
/* Establish tio->ti before queuing work (map_tio_request) */
tio->ti = ti;
queue_kthread_work(&md->kworker, &tio->work);
diff --git a/patches/infiniband-mellanox-ib-use-nort-irq.patch b/patches/infiniband-mellanox-ib-use-nort-irq.patch
index 90b28b986e129d..01e88049f6c31f 100644
--- a/patches/infiniband-mellanox-ib-use-nort-irq.patch
+++ b/patches/infiniband-mellanox-ib-use-nort-irq.patch
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
-@@ -847,7 +847,7 @@ void ipoib_mcast_restart_task(struct wor
+@@ -857,7 +857,7 @@ void ipoib_mcast_restart_task(struct wor
ipoib_dbg_mcast(priv, "restarting multicast task\n");
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
netif_addr_lock(dev);
spin_lock(&priv->lock);
-@@ -929,7 +929,7 @@ void ipoib_mcast_restart_task(struct wor
+@@ -939,7 +939,7 @@ void ipoib_mcast_restart_task(struct wor
spin_unlock(&priv->lock);
netif_addr_unlock(dev);
diff --git a/patches/introduce_migrate_disable_cpu_light.patch b/patches/introduce_migrate_disable_cpu_light.patch
index 5dada63a008b54..ca6b01bd3c38c2 100644
--- a/patches/introduce_migrate_disable_cpu_light.patch
+++ b/patches/introduce_migrate_disable_cpu_light.patch
@@ -89,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int nr_cpus_allowed;
cpumask_t cpus_allowed;
-@@ -1836,14 +1842,6 @@ extern int arch_task_struct_size __read_
+@@ -1838,14 +1844,6 @@ extern int arch_task_struct_size __read_
# define arch_task_struct_size (sizeof(struct task_struct))
#endif
@@ -104,7 +104,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TNF_MIGRATED 0x01
#define TNF_NO_GROUP 0x02
#define TNF_SHARED 0x04
-@@ -3120,6 +3118,31 @@ static inline void set_task_cpu(struct t
+@@ -3122,6 +3120,31 @@ static inline void set_task_cpu(struct t
#endif /* CONFIG_SMP */
diff --git a/patches/kgb-serial-hackaround.patch b/patches/kgb-serial-hackaround.patch
index 2cb029728551e5..46fe574c195d90 100644
--- a/patches/kgb-serial-hackaround.patch
+++ b/patches/kgb-serial-hackaround.patch
@@ -18,10 +18,10 @@ Thanks,
Jason.
---
- drivers/tty/serial/8250/8250_port.c | 3 ++-
+ drivers/tty/serial/8250/8250_port.c | 3 +++
include/linux/kdb.h | 2 ++
kernel/debug/kdb/kdb_io.c | 6 ++----
- 3 files changed, 6 insertions(+), 5 deletions(-)
+ 3 files changed, 7 insertions(+), 4 deletions(-)
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -33,15 +33,15 @@ Jason.
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
-@@ -2851,7 +2852,7 @@ void serial8250_console_write(struct uar
+@@ -2845,6 +2846,8 @@ void serial8250_console_write(struct uar
- if (port->sysrq)
+ if (port->sysrq || oops_in_progress)
locked = 0;
-- else if (oops_in_progress)
-+ else if (oops_in_progress || in_kdb_printk())
- locked = spin_trylock_irqsave(&port->lock, flags);
++ else if (in_kdb_printk())
++ locked = spin_trylock_irqsave(&port->lock, flags);
else
spin_lock_irqsave(&port->lock, flags);
+
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -167,6 +167,7 @@ extern __printf(2, 0) int vkdb_printf(en
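Taken together with the new tty-serial-8250-don-t-take-the-trylock-during-oops.patch
from this release, the hunk above leaves serial8250_console_write() with the
lock policy sketched below. This is a reconstruction from the quoted hunks,
not the verbatim driver source: the port lock is skipped entirely while an
oops is in progress, because spin_trylock_irqsave() cannot be used when the
caller (e.g. panic()) already runs with interrupts disabled.

    if (port->sysrq || oops_in_progress)
            locked = 0;     /* no locking at all during sysrq/oops output */
    else if (in_kdb_printk())
            locked = spin_trylock_irqsave(&port->lock, flags);
    else
            spin_lock_irqsave(&port->lock, flags);

    /* ... emit the console string ... */

    if (locked)
            spin_unlock_irqrestore(&port->lock, flags);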
diff --git a/patches/latency-hist.patch b/patches/latency-hist.patch
index 411216eb3c00b3..a02f651f429391 100644
--- a/patches/latency-hist.patch
+++ b/patches/latency-hist.patch
@@ -236,7 +236,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int start_pid;
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1794,6 +1794,12 @@ struct task_struct {
+@@ -1796,6 +1796,12 @@ struct task_struct {
unsigned long trace;
/* bitmask and counter of trace recursion */
unsigned long trace_recursion;
@@ -1740,7 +1740,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include "trace.h"
-@@ -420,11 +421,13 @@ void start_critical_timings(void)
+@@ -424,11 +425,13 @@ void start_critical_timings(void)
{
if (preempt_trace() || irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
@@ -1754,7 +1754,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (preempt_trace() || irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
-@@ -434,6 +437,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings)
+@@ -438,6 +441,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings)
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
@@ -1762,7 +1762,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!preempt_trace() && irq_trace())
stop_critical_timing(a0, a1);
}
-@@ -442,6 +446,7 @@ void time_hardirqs_off(unsigned long a0,
+@@ -446,6 +450,7 @@ void time_hardirqs_off(unsigned long a0,
{
if (!preempt_trace() && irq_trace())
start_critical_timing(a0, a1);
@@ -1770,7 +1770,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
#else /* !CONFIG_PROVE_LOCKING */
-@@ -467,6 +472,7 @@ inline void print_irqtrace_events(struct
+@@ -471,6 +476,7 @@ inline void print_irqtrace_events(struct
*/
void trace_hardirqs_on(void)
{
@@ -1778,7 +1778,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
-@@ -476,11 +482,13 @@ void trace_hardirqs_off(void)
+@@ -480,11 +486,13 @@ void trace_hardirqs_off(void)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
@@ -1792,7 +1792,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, caller_addr);
}
-@@ -490,6 +498,7 @@ EXPORT_SYMBOL(trace_hardirqs_on_caller);
+@@ -494,6 +502,7 @@ EXPORT_SYMBOL(trace_hardirqs_on_caller);
{
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, caller_addr);
@@ -1800,7 +1800,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);
-@@ -499,12 +508,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller)
+@@ -503,12 +512,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller)
#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 64766cad46816a..35c628ed6d79e3 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -1,4 +1,4 @@
-Subject: v4.4.7-rt15
+Subject: v4.4.7-rt16
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 08 Jul 2011 20:25:16 +0200
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt15
++-rt16
diff --git a/patches/md-raid5-percpu-handling-rt-aware.patch b/patches/md-raid5-percpu-handling-rt-aware.patch
index 67c66a5ace93b4..a6ce854361cc4f 100644
--- a/patches/md-raid5-percpu-handling-rt-aware.patch
+++ b/patches/md-raid5-percpu-handling-rt-aware.patch
@@ -20,7 +20,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
-@@ -1929,8 +1929,9 @@ static void raid_run_ops(struct stripe_h
+@@ -1920,8 +1920,9 @@ static void raid_run_ops(struct stripe_h
struct raid5_percpu *percpu;
unsigned long cpu;
@@ -31,7 +31,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
ops_run_biofill(sh);
overlap_clear++;
-@@ -1986,7 +1987,8 @@ static void raid_run_ops(struct stripe_h
+@@ -1977,7 +1978,8 @@ static void raid_run_ops(struct stripe_h
if (test_and_clear_bit(R5_Overlap, &dev->flags))
wake_up(&sh->raid_conf->wait_for_overlap);
}
@@ -41,7 +41,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
}
static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
-@@ -6411,6 +6413,7 @@ static int raid5_alloc_percpu(struct r5c
+@@ -6414,6 +6416,7 @@ static int raid5_alloc_percpu(struct r5c
__func__, cpu);
break;
}
diff --git a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
index ce0cdf65dc84d9..822cda34344af2 100644
--- a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
+++ b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -1957,7 +1957,7 @@ static void drain_all_stock(struct mem_c
+@@ -1958,7 +1958,7 @@ static void drain_all_stock(struct mem_c
return;
/* Notify other cpus that system-wide "drain" is running */
get_online_cpus();
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for_each_online_cpu(cpu) {
struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
struct mem_cgroup *memcg;
-@@ -1974,7 +1974,7 @@ static void drain_all_stock(struct mem_c
+@@ -1975,7 +1975,7 @@ static void drain_all_stock(struct mem_c
schedule_work_on(cpu, &stock->work);
}
}
diff --git a/patches/mm-memcontrol-do_not_disable_irq.patch b/patches/mm-memcontrol-do_not_disable_irq.patch
index 7a0075d69fbfe0..ffc50724fa6327 100644
--- a/patches/mm-memcontrol-do_not_disable_irq.patch
+++ b/patches/mm-memcontrol-do_not_disable_irq.patch
@@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static const char * const mem_cgroup_stat_names[] = {
"cache",
"rss",
-@@ -4616,12 +4619,12 @@ static int mem_cgroup_move_account(struc
+@@ -4617,12 +4620,12 @@ static int mem_cgroup_move_account(struc
ret = 0;
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
out_unlock:
unlock_page(page);
out:
-@@ -5374,10 +5377,10 @@ void mem_cgroup_commit_charge(struct pag
+@@ -5410,10 +5413,10 @@ void mem_cgroup_commit_charge(struct pag
VM_BUG_ON_PAGE(!PageTransHuge(page), page);
}
@@ -87,7 +87,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (do_swap_account && PageSwapCache(page)) {
swp_entry_t entry = { .val = page_private(page) };
-@@ -5433,14 +5436,14 @@ static void uncharge_batch(struct mem_cg
+@@ -5469,14 +5472,14 @@ static void uncharge_batch(struct mem_cg
memcg_oom_recover(memcg);
}
@@ -104,7 +104,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!mem_cgroup_is_root(memcg))
css_put_many(&memcg->css, nr_pages);
-@@ -5632,6 +5635,7 @@ void mem_cgroup_swapout(struct page *pag
+@@ -5668,6 +5671,7 @@ void mem_cgroup_swapout(struct page *pag
{
struct mem_cgroup *memcg;
unsigned short oldid;
@@ -112,7 +112,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(page_count(page), page);
-@@ -5660,9 +5664,13 @@ void mem_cgroup_swapout(struct page *pag
+@@ -5696,9 +5700,13 @@ void mem_cgroup_swapout(struct page *pag
* important here to have the interrupts disabled because it is the
* only synchronisation we have for udpating the per-CPU variables.
*/
diff --git a/patches/mm-page_alloc-reduce-lock-sections-further.patch b/patches/mm-page_alloc-reduce-lock-sections-further.patch
index 3b1ae808485e95..1a47c5c9e09b73 100644
--- a/patches/mm-page_alloc-reduce-lock-sections-further.patch
+++ b/patches/mm-page_alloc-reduce-lock-sections-further.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -777,7 +777,7 @@ static inline int free_pages_check(struc
+@@ -797,7 +797,7 @@ static inline int free_pages_check(struc
}
/*
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* Assumes all pages on list are in same zone, and of same order.
* count is the number of pages to free.
*
-@@ -788,18 +788,53 @@ static inline int free_pages_check(struc
+@@ -808,18 +808,53 @@ static inline int free_pages_check(struc
* pinned" detection logic.
*/
static void free_pcppages_bulk(struct zone *zone, int count,
@@ -80,7 +80,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
while (to_free) {
struct page *page;
struct list_head *list;
-@@ -815,7 +850,7 @@ static void free_pcppages_bulk(struct zo
+@@ -835,7 +870,7 @@ static void free_pcppages_bulk(struct zo
batch_free++;
if (++migratetype == MIGRATE_PCPTYPES)
migratetype = 0;
@@ -89,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} while (list_empty(list));
/* This is the only non-empty list. Free them all. */
-@@ -823,24 +858,12 @@ static void free_pcppages_bulk(struct zo
+@@ -843,24 +878,12 @@ static void free_pcppages_bulk(struct zo
batch_free = to_free;
do {
@@ -116,7 +116,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void free_one_page(struct zone *zone,
-@@ -849,7 +872,9 @@ static void free_one_page(struct zone *z
+@@ -869,7 +892,9 @@ static void free_one_page(struct zone *z
int migratetype)
{
unsigned long nr_scanned;
@@ -127,7 +127,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
if (nr_scanned)
__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
-@@ -859,7 +884,7 @@ static void free_one_page(struct zone *z
+@@ -879,7 +904,7 @@ static void free_one_page(struct zone *z
migratetype = get_pfnblock_migratetype(page, pfn);
}
__free_one_page(page, pfn, zone, order, migratetype);
@@ -136,7 +136,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static int free_tail_pages_check(struct page *head_page, struct page *page)
-@@ -1870,16 +1895,18 @@ static int rmqueue_bulk(struct zone *zon
+@@ -1890,16 +1915,18 @@ static int rmqueue_bulk(struct zone *zon
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
unsigned long flags;
@@ -156,7 +156,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
#endif
-@@ -1895,16 +1922,21 @@ static void drain_pages_zone(unsigned in
+@@ -1915,16 +1942,21 @@ static void drain_pages_zone(unsigned in
unsigned long flags;
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
@@ -180,7 +180,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2082,8 +2114,13 @@ void free_hot_cold_page(struct page *pag
+@@ -2102,8 +2134,13 @@ void free_hot_cold_page(struct page *pag
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
diff --git a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
index 31ecdd4469da6f..24b756261c504f 100644
--- a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
+++ b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int page_group_by_mobility_disabled __read_mostly;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-@@ -997,10 +1010,10 @@ static void __free_pages_ok(struct page
+@@ -1017,10 +1030,10 @@ static void __free_pages_ok(struct page
return;
migratetype = get_pfnblock_migratetype(page, pfn);
@@ -57,7 +57,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void __init __free_pages_boot_core(struct page *page,
-@@ -1859,14 +1872,14 @@ void drain_zone_pages(struct zone *zone,
+@@ -1879,14 +1892,14 @@ void drain_zone_pages(struct zone *zone,
unsigned long flags;
int to_drain, batch;
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
#endif
-@@ -1883,7 +1896,7 @@ static void drain_pages_zone(unsigned in
+@@ -1903,7 +1916,7 @@ static void drain_pages_zone(unsigned in
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
@@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
-@@ -1891,7 +1904,7 @@ static void drain_pages_zone(unsigned in
+@@ -1911,7 +1924,7 @@ static void drain_pages_zone(unsigned in
free_pcppages_bulk(zone, pcp->count, pcp);
pcp->count = 0;
}
@@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -1977,8 +1990,17 @@ void drain_all_pages(struct zone *zone)
+@@ -1997,8 +2010,17 @@ void drain_all_pages(struct zone *zone)
else
cpumask_clear_cpu(cpu, &cpus_with_pcps);
}
@@ -110,7 +110,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
#ifdef CONFIG_HIBERNATION
-@@ -2034,7 +2056,7 @@ void free_hot_cold_page(struct page *pag
+@@ -2054,7 +2076,7 @@ void free_hot_cold_page(struct page *pag
migratetype = get_pfnblock_migratetype(page, pfn);
set_pcppage_migratetype(page, migratetype);
@@ -119,7 +119,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
__count_vm_event(PGFREE);
/*
-@@ -2065,7 +2087,7 @@ void free_hot_cold_page(struct page *pag
+@@ -2085,7 +2107,7 @@ void free_hot_cold_page(struct page *pag
}
out:
@@ -128,7 +128,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2200,7 +2222,7 @@ struct page *buffered_rmqueue(struct zon
+@@ -2220,7 +2242,7 @@ struct page *buffered_rmqueue(struct zon
struct per_cpu_pages *pcp;
struct list_head *list;
@@ -137,7 +137,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
if (list_empty(list)) {
-@@ -2232,7 +2254,7 @@ struct page *buffered_rmqueue(struct zon
+@@ -2252,7 +2274,7 @@ struct page *buffered_rmqueue(struct zon
*/
WARN_ON_ONCE(order > 1);
}
@@ -146,7 +146,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
page = NULL;
if (alloc_flags & ALLOC_HARDER) {
-@@ -2242,11 +2264,13 @@ struct page *buffered_rmqueue(struct zon
+@@ -2262,11 +2284,13 @@ struct page *buffered_rmqueue(struct zon
}
if (!page)
page = __rmqueue(zone, order, migratetype, gfp_flags);
@@ -162,7 +162,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
-@@ -2256,13 +2280,13 @@ struct page *buffered_rmqueue(struct zon
+@@ -2276,13 +2300,13 @@ struct page *buffered_rmqueue(struct zon
__count_zone_vm_events(PGALLOC, zone, 1 << order);
zone_statistics(preferred_zone, zone, gfp_flags);
@@ -178,7 +178,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return NULL;
}
-@@ -5928,6 +5952,7 @@ static int page_alloc_cpu_notify(struct
+@@ -5948,6 +5972,7 @@ static int page_alloc_cpu_notify(struct
void __init page_alloc_init(void)
{
hotcpu_notifier(page_alloc_cpu_notify, 0);
@@ -186,7 +186,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -6822,7 +6847,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -6842,7 +6867,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
@@ -195,7 +195,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -6831,7 +6856,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -6851,7 +6876,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
diff --git a/patches/mm-rt-kmap-atomic-scheduling.patch b/patches/mm-rt-kmap-atomic-scheduling.patch
index 43fa53f1d2e809..9dbd83c51dff75 100644
--- a/patches/mm-rt-kmap-atomic-scheduling.patch
+++ b/patches/mm-rt-kmap-atomic-scheduling.patch
@@ -229,7 +229,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
#include <asm/page.h>
#include <asm/ptrace.h>
-@@ -1847,6 +1848,12 @@ struct task_struct {
+@@ -1849,6 +1850,12 @@ struct task_struct {
int softirq_nestcnt;
unsigned int softirqs_raised;
#endif
diff --git a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
index 383fa8c6622d1f..208c29c1fc9c20 100644
--- a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
+++ b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1850,6 +1850,9 @@ struct task_struct {
+@@ -1852,6 +1852,9 @@ struct task_struct {
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
#endif
diff --git a/patches/oleg-signal-rt-fix.patch b/patches/oleg-signal-rt-fix.patch
index 7222dd3ca80d33..6367819f96b47a 100644
--- a/patches/oleg-signal-rt-fix.patch
+++ b/patches/oleg-signal-rt-fix.patch
@@ -76,7 +76,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1577,6 +1577,10 @@ struct task_struct {
+@@ -1579,6 +1579,10 @@ struct task_struct {
sigset_t blocked, real_blocked;
sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
struct sigpending pending;
diff --git a/patches/panic-change-nmi_panic-from-macro-to-function.patch b/patches/panic-change-nmi_panic-from-macro-to-function.patch
new file mode 100644
index 00000000000000..608acc78014abb
--- /dev/null
+++ b/patches/panic-change-nmi_panic-from-macro-to-function.patch
@@ -0,0 +1,112 @@
+From 8b60994ea4ef1dad39b29a0e61261bd0c2c2919f Mon Sep 17 00:00:00 2001
+From: Hidehiro Kawai <hidehiro.kawai.ez@hitachi.com>
+Date: Tue, 22 Mar 2016 14:27:17 -0700
+Subject: [PATCH] panic: change nmi_panic from macro to function
+
+Commit 1717f2096b54 ("panic, x86: Fix re-entrance problem due to panic
+on NMI") and commit 58c5661f2144 ("panic, x86: Allow CPUs to save
+registers even if looping in NMI context") introduced nmi_panic() which
+prevents concurrent/recursive execution of panic(). It also saves
+registers for the crash dump on x86.
+
+However, there are some cases where NMI handlers still use panic().
+This patch set partially replaces them with nmi_panic() in those cases.
+
+Even this patchset is applied, some NMI or similar handlers (e.g. MCE
+handler) continue to use panic(). This is because I can't test them
+well and actual problems won't happen. For example, the possibility
+that normal panic and panic on MCE happen simultaneously is very low.
+
+This patch (of 3):
+
+Convert nmi_panic() to a proper function and export it instead of
+exporting internal implementation details to modules, for obvious
+reasons.
+
+Signed-off-by: Hidehiro Kawai <hidehiro.kawai.ez@hitachi.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+Acked-by: Michal Nazarewicz <mina86@mina86.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
+Cc: Nicolas Iooss <nicolas.iooss_linux@m4x.org>
+Cc: Javi Merino <javi.merino@arm.com>
+Cc: Gobinda Charan Maji <gobinda.cemk07@gmail.com>
+Cc: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
+Cc: HATAYAMA Daisuke <d.hatayama@jp.fujitsu.com>
+Cc: Tejun Heo <tj@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/kernel.h | 21 +--------------------
+ kernel/panic.c | 20 ++++++++++++++++++++
+ 2 files changed, 21 insertions(+), 20 deletions(-)
+
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -255,7 +255,7 @@ extern long (*panic_blink)(int state);
+ __printf(1, 2)
+ void panic(const char *fmt, ...)
+ __noreturn __cold;
+-void nmi_panic_self_stop(struct pt_regs *);
++void nmi_panic(struct pt_regs *regs, const char *msg);
+ extern void oops_enter(void);
+ extern void oops_exit(void);
+ void print_oops_end_marker(void);
+@@ -455,25 +455,6 @@ extern atomic_t panic_cpu;
+ #define PANIC_CPU_INVALID -1
+
+ /*
+- * A variant of panic() called from NMI context. We return if we've already
+- * panicked on this CPU. If another CPU already panicked, loop in
+- * nmi_panic_self_stop() which can provide architecture dependent code such
+- * as saving register state for crash dump.
+- */
+-#define nmi_panic(regs, fmt, ...) \
+-do { \
+- int old_cpu, cpu; \
+- \
+- cpu = raw_smp_processor_id(); \
+- old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu); \
+- \
+- if (old_cpu == PANIC_CPU_INVALID) \
+- panic(fmt, ##__VA_ARGS__); \
+- else if (old_cpu != cpu) \
+- nmi_panic_self_stop(regs); \
+-} while (0)
+-
+-/*
+ * Only to be used by arch init code. If the user over-wrote the default
+ * CONFIG_PANIC_TIMEOUT, honor it.
+ */
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -72,6 +72,26 @@ void __weak nmi_panic_self_stop(struct p
+
+ atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);
+
++/*
++ * A variant of panic() called from NMI context. We return if we've already
++ * panicked on this CPU. If another CPU already panicked, loop in
++ * nmi_panic_self_stop() which can provide architecture dependent code such
++ * as saving register state for crash dump.
++ */
++void nmi_panic(struct pt_regs *regs, const char *msg)
++{
++ int old_cpu, cpu;
++
++ cpu = raw_smp_processor_id();
++ old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu);
++
++ if (old_cpu == PANIC_CPU_INVALID)
++ panic("%s", msg);
++ else if (old_cpu != cpu)
++ nmi_panic_self_stop(regs);
++}
++EXPORT_SYMBOL(nmi_panic);
++
+ /**
+ * panic - halt the system
+ * @fmt: The text string to print
diff --git a/patches/panic-disable-random-on-rt.patch b/patches/panic-disable-random-on-rt.patch
index caed5d57dabb61..e9f300080a8c9f 100644
--- a/patches/panic-disable-random-on-rt.patch
+++ b/patches/panic-disable-random-on-rt.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/panic.c
+++ b/kernel/panic.c
-@@ -400,9 +400,11 @@ static u64 oops_id;
+@@ -439,9 +439,11 @@ static u64 oops_id;
static int init_oops_id(void)
{
diff --git a/patches/panic-x86-Allow-CPUs-to-save-registers-even-if-loopi.patch b/patches/panic-x86-Allow-CPUs-to-save-registers-even-if-loopi.patch
new file mode 100644
index 00000000000000..876d058c986d3b
--- /dev/null
+++ b/patches/panic-x86-Allow-CPUs-to-save-registers-even-if-loopi.patch
@@ -0,0 +1,245 @@
+From 957548a86594805bce67b7b5c8360e78a0c658e1 Mon Sep 17 00:00:00 2001
+From: Hidehiro Kawai <hidehiro.kawai.ez@hitachi.com>
+Date: Mon, 14 Dec 2015 11:19:10 +0100
+Subject: [PATCH] panic, x86: Allow CPUs to save registers even if looping
+ in NMI context
+
+Currently, kdump_nmi_shootdown_cpus(), a subroutine of crash_kexec(),
+sends an NMI IPI to CPUs which haven't called panic() to stop them,
+save their register information and do some cleanups for crash dumping.
+However, if such a CPU is infinitely looping in NMI context, we fail to
+save its register information into the crash dump.
+
+For example, this can happen when unknown NMIs are broadcast to all
+CPUs as follows:
+
+ CPU 0 CPU 1
+ =========================== ==========================
+ receive an unknown NMI
+ unknown_nmi_error()
+ panic() receive an unknown NMI
+ spin_trylock(&panic_lock) unknown_nmi_error()
+ crash_kexec() panic()
+ spin_trylock(&panic_lock)
+ panic_smp_self_stop()
+ infinite loop
+ kdump_nmi_shootdown_cpus()
+ issue NMI IPI -----------> blocked until IRET
+ infinite loop...
+
+Here, since CPU 1 is in NMI context, the second NMI from CPU 0 is
+blocked until CPU 1 executes IRET. However, CPU 1 never executes IRET,
+so the NMI is not handled and the callback function to save registers is
+never called.
+
+In practice, this can happen on some servers which broadcast NMIs to all
+CPUs when the NMI button is pushed.
+
+To save registers in this case, we need to:
+
+ a) Return from NMI handler instead of looping infinitely
+ or
+ b) Call the callback function directly from the infinite loop
+
+Inherently, a) is risky because NMI is also used to prevent corrupted
+data from being propagated to devices. So, we chose b).
+
+This patch does the following:
+
+1. Move the infinite looping of CPUs which haven't called panic() in NMI
+ context (actually done by panic_smp_self_stop()) outside of panic() to
+ enable us to refer pt_regs. Please note that panic_smp_self_stop() is
+ still used for normal context.
+
+2. Call a callback of kdump_nmi_shootdown_cpus() directly to save
+ registers and do some cleanups after setting waiting_for_crash_ipi which
+ is used for counting down the number of CPUs which handled the callback
+
+Signed-off-by: Hidehiro Kawai <hidehiro.kawai.ez@hitachi.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: Aaron Tomlin <atomlin@redhat.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Baoquan He <bhe@redhat.com>
+Cc: Chris Metcalf <cmetcalf@ezchip.com>
+Cc: Dave Young <dyoung@redhat.com>
+Cc: David Hildenbrand <dahi@linux.vnet.ibm.com>
+Cc: Don Zickus <dzickus@redhat.com>
+Cc: Eric Biederman <ebiederm@xmission.com>
+Cc: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: Gobinda Charan Maji <gobinda.cemk07@gmail.com>
+Cc: HATAYAMA Daisuke <d.hatayama@jp.fujitsu.com>
+Cc: Hidehiro Kawai <hidehiro.kawai.ez@hitachi.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Javi Merino <javi.merino@arm.com>
+Cc: Jiang Liu <jiang.liu@linux.intel.com>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: kexec@lists.infradead.org
+Cc: linux-doc@vger.kernel.org
+Cc: lkml <linux-kernel@vger.kernel.org>
+Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+Cc: Michal Nazarewicz <mina86@mina86.com>
+Cc: Nicolas Iooss <nicolas.iooss_linux@m4x.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Prarit Bhargava <prarit@redhat.com>
+Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
+Cc: Seth Jennings <sjenning@redhat.com>
+Cc: Stefan Lippers-Hollmann <s.l-h@gmx.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ulrich Obergfell <uobergfe@redhat.com>
+Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
+Cc: Vivek Goyal <vgoyal@redhat.com>
+Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
+Link: http://lkml.kernel.org/r/20151210014628.25437.75256.stgit@softrs
+[ Cleanup comments, fixup formatting. ]
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/kernel/nmi.c | 6 +++---
+ arch/x86/kernel/reboot.c | 20 ++++++++++++++++++++
+ include/linux/kernel.h | 16 ++++++++++++----
+ kernel/panic.c | 9 +++++++++
+ kernel/watchdog.c | 2 +-
+ 5 files changed, 45 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/kernel/nmi.c
++++ b/arch/x86/kernel/nmi.c
+@@ -231,7 +231,7 @@ pci_serr_error(unsigned char reason, str
+ #endif
+
+ if (panic_on_unrecovered_nmi)
+- nmi_panic("NMI: Not continuing");
++ nmi_panic(regs, "NMI: Not continuing");
+
+ pr_emerg("Dazed and confused, but trying to continue\n");
+
+@@ -256,7 +256,7 @@ io_check_error(unsigned char reason, str
+ show_regs(regs);
+
+ if (panic_on_io_nmi) {
+- nmi_panic("NMI IOCK error: Not continuing");
++ nmi_panic(regs, "NMI IOCK error: Not continuing");
+
+ /*
+ * If we end up here, it means we have received an NMI while
+@@ -305,7 +305,7 @@ unknown_nmi_error(unsigned char reason,
+
+ pr_emerg("Do you have a strange power saving mode enabled?\n");
+ if (unknown_nmi_panic || panic_on_unrecovered_nmi)
+- nmi_panic("NMI: Not continuing");
++ nmi_panic(regs, "NMI: Not continuing");
+
+ pr_emerg("Dazed and confused, but trying to continue\n");
+ }
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -726,6 +726,7 @@ static int crashing_cpu;
+ static nmi_shootdown_cb shootdown_callback;
+
+ static atomic_t waiting_for_crash_ipi;
++static int crash_ipi_issued;
+
+ static int crash_nmi_callback(unsigned int val, struct pt_regs *regs)
+ {
+@@ -788,6 +789,9 @@ void nmi_shootdown_cpus(nmi_shootdown_cb
+
+ smp_send_nmi_allbutself();
+
++ /* Kick CPUs looping in NMI context. */
++ WRITE_ONCE(crash_ipi_issued, 1);
++
+ msecs = 1000; /* Wait at most a second for the other cpus to stop */
+ while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
+ mdelay(1);
+@@ -796,6 +800,22 @@ void nmi_shootdown_cpus(nmi_shootdown_cb
+
+ /* Leave the nmi callback set */
+ }
++
++/* Override the weak function in kernel/panic.c */
++void nmi_panic_self_stop(struct pt_regs *regs)
++{
++ while (1) {
++ /*
++ * Wait for the crash dumping IPI to be issued, and then
++ * call its callback directly.
++ */
++ if (READ_ONCE(crash_ipi_issued))
++ crash_nmi_callback(0, regs); /* Don't return */
++
++ cpu_relax();
++ }
++}
++
+ #else /* !CONFIG_SMP */
+ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
+ {
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -255,6 +255,7 @@ extern long (*panic_blink)(int state);
+ __printf(1, 2)
+ void panic(const char *fmt, ...)
+ __noreturn __cold;
++void nmi_panic_self_stop(struct pt_regs *);
+ extern void oops_enter(void);
+ extern void oops_exit(void);
+ void print_oops_end_marker(void);
+@@ -455,14 +456,21 @@ extern atomic_t panic_cpu;
+
+ /*
+ * A variant of panic() called from NMI context. We return if we've already
+- * panicked on this CPU.
++ * panicked on this CPU. If another CPU already panicked, loop in
++ * nmi_panic_self_stop() which can provide architecture dependent code such
++ * as saving register state for crash dump.
+ */
+-#define nmi_panic(fmt, ...) \
++#define nmi_panic(regs, fmt, ...) \
+ do { \
+- int cpu = raw_smp_processor_id(); \
++ int old_cpu, cpu; \
+ \
+- if (atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu) != cpu) \
++ cpu = raw_smp_processor_id(); \
++ old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu); \
++ \
++ if (old_cpu == PANIC_CPU_INVALID) \
+ panic(fmt, ##__VA_ARGS__); \
++ else if (old_cpu != cpu) \
++ nmi_panic_self_stop(regs); \
+ } while (0)
+
+ /*
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -61,6 +61,15 @@ void __weak panic_smp_self_stop(void)
+ cpu_relax();
+ }
+
++/*
++ * Stop ourselves in NMI context if another CPU has already panicked. Arch code
++ * may override this to prepare for crash dumping, e.g. save regs info.
++ */
++void __weak nmi_panic_self_stop(struct pt_regs *regs)
++{
++ panic_smp_self_stop();
++}
++
+ atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);
+
+ /**
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -351,7 +351,7 @@ static void watchdog_overflow_callback(s
+ trigger_allbutself_cpu_backtrace();
+
+ if (hardlockup_panic)
+- nmi_panic("Hard LOCKUP");
++ nmi_panic(regs, "Hard LOCKUP");
+
+ __this_cpu_write(hard_watchdog_warn, true);
+ return;
diff --git a/patches/panic-x86-Fix-re-entrance-problem-due-to-panic-on-NM.patch b/patches/panic-x86-Fix-re-entrance-problem-due-to-panic-on-NM.patch
new file mode 100644
index 00000000000000..75fe083009f0c3
--- /dev/null
+++ b/patches/panic-x86-Fix-re-entrance-problem-due-to-panic-on-NM.patch
@@ -0,0 +1,188 @@
+From: Hidehiro Kawai <hidehiro.kawai.ez@hitachi.com>
+Date: Mon, 14 Dec 2015 11:19:09 +0100
+Subject: [PATCH] panic, x86: Fix re-entrance problem due to panic on NMI
+
+If panic on NMI happens just after panic() on the same CPU, panic() is
+recursively called. Kernel stalls, as a result, after failing to acquire
+panic_lock.
+
+To avoid this problem, don't call panic() in NMI context if we've
+already entered panic().
+
+For that, introduce nmi_panic() macro to reduce code duplication. In
+the case of panic on NMI, don't return from NMI handlers if another CPU
+already panicked.
+
+Signed-off-by: Hidehiro Kawai <hidehiro.kawai.ez@hitachi.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: Aaron Tomlin <atomlin@redhat.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Baoquan He <bhe@redhat.com>
+Cc: Chris Metcalf <cmetcalf@ezchip.com>
+Cc: David Hildenbrand <dahi@linux.vnet.ibm.com>
+Cc: Don Zickus <dzickus@redhat.com>
+Cc: "Eric W. Biederman" <ebiederm@xmission.com>
+Cc: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: Gobinda Charan Maji <gobinda.cemk07@gmail.com>
+Cc: HATAYAMA Daisuke <d.hatayama@jp.fujitsu.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Javi Merino <javi.merino@arm.com>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: kexec@lists.infradead.org
+Cc: linux-doc@vger.kernel.org
+Cc: lkml <linux-kernel@vger.kernel.org>
+Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+Cc: Michal Nazarewicz <mina86@mina86.com>
+Cc: Nicolas Iooss <nicolas.iooss_linux@m4x.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Prarit Bhargava <prarit@redhat.com>
+Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
+Cc: Rusty Russell <rusty@rustcorp.com.au>
+Cc: Seth Jennings <sjenning@redhat.com>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ulrich Obergfell <uobergfe@redhat.com>
+Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
+Cc: Vivek Goyal <vgoyal@redhat.com>
+Link: http://lkml.kernel.org/r/20151210014626.25437.13302.stgit@softrs
+[ Cleanup comments, fixup formatting. ]
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/kernel/nmi.c | 16 ++++++++++++----
+ include/linux/kernel.h | 20 ++++++++++++++++++++
+ kernel/panic.c | 16 +++++++++++++---
+ kernel/watchdog.c | 2 +-
+ 4 files changed, 46 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/kernel/nmi.c
++++ b/arch/x86/kernel/nmi.c
+@@ -231,7 +231,7 @@ pci_serr_error(unsigned char reason, str
+ #endif
+
+ if (panic_on_unrecovered_nmi)
+- panic("NMI: Not continuing");
++ nmi_panic("NMI: Not continuing");
+
+ pr_emerg("Dazed and confused, but trying to continue\n");
+
+@@ -255,8 +255,16 @@ io_check_error(unsigned char reason, str
+ reason, smp_processor_id());
+ show_regs(regs);
+
+- if (panic_on_io_nmi)
+- panic("NMI IOCK error: Not continuing");
++ if (panic_on_io_nmi) {
++ nmi_panic("NMI IOCK error: Not continuing");
++
++ /*
++ * If we end up here, it means we have received an NMI while
++ * processing panic(). Simply return without delaying and
++ * re-enabling NMIs.
++ */
++ return;
++ }
+
+ /* Re-enable the IOCK line, wait for a few seconds */
+ reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
+@@ -297,7 +305,7 @@ unknown_nmi_error(unsigned char reason,
+
+ pr_emerg("Do you have a strange power saving mode enabled?\n");
+ if (unknown_nmi_panic || panic_on_unrecovered_nmi)
+- panic("NMI: Not continuing");
++ nmi_panic("NMI: Not continuing");
+
+ pr_emerg("Dazed and confused, but trying to continue\n");
+ }
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -446,6 +446,26 @@ extern int sysctl_panic_on_stackoverflow
+ extern bool crash_kexec_post_notifiers;
+
+ /*
++ * panic_cpu is used for synchronizing panic() and crash_kexec() execution. It
++ * holds a CPU number which is executing panic() currently. A value of
++ * PANIC_CPU_INVALID means no CPU has entered panic() or crash_kexec().
++ */
++extern atomic_t panic_cpu;
++#define PANIC_CPU_INVALID -1
++
++/*
++ * A variant of panic() called from NMI context. We return if we've already
++ * panicked on this CPU.
++ */
++#define nmi_panic(fmt, ...) \
++do { \
++ int cpu = raw_smp_processor_id(); \
++ \
++ if (atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu) != cpu) \
++ panic(fmt, ##__VA_ARGS__); \
++} while (0)
++
++/*
+ * Only to be used by arch init code. If the user over-wrote the default
+ * CONFIG_PANIC_TIMEOUT, honor it.
+ */
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -61,6 +61,8 @@ void __weak panic_smp_self_stop(void)
+ cpu_relax();
+ }
+
++atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);
++
+ /**
+ * panic - halt the system
+ * @fmt: The text string to print
+@@ -71,17 +73,17 @@ void __weak panic_smp_self_stop(void)
+ */
+ void panic(const char *fmt, ...)
+ {
+- static DEFINE_SPINLOCK(panic_lock);
+ static char buf[1024];
+ va_list args;
+ long i, i_next = 0;
+ int state = 0;
++ int old_cpu, this_cpu;
+
+ /*
+ * Disable local interrupts. This will prevent panic_smp_self_stop
+ * from deadlocking the first cpu that invokes the panic, since
+ * there is nothing to prevent an interrupt handler (that runs
+- * after the panic_lock is acquired) from invoking panic again.
++ * after setting panic_cpu) from invoking panic() again.
+ */
+ local_irq_disable();
+
+@@ -94,8 +96,16 @@ void panic(const char *fmt, ...)
+ * multiple parallel invocations of panic, all other CPUs either
+ * stop themself or will wait until they are stopped by the 1st CPU
+ * with smp_send_stop().
++ *
++ * `old_cpu == PANIC_CPU_INVALID' means this is the 1st CPU which
++ * comes here, so go ahead.
++ * `old_cpu == this_cpu' means we came from nmi_panic() which sets
++ * panic_cpu to this CPU. In this case, this is also the 1st CPU.
+ */
+- if (!spin_trylock(&panic_lock))
++ this_cpu = raw_smp_processor_id();
++ old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
++
++ if (old_cpu != PANIC_CPU_INVALID && old_cpu != this_cpu)
+ panic_smp_self_stop();
+
+ console_verbose();
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -351,7 +351,7 @@ static void watchdog_overflow_callback(s
+ trigger_allbutself_cpu_backtrace();
+
+ if (hardlockup_panic)
+- panic("Hard LOCKUP");
++ nmi_panic("Hard LOCKUP");
+
+ __this_cpu_write(hard_watchdog_warn, true);
+ return;
diff --git a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
index b150e65775d6cd..dac394b24e3585 100644
--- a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
+++ b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
@@ -31,8 +31,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
# define INIT_VTIME(tsk) \
- .vtime_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.vtime_lock), \
-@@ -240,6 +246,7 @@ extern struct task_group root_task_group
+ .vtime_seqcount = SEQCNT_ZERO(tsk.vtime_seqcount), \
+@@ -239,6 +245,7 @@ extern struct task_group root_task_group
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
.pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
.timer_slack_ns = 50000, /* 50 usec default slack */ \
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1542,6 +1542,9 @@ struct task_struct {
+@@ -1544,6 +1544,9 @@ struct task_struct {
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 67ae12bd9d3dd9..41137f487c862a 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -165,7 +165,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2965,6 +2965,43 @@ static inline int test_tsk_need_resched(
+@@ -2967,6 +2967,43 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
diff --git a/patches/printk-kill.patch b/patches/printk-kill.patch
index fa3175f45c4930..444c4596dffad9 100644
--- a/patches/printk-kill.patch
+++ b/patches/printk-kill.patch
@@ -159,5 +159,5 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ raw_spin_unlock(&watchdog_output_lock);
if (hardlockup_panic)
- panic("Hard LOCKUP");
+ nmi_panic(regs, "Hard LOCKUP");
diff --git a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
index 608fb0e9c7ec00..5ff503d717e598 100644
--- a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+++ b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define task_contributes_to_load(task) \
((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
(task->flags & PF_FROZEN) == 0 && \
-@@ -2982,6 +2979,51 @@ static inline int signal_pending_state(l
+@@ -2984,6 +2981,51 @@ static inline int signal_pending_state(l
return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
diff --git a/patches/sched-cputime-Clarify-vtime-symbols-and-document-the.patch b/patches/sched-cputime-Clarify-vtime-symbols-and-document-the.patch
new file mode 100644
index 00000000000000..c312211c70c96c
--- /dev/null
+++ b/patches/sched-cputime-Clarify-vtime-symbols-and-document-the.patch
@@ -0,0 +1,89 @@
+From: Frederic Weisbecker <fweisbec@gmail.com>
+Date: Thu, 19 Nov 2015 16:47:30 +0100
+Subject: [PATCH] sched/cputime: Clarify vtime symbols and document them
+
+VTIME_SLEEPING state happens either when:
+
+1) The task is sleeping and no tickless delta is to be added on the task
+ cputime stats.
+2) The CPU isn't running vtime at all, so the same properties of 1) applies.
+
+Lets rename the vtime symbol to reflect both states.
+
+Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Chris Metcalf <cmetcalf@ezchip.com>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Luiz Capitulino <lcapitulino@redhat.com>
+Cc: Mike Galbraith <efault@gmx.de>
+Cc: Paul E . McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1447948054-28668-4-git-send-email-fweisbec@gmail.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/sched.h | 5 ++++-
+ kernel/fork.c | 2 +-
+ kernel/sched/cputime.c | 6 +++---
+ 3 files changed, 8 insertions(+), 5 deletions(-)
+
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1524,8 +1524,11 @@ struct task_struct {
+ seqlock_t vtime_seqlock;
+ unsigned long long vtime_snap;
+ enum {
+- VTIME_SLEEPING = 0,
++ /* Task is sleeping or running in a CPU with VTIME inactive */
++ VTIME_INACTIVE = 0,
++ /* Task runs in userspace in a CPU with VTIME active */
+ VTIME_USER,
++ /* Task runs in kernelspace in a CPU with VTIME active */
+ VTIME_SYS,
+ } vtime_snap_whence;
+ #endif
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1351,7 +1351,7 @@ static struct task_struct *copy_process(
+ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+ seqlock_init(&p->vtime_seqlock);
+ p->vtime_snap = 0;
+- p->vtime_snap_whence = VTIME_SLEEPING;
++ p->vtime_snap_whence = VTIME_INACTIVE;
+ #endif
+
+ #if defined(SPLIT_RSS_COUNTING)
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -680,7 +680,7 @@ static cputime_t get_vtime_delta(struct
+ {
+ unsigned long long delta = vtime_delta(tsk);
+
+- WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_SLEEPING);
++ WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
+ tsk->vtime_snap += delta;
+
+ /* CHECKME: always safe to convert nsecs to cputime? */
+@@ -764,7 +764,7 @@ void vtime_account_idle(struct task_stru
+ void arch_vtime_task_switch(struct task_struct *prev)
+ {
+ write_seqlock(&prev->vtime_seqlock);
+- prev->vtime_snap_whence = VTIME_SLEEPING;
++ prev->vtime_snap_whence = VTIME_INACTIVE;
+ write_sequnlock(&prev->vtime_seqlock);
+
+ write_seqlock(&current->vtime_seqlock);
+@@ -829,7 +829,7 @@ fetch_task_cputime(struct task_struct *t
+ *s_dst = *s_src;
+
+ /* Task is sleeping, nothing to add */
+- if (t->vtime_snap_whence == VTIME_SLEEPING ||
++ if (t->vtime_snap_whence == VTIME_INACTIVE ||
+ is_idle_task(t))
+ continue;
+
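As a reader-side illustration of the renamed states: a task only carries a
tickless delta worth adding while it runs on a vtime-active CPU. A minimal
sketch, assuming CONFIG_VIRT_CPU_ACCOUNTING_GEN; the helper name is made up
here, the checks mirror fetch_task_cputime() in the quoted patch:

/* Hypothetical helper, not part of the patch queue. */
static bool task_has_tickless_delta(struct task_struct *t)
{
	/* Sleeping, or running on a CPU with vtime inactive: nothing pending */
	if (t->vtime_snap_whence == VTIME_INACTIVE || is_idle_task(t))
		return false;

	/* VTIME_USER or VTIME_SYS: a tickless delta may still need adding */
	return true;
}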
diff --git a/patches/vtime-split-lock-and-seqcount.patch b/patches/sched-cputime-Convert-vtime_seqlock-to-seqcount.patch
index 1b792f26a42d92..148082500f4c3e 100644
--- a/patches/vtime-split-lock-and-seqcount.patch
+++ b/patches/sched-cputime-Convert-vtime_seqlock-to-seqcount.patch
@@ -1,79 +1,92 @@
-Subject: vtime: Split lock and seqcount
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Tue, 23 Jul 2013 15:45:51 +0200
+From: Frederic Weisbecker <fweisbec@gmail.com>
+Date: Thu, 19 Nov 2015 16:47:34 +0100
+Subject: [PATCH] sched/cputime: Convert vtime_seqlock to seqcount
-Replace vtime_seqlock seqlock with a simple seqcounter and a rawlock
-so it can taken in atomic context on RT.
+The cputime can only be updated by the current task itself, even in the
+vtime case, so we can safely use a seqcount instead of a seqlock: there
+is no writer concurrency involved (a sketch of the pattern follows below).
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+[ bigeasy: safe since 6a61671bb2f3 ("cputime: Safely read cputime of
+full dynticks CPUs") ]
+
+Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Chris Metcalf <cmetcalf@ezchip.com>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Luiz Capitulino <lcapitulino@redhat.com>
+Cc: Mike Galbraith <efault@gmx.de>
+Cc: Paul E . McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1447948054-28668-8-git-send-email-fweisbec@gmail.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/init_task.h | 3 +-
- include/linux/sched.h | 3 +-
- kernel/fork.c | 3 +-
- kernel/sched/cputime.c | 62 +++++++++++++++++++++++++++++-----------------
- 4 files changed, 46 insertions(+), 25 deletions(-)
+ include/linux/init_task.h | 2 +-
+ include/linux/sched.h | 2 +-
+ kernel/fork.c | 2 +-
+ kernel/sched/cputime.c | 46 ++++++++++++++++++++++++----------------------
+ 4 files changed, 27 insertions(+), 25 deletions(-)
+
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
-@@ -150,7 +150,8 @@ extern struct task_group root_task_group
+@@ -150,7 +150,7 @@ extern struct task_group root_task_group
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
# define INIT_VTIME(tsk) \
- .vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock), \
-+ .vtime_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.vtime_lock), \
-+ .vtime_seq = SEQCNT_ZERO(tsk.vtime_seq), \
++ .vtime_seqcount = SEQCNT_ZERO(tsk.vtime_seqcount), \
.vtime_snap = 0, \
.vtime_snap_whence = VTIME_SYS,
#else
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1519,7 +1519,8 @@ struct task_struct {
+@@ -1521,7 +1521,7 @@ struct task_struct {
cputime_t gtime;
struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
- seqlock_t vtime_seqlock;
-+ raw_spinlock_t vtime_lock;
-+ seqcount_t vtime_seq;
++ seqcount_t vtime_seqcount;
unsigned long long vtime_snap;
enum {
- VTIME_SLEEPING = 0,
+ /* Task is sleeping or running in a CPU with VTIME inactive */
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -1349,7 +1349,8 @@ static struct task_struct *copy_process(
+@@ -1349,7 +1349,7 @@ static struct task_struct *copy_process(
prev_cputime_init(&p->prev_cputime);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
- seqlock_init(&p->vtime_seqlock);
-+ raw_spin_lock_init(&p->vtime_lock);
-+ seqcount_init(&p->vtime_seq);
++ seqcount_init(&p->vtime_seqcount);
p->vtime_snap = 0;
- p->vtime_snap_whence = VTIME_SLEEPING;
+ p->vtime_snap_whence = VTIME_INACTIVE;
#endif
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
-@@ -696,37 +696,45 @@ static void __vtime_account_system(struc
+@@ -696,37 +696,37 @@ static void __vtime_account_system(struc
void vtime_account_system(struct task_struct *tsk)
{
- write_seqlock(&tsk->vtime_seqlock);
-+ raw_spin_lock(&tsk->vtime_lock);
-+ write_seqcount_begin(&tsk->vtime_seq);
++ write_seqcount_begin(&tsk->vtime_seqcount);
__vtime_account_system(tsk);
- write_sequnlock(&tsk->vtime_seqlock);
-+ write_seqcount_end(&tsk->vtime_seq);
-+ raw_spin_unlock(&tsk->vtime_lock);
++ write_seqcount_end(&tsk->vtime_seqcount);
}
void vtime_gen_account_irq_exit(struct task_struct *tsk)
{
- write_seqlock(&tsk->vtime_seqlock);
-+ raw_spin_lock(&tsk->vtime_lock);
-+ write_seqcount_begin(&tsk->vtime_seq);
++ write_seqcount_begin(&tsk->vtime_seqcount);
__vtime_account_system(tsk);
if (context_tracking_in_user())
tsk->vtime_snap_whence = VTIME_USER;
- write_sequnlock(&tsk->vtime_seqlock);
-+ write_seqcount_end(&tsk->vtime_seq);
-+ raw_spin_unlock(&tsk->vtime_lock);
++ write_seqcount_end(&tsk->vtime_seqcount);
}
void vtime_account_user(struct task_struct *tsk)
@@ -81,77 +94,65 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
cputime_t delta_cpu;
- write_seqlock(&tsk->vtime_seqlock);
-+ raw_spin_lock(&tsk->vtime_lock);
-+ write_seqcount_begin(&tsk->vtime_seq);
++ write_seqcount_begin(&tsk->vtime_seqcount);
delta_cpu = get_vtime_delta(tsk);
tsk->vtime_snap_whence = VTIME_SYS;
account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
- write_sequnlock(&tsk->vtime_seqlock);
-+ write_seqcount_end(&tsk->vtime_seq);
-+ raw_spin_unlock(&tsk->vtime_lock);
++ write_seqcount_end(&tsk->vtime_seqcount);
}
void vtime_user_enter(struct task_struct *tsk)
{
- write_seqlock(&tsk->vtime_seqlock);
-+ raw_spin_lock(&tsk->vtime_lock);
-+ write_seqcount_begin(&tsk->vtime_seq);
++ write_seqcount_begin(&tsk->vtime_seqcount);
__vtime_account_system(tsk);
tsk->vtime_snap_whence = VTIME_USER;
- write_sequnlock(&tsk->vtime_seqlock);
-+ write_seqcount_end(&tsk->vtime_seq);
-+ raw_spin_unlock(&tsk->vtime_lock);
++ write_seqcount_end(&tsk->vtime_seqcount);
}
void vtime_guest_enter(struct task_struct *tsk)
-@@ -738,19 +746,23 @@ void vtime_guest_enter(struct task_struc
+@@ -738,19 +738,19 @@ void vtime_guest_enter(struct task_struc
* synchronization against the reader (task_gtime())
* that can thus safely catch up with a tickless delta.
*/
- write_seqlock(&tsk->vtime_seqlock);
-+ raw_spin_lock(&tsk->vtime_lock);
-+ write_seqcount_begin(&tsk->vtime_seq);
++ write_seqcount_begin(&tsk->vtime_seqcount);
__vtime_account_system(tsk);
current->flags |= PF_VCPU;
- write_sequnlock(&tsk->vtime_seqlock);
-+ write_seqcount_end(&tsk->vtime_seq);
-+ raw_spin_unlock(&tsk->vtime_lock);
++ write_seqcount_end(&tsk->vtime_seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);
void vtime_guest_exit(struct task_struct *tsk)
{
- write_seqlock(&tsk->vtime_seqlock);
-+ raw_spin_lock(&tsk->vtime_lock);
-+ write_seqcount_begin(&tsk->vtime_seq);
++ write_seqcount_begin(&tsk->vtime_seqcount);
__vtime_account_system(tsk);
current->flags &= ~PF_VCPU;
- write_sequnlock(&tsk->vtime_seqlock);
-+ write_seqcount_end(&tsk->vtime_seq);
-+ raw_spin_unlock(&tsk->vtime_lock);
++ write_seqcount_end(&tsk->vtime_seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);
-@@ -763,24 +775,30 @@ void vtime_account_idle(struct task_stru
+@@ -763,24 +763,26 @@ void vtime_account_idle(struct task_stru
void arch_vtime_task_switch(struct task_struct *prev)
{
- write_seqlock(&prev->vtime_seqlock);
-+ raw_spin_lock(&prev->vtime_lock);
-+ write_seqcount_begin(&prev->vtime_seq);
- prev->vtime_snap_whence = VTIME_SLEEPING;
++ write_seqcount_begin(&prev->vtime_seqcount);
+ prev->vtime_snap_whence = VTIME_INACTIVE;
- write_sequnlock(&prev->vtime_seqlock);
-+ write_seqcount_end(&prev->vtime_seq);
-+ raw_spin_unlock(&prev->vtime_lock);
++ write_seqcount_end(&prev->vtime_seqcount);
- write_seqlock(&current->vtime_seqlock);
-+ raw_spin_lock(&current->vtime_lock);
-+ write_seqcount_begin(&current->vtime_seq);
++ write_seqcount_begin(&current->vtime_seqcount);
current->vtime_snap_whence = VTIME_SYS;
current->vtime_snap = sched_clock_cpu(smp_processor_id());
- write_sequnlock(&current->vtime_seqlock);
-+ write_seqcount_end(&current->vtime_seq);
-+ raw_spin_unlock(&current->vtime_lock);
++ write_seqcount_end(&current->vtime_seqcount);
}
void vtime_init_idle(struct task_struct *t, int cpu)
@@ -159,47 +160,47 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
unsigned long flags;
- write_seqlock_irqsave(&t->vtime_seqlock, flags);
-+ raw_spin_lock_irqsave(&t->vtime_lock, flags);
-+ write_seqcount_begin(&t->vtime_seq);
++ local_irq_save(flags);
++ write_seqcount_begin(&t->vtime_seqcount);
t->vtime_snap_whence = VTIME_SYS;
t->vtime_snap = sched_clock_cpu(cpu);
- write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
-+ write_seqcount_end(&t->vtime_seq);
-+ raw_spin_unlock_irqrestore(&t->vtime_lock, flags);
++ write_seqcount_end(&t->vtime_seqcount);
++ local_irq_restore(flags);
}
cputime_t task_gtime(struct task_struct *t)
-@@ -792,13 +810,13 @@ cputime_t task_gtime(struct task_struct
+@@ -792,13 +794,13 @@ cputime_t task_gtime(struct task_struct
return t->gtime;
do {
- seq = read_seqbegin(&t->vtime_seqlock);
-+ seq = read_seqcount_begin(&t->vtime_seq);
++ seq = read_seqcount_begin(&t->vtime_seqcount);
gtime = t->gtime;
if (t->flags & PF_VCPU)
gtime += vtime_delta(t);
- } while (read_seqretry(&t->vtime_seqlock, seq));
-+ } while (read_seqcount_retry(&t->vtime_seq, seq));
++ } while (read_seqcount_retry(&t->vtime_seqcount, seq));
return gtime;
}
-@@ -821,7 +839,7 @@ fetch_task_cputime(struct task_struct *t
+@@ -821,7 +823,7 @@ fetch_task_cputime(struct task_struct *t
*udelta = 0;
*sdelta = 0;
- seq = read_seqbegin(&t->vtime_seqlock);
-+ seq = read_seqcount_begin(&t->vtime_seq);
++ seq = read_seqcount_begin(&t->vtime_seqcount);
if (u_dst)
*u_dst = *u_src;
-@@ -845,7 +863,7 @@ fetch_task_cputime(struct task_struct *t
+@@ -845,7 +847,7 @@ fetch_task_cputime(struct task_struct *t
if (t->vtime_snap_whence == VTIME_SYS)
*sdelta = delta;
}
- } while (read_seqretry(&t->vtime_seqlock, seq));
-+ } while (read_seqcount_retry(&t->vtime_seq, seq));
++ } while (read_seqcount_retry(&t->vtime_seqcount, seq));
}
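To spell out the pattern the conversion settles on — a minimal sketch, not
from the queue; the two function names are illustrative, the seqcount calls
are the ones used above. The write side only ever runs in the context of the
task being updated, so no lock is required; readers retry if they race:

/* Writer: runs as the task itself, with interrupts off. */
static void vtime_snap_update(struct task_struct *tsk)
{
	write_seqcount_begin(&tsk->vtime_seqcount);
	tsk->vtime_snap = sched_clock_cpu(smp_processor_id());
	tsk->vtime_snap_whence = VTIME_SYS;
	write_seqcount_end(&tsk->vtime_seqcount);
}

/* Reader: any context; loops until it sees a consistent snapshot. */
static unsigned long long vtime_snap_read(struct task_struct *t)
{
	unsigned int seq;
	unsigned long long snap;

	do {
		seq = read_seqcount_begin(&t->vtime_seqcount);
		snap = t->vtime_snap;
	} while (read_seqcount_retry(&t->vtime_seqcount, seq));

	return snap;
}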
diff --git a/patches/sched-delay-put-task.patch b/patches/sched-delay-put-task.patch
index c7dd83a8ea4287..c175ef65b72106 100644
--- a/patches/sched-delay-put-task.patch
+++ b/patches/sched-delay-put-task.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1829,6 +1829,9 @@ struct task_struct {
+@@ -1831,6 +1831,9 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
#endif
-@@ -2038,6 +2041,15 @@ extern struct pid *cad_pid;
+@@ -2040,6 +2043,15 @@ extern struct pid *cad_pid;
extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
@@ -39,7 +39,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void __put_task_struct(struct task_struct *t);
static inline void put_task_struct(struct task_struct *t)
-@@ -2045,6 +2057,7 @@ static inline void put_task_struct(struc
+@@ -2047,6 +2059,7 @@ static inline void put_task_struct(struc
if (atomic_dec_and_test(&t->usage))
__put_task_struct(t);
}
diff --git a/patches/sched-might-sleep-do-not-account-rcu-depth.patch b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
index 4995a02f55c586..78ab45f37db76f 100644
--- a/patches/sched-might-sleep-do-not-account-rcu-depth.patch
+++ b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Internal to kernel */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7657,7 +7657,7 @@ void __init sched_init(void)
+@@ -7658,7 +7658,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
diff --git a/patches/sched-mmdrop-delayed.patch b/patches/sched-mmdrop-delayed.patch
index 0744a6b1a12bb4..8ffacbd2993f63 100644
--- a/patches/sched-mmdrop-delayed.patch
+++ b/patches/sched-mmdrop-delayed.patch
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void __user *bd_addr;
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2604,12 +2604,24 @@ extern struct mm_struct * mm_alloc(void)
+@@ -2606,12 +2606,24 @@ extern struct mm_struct * mm_alloc(void)
/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
@@ -120,7 +120,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -5631,6 +5641,10 @@ migration_call(struct notifier_block *nf
+@@ -5632,6 +5642,10 @@ migration_call(struct notifier_block *nf
case CPU_DEAD:
calc_load_migrate(rq);
diff --git a/patches/sched-provide-a-tsk_nr_cpus_allowed-helper.patch b/patches/sched-provide-a-tsk_nr_cpus_allowed-helper.patch
index f987b3aeb70064..358c84627ccff8 100644
--- a/patches/sched-provide-a-tsk_nr_cpus_allowed-helper.patch
+++ b/patches/sched-provide-a-tsk_nr_cpus_allowed-helper.patch
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1833,6 +1833,11 @@ extern int arch_task_struct_size __read_
+@@ -1836,6 +1836,11 @@ extern int arch_task_struct_size __read_
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
diff --git a/patches/sched-rt-mutex-wakeup.patch b/patches/sched-rt-mutex-wakeup.patch
index 8fad3e5de336eb..fd59d1672b516a 100644
--- a/patches/sched-rt-mutex-wakeup.patch
+++ b/patches/sched-rt-mutex-wakeup.patch
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *stack;
atomic_t usage;
unsigned int flags; /* per process flags, defined below */
-@@ -2481,6 +2482,7 @@ extern void xtime_update(unsigned long t
+@@ -2483,6 +2484,7 @@ extern void xtime_update(unsigned long t
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
diff --git a/patches/series b/patches/series
index 5ab8bb0246b523..6bce9da7dfb631 100644
--- a/patches/series
+++ b/patches/series
@@ -9,6 +9,11 @@ rtmutex-Make-wait_lock-irq-safe.patch
arm64-replace-read_lock-to-rcu-lock-in-call_step_hoo.patch
tracing-writeback-Replace-cgroup-path-to-cgroup-ino.patch
kvm-rt-change-async-pagefault-code-locking-for-PREEM.patch
+panic-x86-Fix-re-entrance-problem-due-to-panic-on-NM.patch
+panic-x86-Allow-CPUs-to-save-registers-even-if-loopi.patch
+panic-change-nmi_panic-from-macro-to-function.patch
+sched-cputime-Clarify-vtime-symbols-and-document-the.patch
+sched-cputime-Convert-vtime_seqlock-to-seqcount.patch
# AT91 queue in ARM-SOC
0001-clk-at91-make-use-of-syscon-to-share-PMC-registers-i.patch
@@ -109,9 +114,6 @@ block-shorten-interrupt-disabled-regions.patch
# Timekeeping split jiffies lock. Needs a good argument :)
timekeeping-split-jiffies-lock.patch
-# CHECKME: Should local_irq_enable() generally do a preemption check ?
-vtime-split-lock-and-seqcount.patch
-
# Tracing
tracing-account-for-preempt-off-in-preempt_schedule.patch
@@ -392,6 +394,7 @@ kernel-stop_machine-partly-revert-stop_machine-Use-r.patch
drivers-tty-fix-omap-lock-crap.patch
drivers-tty-pl011-irq-disable-madness.patch
rt-serial-warn-fix.patch
+tty-serial-8250-don-t-take-the-trylock-during-oops.patch
# SIMPLE WAITQUEUE
wait.h-include-atomic.h.patch
diff --git a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
index 96e8c4719d24c6..bf6bc953dd4259 100644
--- a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
+++ b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1566,6 +1566,7 @@ struct task_struct {
+@@ -1568,6 +1568,7 @@ struct task_struct {
/* signal handlers */
struct signal_struct *signal;
struct sighand_struct *sighand;
diff --git a/patches/softirq-split-locks.patch b/patches/softirq-split-locks.patch
index b2410346f7c9ce..e76ddde39ead9a 100644
--- a/patches/softirq-split-locks.patch
+++ b/patches/softirq-split-locks.patch
@@ -172,7 +172,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* Are we in NMI context?
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1832,6 +1832,8 @@ struct task_struct {
+@@ -1834,6 +1834,8 @@ struct task_struct {
#endif
#ifdef CONFIG_PREEMPT_RT_BASE
struct rcu_head put_rcu;
@@ -181,7 +181,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
-@@ -2097,6 +2099,7 @@ extern void thread_group_cputime_adjuste
+@@ -2099,6 +2101,7 @@ extern void thread_group_cputime_adjuste
/*
* Per process flags
*/
diff --git a/patches/suspend-prevernt-might-sleep-splats.patch b/patches/suspend-prevernt-might-sleep-splats.patch
index 089e64f423a98f..fb132e3aa3d42b 100644
--- a/patches/suspend-prevernt-might-sleep-splats.patch
+++ b/patches/suspend-prevernt-might-sleep-splats.patch
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
-@@ -473,6 +473,7 @@ extern enum system_states {
+@@ -482,6 +482,7 @@ extern enum system_states {
SYSTEM_HALT,
SYSTEM_POWER_OFF,
SYSTEM_RESTART,
@@ -52,7 +52,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_enable();
Enable_cpus:
-@@ -437,6 +440,7 @@ static int resume_target_kernel(bool pla
+@@ -438,6 +441,7 @@ static int resume_target_kernel(bool pla
goto Enable_cpus;
local_irq_disable();
@@ -60,7 +60,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
error = syscore_suspend();
if (error)
-@@ -470,6 +474,7 @@ static int resume_target_kernel(bool pla
+@@ -471,6 +475,7 @@ static int resume_target_kernel(bool pla
syscore_resume();
Enable_irqs:
@@ -68,7 +68,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_enable();
Enable_cpus:
-@@ -555,6 +560,7 @@ int hibernation_platform_enter(void)
+@@ -556,6 +561,7 @@ int hibernation_platform_enter(void)
goto Enable_cpus;
local_irq_disable();
@@ -76,7 +76,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
syscore_suspend();
if (pm_wakeup_pending()) {
error = -EAGAIN;
-@@ -567,6 +573,7 @@ int hibernation_platform_enter(void)
+@@ -568,6 +574,7 @@ int hibernation_platform_enter(void)
Power_up:
syscore_resume();
diff --git a/patches/trace_Use_rcuidle_version_for_preemptoff_hist_trace_point.patch b/patches/trace_Use_rcuidle_version_for_preemptoff_hist_trace_point.patch
index cc2e0deed35ab1..917920b68942b8 100644
--- a/patches/trace_Use_rcuidle_version_for_preemptoff_hist_trace_point.patch
+++ b/patches/trace_Use_rcuidle_version_for_preemptoff_hist_trace_point.patch
@@ -54,7 +54,7 @@ in 4.4-rt. It looks such fix is still needed.
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
-@@ -421,13 +421,13 @@ void start_critical_timings(void)
+@@ -425,13 +425,13 @@ void start_critical_timings(void)
{
if (preempt_trace() || irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
@@ -70,7 +70,7 @@ in 4.4-rt. It looks such fix is still needed.
if (preempt_trace() || irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
-@@ -437,7 +437,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings)
+@@ -441,7 +441,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings)
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
@@ -79,7 +79,7 @@ in 4.4-rt. It looks such fix is still needed.
if (!preempt_trace() && irq_trace())
stop_critical_timing(a0, a1);
}
-@@ -446,7 +446,7 @@ void time_hardirqs_off(unsigned long a0,
+@@ -450,7 +450,7 @@ void time_hardirqs_off(unsigned long a0,
{
if (!preempt_trace() && irq_trace())
start_critical_timing(a0, a1);
diff --git a/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch b/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
new file mode 100644
index 00000000000000..492612dfdd69bc
--- /dev/null
+++ b/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
@@ -0,0 +1,29 @@
+From 08552bb6e497a6f37a31884083cdd2c046d0f674 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 11 Apr 2016 16:55:02 +0200
+Subject: [PATCH] tty: serial: 8250: don't take the trylock during oops
+
+An oops with IRQs off (a panic() from an irqsafe hrtimer, such as the
+watchdog timer) triggers a lockdep warning on each console invocation,
+so the oops output never completes. Therefore we skip the trylock
+entirely in the oops case (the resulting logic is sketched below).
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/8250/8250_port.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -2843,10 +2843,8 @@ void serial8250_console_write(struct uar
+
+ serial8250_rpm_get(up);
+
+- if (port->sysrq)
++ if (port->sysrq || oops_in_progress)
+ locked = 0;
+- else if (oops_in_progress)
+- locked = spin_trylock_irqsave(&port->lock, flags);
+ else
+ spin_lock_irqsave(&port->lock, flags);
+
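Pulled out of the hunk above for clarity, serial8250_console_write() now
decides once whether to lock at all; the unlock tail is sketched from the
surrounding function and is not part of the hunk itself:

	if (port->sysrq || oops_in_progress)
		locked = 0;	/* print without taking port->lock */
	else
		spin_lock_irqsave(&port->lock, flags);

	/* ... write the console string to the port ... */

	if (locked)
		spin_unlock_irqrestore(&port->lock, flags);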
diff --git a/patches/x86-kvm-require-const-tsc-for-rt.patch b/patches/x86-kvm-require-const-tsc-for-rt.patch
index 5fa84b0893b128..0df27e3f7e2a7e 100644
--- a/patches/x86-kvm-require-const-tsc-for-rt.patch
+++ b/patches/x86-kvm-require-const-tsc-for-rt.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -5788,6 +5788,13 @@ int kvm_arch_init(void *opaque)
+@@ -5789,6 +5789,13 @@ int kvm_arch_init(void *opaque)
goto out;
}