summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSebastian Andrzej Siewior <bigeasy@linutronix.de>2016-02-04 21:33:13 +0100
committerSebastian Andrzej Siewior <bigeasy@linutronix.de>2016-02-04 21:36:35 +0100
commitd8dccc069daa6d14f605b478321c29b8a9f6869c (patch)
treee2b398827b0349c4b793a50ff25c31c84d77b2f0
parent80cacc1f9455036617b76da3556e2226eac9a1c3 (diff)
download4.12-rt-patches-d8dccc069daa6d14f605b478321c29b8a9f6869c.tar.gz
[ANNOUNCE] 4.4.1-rt5
Dear RT folks! I'm pleased to announce the v4.4.1-rt5 patch set. Changes since v4.4.1-rt4: - various compile fixes found by kbuild test robot. - Mike Galbraith spotted that migrate_disable() sets ->nr_cpus_allowed to one by accident. - Christoph Mathys reported that the "preemptirqsoff_hist" tracer reboots the system once enabled. The problem has been resolved. - Thomas Gleixner sent a patch to set a default affinity mask for interrupts via the kernel command line. - Yang Shi reported that some perf events were reported as "not counted" instead of the actual numbers. The problem has been resolved. Known issues: - bcache stays disabled - CPU hotplug is not better than before - The netlink_release() OOPS, reported by Clark, is still on the list, but unsolved due to lack of information The delta patch against 4.4.1-rt4 is appended below and can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/incr/patch-4.4.1-rt4-rt5.patch.xz You can get this release via the git tree at: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.4.1-rt5 The RT patch against 4.4.1 can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/patch-4.4.1-rt5.patch.xz The split quilt queue is available at: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz Sebastian Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-rw-r--r--patches/HACK-printk-drop-the-logbuf_lock-more-often.patch10
-rw-r--r--patches/completion-use-simple-wait-queues.patch2
-rw-r--r--patches/cond-resched-softirq-rt.patch4
-rw-r--r--patches/cpu-rt-rework-cpu-down.patch22
-rw-r--r--patches/genirq-Add-default-affinity-mask-command-line-option.patch67
-rw-r--r--patches/introduce_migrate_disable_cpu_light.patch13
-rw-r--r--patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch25
-rw-r--r--patches/latencyhist-disable-jump-labels.patch61
-rw-r--r--patches/localversion.patch4
-rw-r--r--patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch2
-rw-r--r--patches/preempt-lazy-check-preempt_schedule.patch6
-rw-r--r--patches/preempt-lazy-support.patch10
-rw-r--r--patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch2
-rw-r--r--patches/printk-kill.patch23
-rw-r--r--patches/printk-rt-aware.patch14
-rw-r--r--patches/sched-might-sleep-do-not-account-rcu-depth.patch2
-rw-r--r--patches/sched-mmdrop-delayed.patch6
-rw-r--r--patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch2
-rw-r--r--patches/series4
-rw-r--r--patches/workqueue-distangle-from-rq-lock.patch8
-rw-r--r--patches/workqueue-prevent-deadlock-stall.patch4
21 files changed, 215 insertions, 76 deletions
diff --git a/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch b/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch
index 2e005d172b8313..d490e62263c5fe 100644
--- a/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch
+++ b/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch
@@ -12,7 +12,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1203,6 +1203,7 @@ static int syslog_print_all(char __user
+@@ -1262,6 +1262,7 @@ static int syslog_print_all(char __user
{
char *text;
int len = 0;
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
if (!text)
-@@ -1214,7 +1215,14 @@ static int syslog_print_all(char __user
+@@ -1273,7 +1274,14 @@ static int syslog_print_all(char __user
u64 seq;
u32 idx;
enum log_flags prev;
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (clear_seq < log_first_seq) {
/* messages are gone, move to first available one */
clear_seq = log_first_seq;
-@@ -1235,6 +1243,14 @@ static int syslog_print_all(char __user
+@@ -1294,6 +1302,14 @@ static int syslog_print_all(char __user
prev = msg->flags;
idx = log_next(idx);
seq++;
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/* move first record forward until length fits into the buffer */
-@@ -1248,6 +1264,14 @@ static int syslog_print_all(char __user
+@@ -1307,6 +1323,14 @@ static int syslog_print_all(char __user
prev = msg->flags;
idx = log_next(idx);
seq++;
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/* last message fitting into this dump */
-@@ -1288,6 +1312,7 @@ static int syslog_print_all(char __user
+@@ -1347,6 +1371,7 @@ static int syslog_print_all(char __user
clear_seq = log_next_seq;
clear_idx = log_next_idx;
}
diff --git a/patches/completion-use-simple-wait-queues.patch b/patches/completion-use-simple-wait-queues.patch
index b7df99b5f255fb..6c7a61724989ae 100644
--- a/patches/completion-use-simple-wait-queues.patch
+++ b/patches/completion-use-simple-wait-queues.patch
@@ -210,7 +210,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
if (p->migrate_disable) {
-@@ -3130,7 +3133,10 @@ void migrate_enable(void)
+@@ -3129,7 +3132,10 @@ void migrate_enable(void)
}
#ifdef CONFIG_SCHED_DEBUG
diff --git a/patches/cond-resched-softirq-rt.patch b/patches/cond-resched-softirq-rt.patch
index 65753b03d11636..469e9794fac8a5 100644
--- a/patches/cond-resched-softirq-rt.patch
+++ b/patches/cond-resched-softirq-rt.patch
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4771,6 +4771,7 @@ int __cond_resched_lock(spinlock_t *lock
+@@ -4770,6 +4770,7 @@ int __cond_resched_lock(spinlock_t *lock
}
EXPORT_SYMBOL(__cond_resched_lock);
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int __sched __cond_resched_softirq(void)
{
BUG_ON(!in_softirq());
-@@ -4784,6 +4785,7 @@ int __sched __cond_resched_softirq(void)
+@@ -4783,6 +4784,7 @@ int __sched __cond_resched_softirq(void)
return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);
diff --git a/patches/cpu-rt-rework-cpu-down.patch b/patches/cpu-rt-rework-cpu-down.patch
index 14535d661be54b..7d3cca283425ad 100644
--- a/patches/cpu-rt-rework-cpu-down.patch
+++ b/patches/cpu-rt-rework-cpu-down.patch
@@ -51,8 +51,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/sched.h | 7 +
kernel/cpu.c | 240 ++++++++++++++++++++++++++++++++++++++++----------
- kernel/sched/core.c | 82 ++++++++++++++++-
- 3 files changed, 283 insertions(+), 46 deletions(-)
+ kernel/sched/core.c | 78 ++++++++++++++++
+ 3 files changed, 281 insertions(+), 44 deletions(-)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -527,21 +527,3 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Change a given task's CPU affinity. Migrate the thread to a
* proper CPU and schedule it away if the CPU it's executing on
-@@ -3044,7 +3122,7 @@ void migrate_disable(void)
- {
- struct task_struct *p = current;
-
-- if (in_atomic() || p->flags & PF_NO_SETAFFINITY) {
-+ if (in_atomic()) {
- #ifdef CONFIG_SCHED_DEBUG
- p->migrate_disable_atomic++;
- #endif
-@@ -3075,7 +3153,7 @@ void migrate_enable(void)
- {
- struct task_struct *p = current;
-
-- if (in_atomic() || p->flags & PF_NO_SETAFFINITY) {
-+ if (in_atomic()) {
- #ifdef CONFIG_SCHED_DEBUG
- p->migrate_disable_atomic--;
- #endif
diff --git a/patches/genirq-Add-default-affinity-mask-command-line-option.patch b/patches/genirq-Add-default-affinity-mask-command-line-option.patch
new file mode 100644
index 00000000000000..f370b37fe7b856
--- /dev/null
+++ b/patches/genirq-Add-default-affinity-mask-command-line-option.patch
@@ -0,0 +1,67 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 25 May 2012 16:59:47 +0200
+Subject: genirq: Add default affinity mask command line option
+
+If we isolate CPUs, then we don't want random device interrupts on them. Even
+w/o the user space irq balancer enabled we can end up with irqs on non boot
+cpus and chasing newly requested interrupts is a tedious task.
+
+Allow to restrict the default irq affinity mask.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ Documentation/kernel-parameters.txt | 9 +++++++++
+ kernel/irq/irqdesc.c | 21 +++++++++++++++++++--
+ 2 files changed, 28 insertions(+), 2 deletions(-)
+
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -1629,6 +1629,15 @@ bytes respectively. Such letter suffixes
+ ip= [IP_PNP]
+ See Documentation/filesystems/nfs/nfsroot.txt.
+
++ irqaffinity= [SMP] Set the default irq affinity mask
++ Format:
++ <cpu number>,...,<cpu number>
++ or
++ <cpu number>-<cpu number>
++ (must be a positive range in ascending order)
++ or a mixture
++ <cpu number>,...,<cpu number>-<cpu number>
++
+ irqfixup [HW]
+ When an interrupt is not handled search all handlers
+ for it. Intended to get systems with badly broken
+--- a/kernel/irq/irqdesc.c
++++ b/kernel/irq/irqdesc.c
+@@ -24,10 +24,27 @@
+ static struct lock_class_key irq_desc_lock_class;
+
+ #if defined(CONFIG_SMP)
++static int __init irq_affinity_setup(char *str)
++{
++ zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
++ cpulist_parse(str, irq_default_affinity);
++ /*
++ * Set at least the boot cpu. We don't want to end up with
++ * bug reports caused by random commandline masks
++ */
++ cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
++ return 1;
++}
++__setup("irqaffinity=", irq_affinity_setup);
++
+ static void __init init_irq_default_affinity(void)
+ {
+- alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
+- cpumask_setall(irq_default_affinity);
++#ifdef CONFIG_CPUMASK_OFFSTACK
++ if (!irq_default_affinity)
++ zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
++#endif
++ if (cpumask_empty(irq_default_affinity))
++ cpumask_setall(irq_default_affinity);
+ }
+ #else
+ static void __init init_irq_default_affinity(void)
diff --git a/patches/introduce_migrate_disable_cpu_light.patch b/patches/introduce_migrate_disable_cpu_light.patch
index 9799cb704499be..d80c1e713c54b8 100644
--- a/patches/introduce_migrate_disable_cpu_light.patch
+++ b/patches/introduce_migrate_disable_cpu_light.patch
@@ -32,12 +32,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/cpu.h | 3 ++
include/linux/preempt.h | 9 ++++++
- include/linux/sched.h | 39 ++++++++++++++++++++------
+ include/linux/sched.h | 39 +++++++++++++++++++++-----
include/linux/smp.h | 3 ++
- kernel/sched/core.c | 71 +++++++++++++++++++++++++++++++++++++++++++++++-
+ kernel/sched/core.c | 70 +++++++++++++++++++++++++++++++++++++++++++++++-
kernel/sched/debug.c | 7 ++++
lib/smp_processor_id.c | 5 ++-
- 7 files changed, 126 insertions(+), 11 deletions(-)
+ 7 files changed, 125 insertions(+), 11 deletions(-)
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -171,7 +171,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
goto out;
dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-@@ -3022,6 +3027,70 @@ static inline void schedule_debug(struct
+@@ -3022,6 +3027,69 @@ static inline void schedule_debug(struct
schedstat_inc(this_rq(), sched_count);
}
@@ -181,7 +181,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+{
+ struct task_struct *p = current;
+
-+ if (in_atomic() || p->flags & PF_NO_SETAFFINITY) {
++ if (in_atomic()) {
+#ifdef CONFIG_SCHED_DEBUG
+ p->migrate_disable_atomic++;
+#endif
@@ -200,7 +200,6 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ preempt_disable();
+ pin_current_cpu();
+ p->migrate_disable = 1;
-+ p->nr_cpus_allowed = 1;
+ preempt_enable();
+}
+EXPORT_SYMBOL(migrate_disable);
@@ -209,7 +208,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+{
+ struct task_struct *p = current;
+
-+ if (in_atomic() || p->flags & PF_NO_SETAFFINITY) {
++ if (in_atomic()) {
+#ifdef CONFIG_SCHED_DEBUG
+ p->migrate_disable_atomic--;
+#endif
diff --git a/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch b/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
new file mode 100644
index 00000000000000..5658b8772de868
--- /dev/null
+++ b/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
@@ -0,0 +1,25 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 4 Feb 2016 16:38:10 +0100
+Subject: [PATCH] kernel/perf: mark perf_cpu_context's timer as irqsafe
+
+Otherwise we get a WARN_ON() backtrace and some events are reported as
+"not counted".
+
+Cc: stable-rt@vger.kernel.org
+Reported-by: Yang Shi <yang.shi@linaro.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/events/core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -802,6 +802,7 @@ static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
+ raw_spin_lock_init(&cpuctx->hrtimer_lock);
+ hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
+ timer->function = perf_mux_hrtimer_handler;
++ timer->irqsafe = 1;
+ }
+
+ static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
diff --git a/patches/latencyhist-disable-jump-labels.patch b/patches/latencyhist-disable-jump-labels.patch
new file mode 100644
index 00000000000000..5e3e6e9e4c5e43
--- /dev/null
+++ b/patches/latencyhist-disable-jump-labels.patch
@@ -0,0 +1,61 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 4 Feb 2016 14:08:06 +0100
+Subject: latencyhist: disable jump-labels
+
+At least on X86 we die a recursive death
+
+|CPU: 3 PID: 585 Comm: bash Not tainted 4.4.1-rt4+ #198
+|Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS Debian-1.8.2-1 04/01/2014
+|task: ffff88007ab4cd00 ti: ffff88007ab94000 task.ti: ffff88007ab94000
+|RIP: 0010:[<ffffffff81684870>] [<ffffffff81684870>] int3+0x0/0x10
+|RSP: 0018:ffff88013c107fd8 EFLAGS: 00010082
+|RAX: ffff88007ab4cd00 RBX: ffffffff8100ceab RCX: 0000000080202001
+|RDX: 0000000000000000 RSI: ffffffff8100ceab RDI: ffffffff810c78b2
+|RBP: ffff88007ab97c10 R08: ffffffffff57b000 R09: 0000000000000000
+|R10: ffff88013bb64790 R11: ffff88007ab4cd68 R12: ffffffff8100ceab
+|R13: ffffffff810c78b2 R14: ffffffff810f8158 R15: ffffffff810f9120
+|FS: 0000000000000000(0000) GS:ffff88013c100000(0063) knlGS:00000000f74e3940
+|CS: 0010 DS: 002b ES: 002b CR0: 000000008005003b
+|CR2: 0000000008cf6008 CR3: 000000013b169000 CR4: 00000000000006e0
+|Call Trace:
+| <#DB>
+| [<ffffffff810f8158>] ? trace_preempt_off+0x18/0x170
+| <<EOE>>
+| [<ffffffff81077745>] preempt_count_add+0xa5/0xc0
+| [<ffffffff810c78b2>] on_each_cpu+0x22/0x90
+| [<ffffffff8100ceab>] text_poke_bp+0x5b/0xc0
+| [<ffffffff8100a29c>] arch_jump_label_transform+0x8c/0xf0
+| [<ffffffff8111c77c>] __jump_label_update+0x6c/0x80
+| [<ffffffff8111c83a>] jump_label_update+0xaa/0xc0
+| [<ffffffff8111ca54>] static_key_slow_inc+0x94/0xa0
+| [<ffffffff810e0d8d>] tracepoint_probe_register_prio+0x26d/0x2c0
+| [<ffffffff810e0df3>] tracepoint_probe_register+0x13/0x20
+| [<ffffffff810fca78>] trace_event_reg+0x98/0xd0
+| [<ffffffff810fcc8b>] __ftrace_event_enable_disable+0x6b/0x180
+| [<ffffffff810fd5b8>] event_enable_write+0x78/0xc0
+| [<ffffffff8117a768>] __vfs_write+0x28/0xe0
+| [<ffffffff8117b025>] vfs_write+0xa5/0x180
+| [<ffffffff8117bb76>] SyS_write+0x46/0xa0
+| [<ffffffff81002c91>] do_fast_syscall_32+0xa1/0x1d0
+| [<ffffffff81684d57>] sysenter_flags_fixed+0xd/0x17
+
+during
+ echo 1 > /sys/kernel/debug/tracing/events/hist/preemptirqsoff_hist/enable
+
+Reported-By: Christoph Mathys <eraserix@gmail.com>
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -52,6 +52,7 @@ config KPROBES
+ config JUMP_LABEL
+ bool "Optimize very unlikely/likely branches"
+ depends on HAVE_ARCH_JUMP_LABEL
++ depends on (!INTERRUPT_OFF_HIST && !PREEMPT_OFF_HIST && !WAKEUP_LATENCY_HIST && !MISSED_TIMER_OFFSETS_HIST)
+ help
+ This option enables a transparent branch optimization that
+ makes certain almost-always-true or almost-always-false branch
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 52a0267b4a27f3..5443ec26c1a57d 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -1,4 +1,4 @@
-Subject: v4.4.1-rt4
+Subject: v4.4.1-rt5
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 08 Jul 2011 20:25:16 +0200
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt4
++-rt5
diff --git a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
index b80cc17664860b..208c29c1fc9c20 100644
--- a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
+++ b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
@@ -93,7 +93,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ __this_cpu_inc(xmit_recursion);
+}
+
-+static inline int xmit_rec_dec(void)
++static inline void xmit_rec_dec(void)
+{
+ __this_cpu_dec(xmit_recursion);
+}
diff --git a/patches/preempt-lazy-check-preempt_schedule.patch b/patches/preempt-lazy-check-preempt_schedule.patch
index e932f49e0f301f..cd3f869cdfd541 100644
--- a/patches/preempt-lazy-check-preempt_schedule.patch
+++ b/patches/preempt-lazy-check-preempt_schedule.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3461,6 +3461,30 @@ static void __sched notrace preempt_sche
+@@ -3460,6 +3460,30 @@ static void __sched notrace preempt_sche
} while (need_resched());
}
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_PREEMPT
/*
* this is the entry point to schedule() from in-kernel preemption
-@@ -3475,6 +3499,8 @@ asmlinkage __visible void __sched notrac
+@@ -3474,6 +3498,8 @@ asmlinkage __visible void __sched notrac
*/
if (likely(!preemptible()))
return;
@@ -53,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
preempt_schedule_common();
}
-@@ -3501,15 +3527,9 @@ asmlinkage __visible void __sched notrac
+@@ -3500,15 +3526,9 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 0b6b29cb070211..fff441a11e2c62 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -313,8 +313,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ preempt_lazy_disable();
pin_current_cpu();
p->migrate_disable = 1;
- p->nr_cpus_allowed = 1;
-@@ -3182,6 +3218,7 @@ void migrate_enable(void)
+ preempt_enable();
+@@ -3181,6 +3217,7 @@ void migrate_enable(void)
unpin_current_cpu();
preempt_enable();
@@ -322,7 +322,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(migrate_enable);
#endif
-@@ -3319,6 +3356,7 @@ static void __sched notrace __schedule(b
+@@ -3318,6 +3355,7 @@ static void __sched notrace __schedule(b
next = pick_next_task(rq, prev);
clear_tsk_need_resched(prev);
@@ -330,7 +330,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_preempt_need_resched();
rq->clock_skip_update = 0;
-@@ -3464,6 +3502,14 @@ asmlinkage __visible void __sched notrac
+@@ -3463,6 +3501,14 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
@@ -345,7 +345,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
preempt_disable_notrace();
/*
-@@ -5204,7 +5250,9 @@ void init_idle(struct task_struct *idle,
+@@ -5203,7 +5249,9 @@ void init_idle(struct task_struct *idle,
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
diff --git a/patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch b/patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
index 0ad66cbb99fcbe..432688fd87263c 100644
--- a/patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
+++ b/patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
@@ -15,7 +15,7 @@ Link: http://lkml.kernel.org/n/tip-ykb97nsfmobq44xketrxs977@git.kernel.org
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1686,6 +1686,13 @@ asmlinkage void early_printk(const char
+@@ -271,6 +271,13 @@ asmlinkage void early_printk(const char
*/
static bool __read_mostly printk_killswitch;
diff --git a/patches/printk-kill.patch b/patches/printk-kill.patch
index 7715c6ccc9a90d..fa3175f45c4930 100644
--- a/patches/printk-kill.patch
+++ b/patches/printk-kill.patch
@@ -8,9 +8,9 @@ it does not dead-lock with the early printk code.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/printk.h | 2 +
- kernel/printk/printk.c | 76 ++++++++++++++++++++++++++++++++++++-------------
+ kernel/printk/printk.c | 79 ++++++++++++++++++++++++++++++++++++-------------
kernel/watchdog.c | 10 ++++++
- 3 files changed, 68 insertions(+), 20 deletions(-)
+ 3 files changed, 71 insertions(+), 20 deletions(-)
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -28,9 +28,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args);
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1656,6 +1656,55 @@ static size_t cont_print_text(char *text
- return textlen;
- }
+@@ -241,6 +241,58 @@ struct printk_log {
+ */
+ static DEFINE_RAW_SPINLOCK(logbuf_lock);
+#ifdef CONFIG_EARLY_PRINTK
+struct console *early_console;
@@ -67,6 +67,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ printk_killswitch = true;
+}
+
++#ifdef CONFIG_PRINTK
+static int forced_early_printk(const char *fmt, va_list ap)
+{
+ if (!printk_killswitch)
@@ -74,6 +75,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ early_vprintk(fmt, ap);
+ return 1;
+}
++#endif
++
+#else
+static inline int forced_early_printk(const char *fmt, va_list ap)
+{
@@ -81,10 +84,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+}
+#endif
+
- asmlinkage int vprintk_emit(int facility, int level,
- const char *dict, size_t dictlen,
- const char *fmt, va_list args)
-@@ -1672,6 +1721,13 @@ asmlinkage int vprintk_emit(int facility
+ #ifdef CONFIG_PRINTK
+ DECLARE_WAIT_QUEUE_HEAD(log_wait);
+ /* the next printk record to read by syslog(READ) or /proc/kmsg */
+@@ -1672,6 +1724,13 @@ asmlinkage int vprintk_emit(int facility
/* cpu currently holding logbuf_lock in this function */
static unsigned int logbuf_cpu = UINT_MAX;
@@ -98,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (level == LOGLEVEL_SCHED) {
level = LOGLEVEL_DEFAULT;
in_sched = true;
-@@ -1961,26 +2017,6 @@ DEFINE_PER_CPU(printk_func_t, printk_fun
+@@ -1961,26 +2020,6 @@ DEFINE_PER_CPU(printk_func_t, printk_fun
#endif /* CONFIG_PRINTK */
diff --git a/patches/printk-rt-aware.patch b/patches/printk-rt-aware.patch
index 8fa8586d520491..25e54a185883b6 100644
--- a/patches/printk-rt-aware.patch
+++ b/patches/printk-rt-aware.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1443,6 +1443,7 @@ static void call_console_drivers(int lev
+@@ -1502,6 +1502,7 @@ static void call_console_drivers(int lev
if (!console_drivers)
return;
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for_each_console(con) {
if (exclusive_console && con != exclusive_console)
continue;
-@@ -1458,6 +1459,7 @@ static void call_console_drivers(int lev
+@@ -1517,6 +1518,7 @@ static void call_console_drivers(int lev
else
con->write(con, text, len);
}
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -1518,6 +1520,15 @@ static inline int can_use_console(unsign
+@@ -1577,6 +1579,15 @@ static inline int can_use_console(unsign
static int console_trylock_for_printk(void)
{
unsigned int cpu = smp_processor_id();
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!console_trylock())
return 0;
-@@ -1876,8 +1887,7 @@ asmlinkage int vprintk_emit(int facility
+@@ -1879,8 +1890,7 @@ asmlinkage int vprintk_emit(int facility
* console_sem which would prevent anyone from printing to
* console
*/
@@ -54,7 +54,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Try to acquire and then immediately release the console
* semaphore. The release will print out buffers and wake up
-@@ -1885,7 +1895,7 @@ asmlinkage int vprintk_emit(int facility
+@@ -1888,7 +1898,7 @@ asmlinkage int vprintk_emit(int facility
*/
if (console_trylock_for_printk())
console_unlock();
@@ -63,7 +63,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
lockdep_on();
}
-@@ -2245,11 +2255,16 @@ static void console_cont_flush(char *tex
+@@ -2248,11 +2258,16 @@ static void console_cont_flush(char *tex
goto out;
len = cont_print_text(text, size);
@@ -80,7 +80,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
out:
raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-@@ -2348,12 +2363,17 @@ void console_unlock(void)
+@@ -2351,12 +2366,17 @@ void console_unlock(void)
console_idx = log_next(console_idx);
console_seq++;
console_prev = msg->flags;
diff --git a/patches/sched-might-sleep-do-not-account-rcu-depth.patch b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
index 32015ea01dc216..0cb168055d1d39 100644
--- a/patches/sched-might-sleep-do-not-account-rcu-depth.patch
+++ b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Internal to kernel */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7668,7 +7668,7 @@ void __init sched_init(void)
+@@ -7667,7 +7667,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
diff --git a/patches/sched-mmdrop-delayed.patch b/patches/sched-mmdrop-delayed.patch
index ef97c152881fc4..57e895a6321d16 100644
--- a/patches/sched-mmdrop-delayed.patch
+++ b/patches/sched-mmdrop-delayed.patch
@@ -98,7 +98,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (unlikely(prev_state == TASK_DEAD)) {
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
-@@ -5256,6 +5260,8 @@ void sched_setnuma(struct task_struct *p
+@@ -5255,6 +5259,8 @@ void sched_setnuma(struct task_struct *p
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_HOTPLUG_CPU
@@ -107,7 +107,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Ensures that the idle task is using init_mm right before its cpu goes
* offline.
-@@ -5270,7 +5276,11 @@ void idle_task_exit(void)
+@@ -5269,7 +5275,11 @@ void idle_task_exit(void)
switch_mm(mm, &init_mm, current);
finish_arch_post_lock_switch();
}
@@ -120,7 +120,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -5642,6 +5652,10 @@ migration_call(struct notifier_block *nf
+@@ -5641,6 +5651,10 @@ migration_call(struct notifier_block *nf
case CPU_DEAD:
calc_load_migrate(rq);
diff --git a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
index 0bcde6e4e96ae2..39553775c30678 100644
--- a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
+++ b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3265,8 +3265,10 @@ static void __sched notrace __schedule(b
+@@ -3264,8 +3264,10 @@ static void __sched notrace __schedule(b
* If a worker went to sleep, notify and ask workqueue
* whether it wants to wake up a task to maintain
* concurrency.
diff --git a/patches/series b/patches/series
index 1d6c181dddb63e..389bac0609f9ba 100644
--- a/patches/series
+++ b/patches/series
@@ -28,12 +28,12 @@ rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patc
############################################################
# Stuff broken upstream, need to be sent
############################################################
-
rtmutex--Handle-non-enqueued-waiters-gracefully
############################################################
# Submitted on LKML
############################################################
+genirq-Add-default-affinity-mask-command-line-option.patch
# SPARC part of erly printk consolidation
sparc64-use-generic-rwsem-spinlocks-rt.patch
@@ -138,6 +138,7 @@ pci-access-use-__wake_up_all_locked.patch
# ANON RW SEMAPHORES
# TRACING
+latencyhist-disable-jump-labels.patch
latency-hist.patch
latency_hist-update-sched_wakeup-probe.patch
trace-latency-hist-Consider-new-argument-when-probin.patch
@@ -524,6 +525,7 @@ lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
# PERF
perf-make-swevent-hrtimer-irqsafe.patch
+kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
# RCU
rcu-disable-rcu-fast-no-hz-on-rt.patch
diff --git a/patches/workqueue-distangle-from-rq-lock.patch b/patches/workqueue-distangle-from-rq-lock.patch
index 68a5d2af599963..8ceae86231e51e 100644
--- a/patches/workqueue-distangle-from-rq-lock.patch
+++ b/patches/workqueue-distangle-from-rq-lock.patch
@@ -95,7 +95,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
*
-@@ -3282,21 +3232,6 @@ static void __sched notrace __schedule(b
+@@ -3281,21 +3231,6 @@ static void __sched notrace __schedule(b
} else {
deactivate_task(rq, prev, DEQUEUE_SLEEP);
prev->on_rq = 0;
@@ -117,7 +117,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
switch_count = &prev->nvcsw;
}
-@@ -3329,6 +3264,14 @@ static inline void sched_submit_work(str
+@@ -3328,6 +3263,14 @@ static inline void sched_submit_work(str
{
if (!tsk->state || tsk_is_pi_blocked(tsk))
return;
@@ -132,7 +132,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
-@@ -3337,6 +3280,12 @@ static inline void sched_submit_work(str
+@@ -3336,6 +3279,12 @@ static inline void sched_submit_work(str
blk_schedule_flush_plug(tsk);
}
@@ -145,7 +145,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
asmlinkage __visible void __sched schedule(void)
{
struct task_struct *tsk = current;
-@@ -3347,6 +3296,7 @@ asmlinkage __visible void __sched schedu
+@@ -3346,6 +3295,7 @@ asmlinkage __visible void __sched schedu
__schedule(false);
sched_preempt_enable_no_resched();
} while (need_resched());
diff --git a/patches/workqueue-prevent-deadlock-stall.patch b/patches/workqueue-prevent-deadlock-stall.patch
index 590931ab7386f0..26765f6e834a77 100644
--- a/patches/workqueue-prevent-deadlock-stall.patch
+++ b/patches/workqueue-prevent-deadlock-stall.patch
@@ -43,7 +43,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3378,9 +3378,8 @@ static void __sched notrace __schedule(b
+@@ -3377,9 +3377,8 @@ static void __sched notrace __schedule(b
static inline void sched_submit_work(struct task_struct *tsk)
{
@@ -54,7 +54,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
/*
* If a worker went to sleep, notify and ask workqueue whether
* it wants to wake up a task to maintain concurrency.
-@@ -3388,6 +3387,10 @@ static inline void sched_submit_work(str
+@@ -3387,6 +3386,10 @@ static inline void sched_submit_work(str
if (tsk->flags & PF_WQ_WORKER)
wq_worker_sleeping(tsk);