author    Thomas Gleixner <tglx@linutronix.de>  2015-10-31 15:59:49 +0100
committer Thomas Gleixner <tglx@linutronix.de>  2015-10-31 15:59:49 +0100
commit    1909cd47a5513a74456f85e17bf6edc92db04b95 (patch)
tree      9b8beb73da65a8a31dc12d93d15fc5dcadee004a
parent    1390972267496f6019f4f808b1addc9ca07e08ae (diff)
download  4.9-rt-patches-1909cd47a5513a74456f85e17bf6edc92db04b95.tar.gz
[ANNOUNCE] 4.1.10-rt11
Dear RT folks!

I'm pleased to announce the v4.1.10-rt11 patch set.

Changes since v4.1.10-rt10:

Eric Dumazet (1):
      inet: fix potential deadlock in reqsk_queue_unlink()

Josh Cartwright (1):
      net: Make synchronize_rcu_expedited() conditional on !RT_FULL

Mathieu Desnoyers (1):
      latency_hist: Update sched_wakeup probe

Peter Zijlstra (1):
      sched: Introduce the trace_sched_waking tracepoint

Thomas Gleixner (2):
      softirq: Sanitize local_bh_[en|dis]able for RT
      v4.1.10-rt11

Yang Shi (1):
      trace: Add missing tracer macros

Known issues:

 - bcache stays disabled

 - CPU hotplug is not better than before

 - The netlink_release() OOPS, reported by Clark, is still on the
   list, but unsolved due to lack of information

The delta patch against 4.1.10-rt10 is appended below and can be found here:

   https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.1/incr/patch-4.1.10-rt10-rt11.patch.xz

You can get this release via the git tree at:

   git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.1.10-rt11

The RT patch against 4.1.10 can be found here:

   https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.1/patch-4.1.10-rt11.patch.xz

The split quilt queue is available at:

   https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.1/patches-4.1.10-rt11.tar.xz

Enjoy!

   tglx

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--  patches/inet-fix-potential-deadlock-in-reqsk-queue-unlink                     |  35
-rw-r--r--  patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch |   4
-rw-r--r--  patches/latency-hist.patch                                                    |  12
-rw-r--r--  patches/latency_hist-update-sched_wakeup-probe.patch                          |  47
-rw-r--r--  patches/localversion.patch                                                    |   4
-rw-r--r--  patches/net__Make_synchronize-rcu_expedited_conditional-on-non-rt             |  37
-rw-r--r--  patches/sched-introduce-the-27trace_sched_waking-27-tracepoint.patch          | 185
-rw-r--r--  patches/series                                                                |  15
-rw-r--r--  patches/softirq-split-locks.patch                                             |  98
9 files changed, 368 insertions(+), 69 deletions(-)
diff --git a/patches/inet-fix-potential-deadlock-in-reqsk-queue-unlink b/patches/inet-fix-potential-deadlock-in-reqsk-queue-unlink
new file mode 100644
index 00000000000000..b92455cf2993c4
--- /dev/null
+++ b/patches/inet-fix-potential-deadlock-in-reqsk-queue-unlink
@@ -0,0 +1,35 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu Aug 13 15:44:51 2015 -0700
+Subject: inet: fix potential deadlock in reqsk_queue_unlink()
+
+Upstream commit: 83fccfc3940c
+
+When replacing del_timer() with del_timer_sync(), I introduced
+a deadlock condition:
+
+reqsk_queue_unlink() is called from inet_csk_reqsk_queue_drop()
+
+inet_csk_reqsk_queue_drop() can be called from many contexts,
+one being the timer handler itself (reqsk_timer_handler()).
+
+In this case, del_timer_sync() loops forever.
+
+Simple fix is to test if timer is pending.
+
+Fixes: 2235f2ac75fd ("inet: fix races with reqsk timers")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index b27fc401c6a9..e664706b350c 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -584,7 +584,7 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue,
+ }
+
+ spin_unlock(&queue->syn_wait_lock);
+- if (del_timer_sync(&req->rsk_timer))
++ if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
+ reqsk_put(req);
+ return found;
+ }
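
The deadlock is inherent to del_timer_sync(): it waits until any
concurrently running instance of the handler has completed, so reaching
it from the handler itself can never make progress. A minimal sketch of
the guarded pattern, using hypothetical names (demo_timer, demo_cancel)
and the 4.1-era timer API:

    #include <linux/timer.h>
    #include <linux/printk.h>

    static struct timer_list demo_timer;	/* hypothetical, for illustration */

    /*
     * Reachable both from process context and from the timer handler
     * itself, like inet_csk_reqsk_queue_drop() in the commit above.
     */
    static void demo_cancel(void)
    {
    	/*
    	 * While the handler runs, the timer is already dequeued and
    	 * timer_pending() is false, so the guard skips the
    	 * del_timer_sync() that would otherwise wait forever for the
    	 * running handler -- i.e. for ourselves -- to finish.
    	 */
    	if (timer_pending(&demo_timer) && del_timer_sync(&demo_timer))
    		pr_info("timer cancelled before it fired\n");
    }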
diff --git a/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch b/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
index b7fb1bacd4e84e..96f9a8a363bd49 100644
--- a/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
+++ b/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
@@ -138,9 +138,9 @@ Index: linux-rt-devel/kernel/softirq.c
===================================================================
--- linux-rt-devel.orig/kernel/softirq.c
+++ linux-rt-devel/kernel/softirq.c
-@@ -606,6 +606,15 @@ void local_bh_enable_ip(unsigned long ip
+@@ -576,6 +576,15 @@ void __local_bh_enable(void)
}
- EXPORT_SYMBOL(local_bh_enable_ip);
+ EXPORT_SYMBOL(__local_bh_enable);
+void _local_bh_enable(void)
+{
diff --git a/patches/latency-hist.patch b/patches/latency-hist.patch
index 21725135615bf9..60e3b51ed190d8 100644
--- a/patches/latency-hist.patch
+++ b/patches/latency-hist.patch
@@ -16,14 +16,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Documentation/trace/histograms.txt | 186 +++++
include/linux/hrtimer.h | 3
include/linux/sched.h | 6
- include/trace/events/hist.h | 72 ++
+ include/trace/events/hist.h | 74 ++
include/trace/events/latency_hist.h | 29
kernel/time/hrtimer.c | 21
kernel/trace/Kconfig | 104 +++
kernel/trace/Makefile | 4
kernel/trace/latency_hist.c | 1178 ++++++++++++++++++++++++++++++++++++
kernel/trace/trace_irqsoff.c | 11
- 10 files changed, 1614 insertions(+)
+ 10 files changed, 1616 insertions(+)
Index: linux-rt-devel/Documentation/trace/histograms.txt
===================================================================
@@ -251,7 +251,7 @@ Index: linux-rt-devel/include/trace/events/hist.h
===================================================================
--- /dev/null
+++ linux-rt-devel/include/trace/events/hist.h
-@@ -0,0 +1,72 @@
+@@ -0,0 +1,74 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hist
+
@@ -263,6 +263,7 @@ Index: linux-rt-devel/include/trace/events/hist.h
+
+#if !defined(CONFIG_PREEMPT_OFF_HIST) && !defined(CONFIG_INTERRUPT_OFF_HIST)
+#define trace_preemptirqsoff_hist(a, b)
++#define trace_preemptirqsoff_hist_rcuidle(a, b)
+#else
+TRACE_EVENT(preemptirqsoff_hist,
+
@@ -287,6 +288,7 @@ Index: linux-rt-devel/include/trace/events/hist.h
+
+#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST
+#define trace_hrtimer_interrupt(a, b, c, d)
++#define trace_hrtimer_interrupt_rcuidle(a, b, c, d)
+#else
+TRACE_EVENT(hrtimer_interrupt,
+
@@ -1769,7 +1771,7 @@ Index: linux-rt-devel/kernel/trace/trace_irqsoff.c
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
-+ trace_preemptirqsoff_hist(IRQS_ON, 0);
++ trace_preemptirqsoff_hist_rcuidle(IRQS_ON, 0);
if (!preempt_trace() && irq_trace())
stop_critical_timing(a0, a1);
}
@@ -1777,7 +1779,7 @@ Index: linux-rt-devel/kernel/trace/trace_irqsoff.c
{
if (!preempt_trace() && irq_trace())
start_critical_timing(a0, a1);
-+ trace_preemptirqsoff_hist(IRQS_OFF, 1);
++ trace_preemptirqsoff_hist_rcuidle(IRQS_OFF, 1);
}
#else /* !CONFIG_PROVE_LOCKING */
diff --git a/patches/latency_hist-update-sched_wakeup-probe.patch b/patches/latency_hist-update-sched_wakeup-probe.patch
new file mode 100644
index 00000000000000..8ec3e20d2c7a21
--- /dev/null
+++ b/patches/latency_hist-update-sched_wakeup-probe.patch
@@ -0,0 +1,47 @@
+Subject: latency_hist: Update sched_wakeup probe
+From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Date: Sun, 25 Oct 2015 18:06:05 -0400
+
+"sched: Introduce the 'trace_sched_waking' tracepoint" introduces a
+prototype change for the sched_wakeup probe: the "success" argument is
+removed. Update the latency_hist probe following this change.
+
+Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Julien Desfossez <jdesfossez@efficios.com>
+Cc: Francis Giraldeau <francis.giraldeau@gmail.com>
+Cc: Mike Galbraith <efault@gmx.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/1445810765-18732-1-git-send-email-mathieu.desnoyers@efficios.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/trace/latency_hist.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/trace/latency_hist.c b/kernel/trace/latency_hist.c
+index 66a69eb..b6c1d14 100644
+--- a/kernel/trace/latency_hist.c
++++ b/kernel/trace/latency_hist.c
+@@ -115,7 +115,7 @@ static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio);
+ static char *wakeup_latency_hist_dir = "wakeup";
+ static char *wakeup_latency_hist_dir_sharedprio = "sharedprio";
+ static notrace void probe_wakeup_latency_hist_start(void *v,
+- struct task_struct *p, int success);
++ struct task_struct *p);
+ static notrace void probe_wakeup_latency_hist_stop(void *v,
+ struct task_struct *prev, struct task_struct *next);
+ static notrace void probe_sched_migrate_task(void *,
+@@ -869,7 +869,7 @@ static notrace void probe_sched_migrate_task(void *v, struct task_struct *task,
+ }
+
+ static notrace void probe_wakeup_latency_hist_start(void *v,
+- struct task_struct *p, int success)
++ struct task_struct *p)
+ {
+ unsigned long flags;
+ struct task_struct *curr = current;
+--
+2.1.4
+
+
+
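
The probe signature has to track TP_PROTO exactly: probes are invoked
through a function pointer cast to the tracepoint's prototype, so a
probe still expecting the removed "success" argument would read a
register that no longer carries anything meaningful. A minimal sketch of
a module registering against the updated one-argument prototype, with
hypothetical names (my_wakeup_probe, demo_init, demo_exit):

    #include <linux/module.h>
    #include <trace/events/sched.h>

    /*
     * The first parameter is the private data pointer passed at
     * registration; the rest must match TP_PROTO(struct task_struct *p).
     */
    static void my_wakeup_probe(void *data, struct task_struct *p)
    {
    	trace_printk("wakeup: %s/%d prio=%d\n", p->comm, p->pid, p->prio);
    }

    static int __init demo_init(void)
    {
    	/* register_trace_<event>() is generated by DEFINE_EVENT() */
    	return register_trace_sched_wakeup(my_wakeup_probe, NULL);
    }

    static void __exit demo_exit(void)
    {
    	unregister_trace_sched_wakeup(my_wakeup_probe, NULL);
    	tracepoint_synchronize_unregister();
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");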
diff --git a/patches/localversion.patch b/patches/localversion.patch
index c119b754e11ae2..113cb973a48250 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -1,4 +1,4 @@
-Subject: v4.1.10-rt10
+Subject: v4.1.10-rt11
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 08 Jul 2011 20:25:16 +0200
@@ -12,4 +12,4 @@ Index: linux-rt-devel/localversion-rt
--- /dev/null
+++ linux-rt-devel/localversion-rt
@@ -0,0 +1 @@
-+-rt10
++-rt11
diff --git a/patches/net__Make_synchronize-rcu_expedited_conditional-on-non-rt b/patches/net__Make_synchronize-rcu_expedited_conditional-on-non-rt
new file mode 100644
index 00000000000000..7a974975b96efa
--- /dev/null
+++ b/patches/net__Make_synchronize-rcu_expedited_conditional-on-non-rt
@@ -0,0 +1,37 @@
+Date: Tue, 27 Oct 2015 07:31:53 -0500
+From: Josh Cartwright <joshc@ni.com>
+Subject: net: Make synchronize_rcu_expedited() conditional on !RT_FULL
+
+While the use of synchronize_rcu_expedited() might make
+synchronize_net() "faster", it does so at significant cost on RT
+systems, as expediting a grace period forcibly preempts any
+high-priority RT tasks (via the stop_machine() mechanism).
+
+Without this change, we can observe a latency spike up to 30us with
+cyclictest by rapidly unplugging/reestablishing an ethernet link.
+
+Suggested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Signed-off-by: Josh Cartwright <joshc@ni.com>
+Cc: bigeasy@linutronix.de
+Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
+Acked-by: David S. Miller <davem@davemloft.net>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Link: http://lkml.kernel.org/r/20151027123153.GG8245@jcartwri.amer.corp.natinst.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ net/core/dev.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-rt-devel/net/core/dev.c
+===================================================================
+--- linux-rt-devel.orig/net/core/dev.c
++++ linux-rt-devel/net/core/dev.c
+@@ -6969,7 +6969,7 @@ EXPORT_SYMBOL(free_netdev);
+ void synchronize_net(void)
+ {
+ might_sleep();
+- if (rtnl_is_locked())
++ if (rtnl_is_locked() && !IS_ENABLED(CONFIG_PREEMPT_RT_FULL))
+ synchronize_rcu_expedited();
+ else
+ synchronize_rcu();
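
Since IS_ENABLED() evaluates to a constant 0 or 1 at compile time, the
non-RT build keeps the expedited path at no extra runtime cost, while
the RT build drops it as dead code. The same trade-off pattern in
isolation, as a sketch (demo_synchronize is a hypothetical name):

    #include <linux/kconfig.h>
    #include <linux/rcupdate.h>

    static void demo_synchronize(void)
    {
    	/*
    	 * Expedited grace periods complete sooner but force work onto
    	 * every CPU, preempting high-priority RT tasks; the plain
    	 * grace period is slower but stays out of their way.
    	 */
    	if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL))
    		synchronize_rcu_expedited();
    	else
    		synchronize_rcu();
    }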
diff --git a/patches/sched-introduce-the-27trace_sched_waking-27-tracepoint.patch b/patches/sched-introduce-the-27trace_sched_waking-27-tracepoint.patch
new file mode 100644
index 00000000000000..a35676ede93723
--- /dev/null
+++ b/patches/sched-introduce-the-27trace_sched_waking-27-tracepoint.patch
@@ -0,0 +1,185 @@
+Subject: sched: Introduce the trace_sched_waking tracepoint
+Date: Sun, 25 Oct 2015 16:35:24 -0400
+From: Peter Zijlstra <peterz@infradead.org>
+
+Upstream commit fbd705a0c6184580d0e2fbcbd47a37b6e5822511
+
+Mathieu reported that since 317f394160e9 ("sched: Move the second half
+of ttwu() to the remote cpu") trace_sched_wakeup() can happen out of
+context of the waker.
+
+This is a problem when you want to analyse wakeup paths because it is
+now very hard to correlate the wakeup event to whoever issued the
+wakeup.
+
+OTOH trace_sched_wakeup() is issued at the point where we set
+p->state = TASK_RUNNING, which is right where we hand the task off to
+the scheduler, so this is an important point when looking at
+scheduling behaviour; up to here it's been the wakeup path, everything
+hereafter is due to scheduler policy.
+
+To bridge this gap, introduce a second tracepoint: trace_sched_waking.
+It is guaranteed to be called in the waker context.
+
+[ Ported to linux-4.1.y-rt kernel by Mathieu Desnoyers. Resolved
+ conflict: try_to_wake_up_local() does not exist in -rt kernel. Removed
+ its instrumentation hunk. ]
+
+Reported-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+CC: Julien Desfossez <jdesfossez@efficios.com>
+CC: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Francis Giraldeau <francis.giraldeau@gmail.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mike Galbraith <efault@gmx.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+CC: Ingo Molnar <mingo@kernel.org>
+Link: http://lkml.kernel.org/r/20150609091336.GQ3644@twins.programming.kicks-ass.net
+Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+---
+Julien Desfossez is currently implementing an in-kernel latency tracker
+module performing automated latency/deadline analysis, whose main target
+is the real-time kernel.
+
+In order to follow the wakeup chains, we need the sched_waking
+tracepoint. This is a backport of this tracepoint to the 4.1-rt kernel.
+I'm not sure what the policy is regarding patch backports from mainline
+to -rt kernels, hence the RFC.
+
+Thanks,
+
+Mathieu
+---
+ include/trace/events/sched.h | 30 +++++++++++++++++++++---------
+ kernel/sched/core.c | 8 +++++---
+ kernel/trace/trace_sched_switch.c | 2 +-
+ kernel/trace/trace_sched_wakeup.c | 2 +-
+ 4 files changed, 28 insertions(+), 14 deletions(-)
+
+diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
+index 30fedaf..3b63828 100644
+--- a/include/trace/events/sched.h
++++ b/include/trace/events/sched.h
+@@ -55,9 +55,9 @@ TRACE_EVENT(sched_kthread_stop_ret,
+ */
+ DECLARE_EVENT_CLASS(sched_wakeup_template,
+
+- TP_PROTO(struct task_struct *p, int success),
++ TP_PROTO(struct task_struct *p),
+
+- TP_ARGS(__perf_task(p), success),
++ TP_ARGS(__perf_task(p)),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+@@ -71,25 +71,37 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->prio = p->prio;
+- __entry->success = success;
++ __entry->success = 1; /* rudiment, kill when possible */
+ __entry->target_cpu = task_cpu(p);
+ ),
+
+- TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
++ TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
+ __entry->comm, __entry->pid, __entry->prio,
+- __entry->success, __entry->target_cpu)
++ __entry->target_cpu)
+ );
+
++/*
++ * Tracepoint called when waking a task; this tracepoint is guaranteed to be
++ * called from the waking context.
++ */
++DEFINE_EVENT(sched_wakeup_template, sched_waking,
++ TP_PROTO(struct task_struct *p),
++ TP_ARGS(p));
++
++/*
++ * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
++ * It is not always called from the waking context.
++ */
+ DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
+- TP_PROTO(struct task_struct *p, int success),
+- TP_ARGS(p, success));
++ TP_PROTO(struct task_struct *p),
++ TP_ARGS(p));
+
+ /*
+ * Tracepoint for waking up a new task:
+ */
+ DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
+- TP_PROTO(struct task_struct *p, int success),
+- TP_ARGS(p, success));
++ TP_PROTO(struct task_struct *p),
++ TP_ARGS(p));
+
+ #ifdef CREATE_TRACE_POINTS
+ static inline long __trace_sched_switch_state(struct task_struct *p)
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 799b75b..b8b53df 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1558,9 +1558,9 @@ static void
+ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
+ {
+ check_preempt_curr(rq, p, wake_flags);
+- trace_sched_wakeup(p, true);
+-
+ p->state = TASK_RUNNING;
++ trace_sched_wakeup(p);
++
+ #ifdef CONFIG_SMP
+ if (p->sched_class->task_woken)
+ p->sched_class->task_woken(rq, p);
+@@ -1784,6 +1784,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+ if (!(wake_flags & WF_LOCK_SLEEPER))
+ p->saved_state = TASK_RUNNING;
+
++ trace_sched_waking(p);
++
+ success = 1; /* we're going to change ->state */
+ cpu = task_cpu(p);
+
+@@ -2188,7 +2190,7 @@ void wake_up_new_task(struct task_struct *p)
+ rq = __task_rq_lock(p);
+ activate_task(rq, p, 0);
+ p->on_rq = TASK_ON_RQ_QUEUED;
+- trace_sched_wakeup_new(p, true);
++ trace_sched_wakeup_new(p);
+ check_preempt_curr(rq, p, WF_FORK);
+ #ifdef CONFIG_SMP
+ if (p->sched_class->task_woken)
+diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
+index 419ca37..f270088 100644
+--- a/kernel/trace/trace_sched_switch.c
++++ b/kernel/trace/trace_sched_switch.c
+@@ -26,7 +26,7 @@ probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *n
+ }
+
+ static void
+-probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
++probe_sched_wakeup(void *ignore, struct task_struct *wakee)
+ {
+ if (unlikely(!sched_ref))
+ return;
+diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
+index d6e1003..79a2a5f 100644
+--- a/kernel/trace/trace_sched_wakeup.c
++++ b/kernel/trace/trace_sched_wakeup.c
+@@ -514,7 +514,7 @@ static void wakeup_reset(struct trace_array *tr)
+ }
+
+ static void
+-probe_wakeup(void *ignore, struct task_struct *p, int success)
++probe_wakeup(void *ignore, struct task_struct *p)
+ {
+ struct trace_array_cpu *data;
+ int cpu = smp_processor_id();
+--
+2.1.4
+
+
+
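
What the new tracepoint buys a tracer: inside a sched_waking probe,
"current" is guaranteed to be the task issuing the wakeup, whereas a
sched_wakeup probe may run on the remote CPU that completes the ttwu and
says nothing about who the waker was. A minimal sketch against the
kernel with this patch applied, assuming a hypothetical probe name:

    #include <linux/sched.h>
    #include <trace/events/sched.h>

    /*
     * Because sched_waking fires in the waker's context, "current" here
     * is the task issuing the wakeup -- exactly the correlation that
     * sched_wakeup lost when ttwu was split across CPUs.
     */
    static void probe_waking(void *data, struct task_struct *p)
    {
    	trace_printk("%s/%d wakes %s/%d\n",
    		     current->comm, current->pid, p->comm, p->pid);
    }

    /* registered like any other sched probe:
     *	register_trace_sched_waking(probe_waking, NULL);
     */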
diff --git a/patches/series b/patches/series
index c4f0a2778b4827..aa70e378b04690 100644
--- a/patches/series
+++ b/patches/series
@@ -6,6 +6,7 @@
# UPSTREAM changes queued
############################################################
xfs--clean-up-inode-lockdep-annotations
+inet-fix-potential-deadlock-in-reqsk-queue-unlink
############################################################
# UPSTREAM FIXES, patches pending
@@ -160,7 +161,6 @@ futex-avoid-double-wake-up-in-PI-futex-wait-wake-on-.patch
# TRACING
latency-hist.patch
-tracing-fix-rcu-splat-from-idle-cpu-on-boot.patch
# HW LATENCY DETECTOR - this really wants a rewrite
hwlatdetect.patch
@@ -266,8 +266,6 @@ ipc-make-rt-aware.patch
# RELAY
relay-fix-timer-madness.patch
-# NETWORKING
-
# TIMERS
timers-prepare-for-full-preemption.patch
timers-preempt-rt-support.patch
@@ -309,9 +307,6 @@ re-migrate_disable-race-with-cpu-hotplug-3f.patch
ftrace-migrate-disable-tracing.patch
hotplug-use-migrate-disable.patch
-# NETWORKING
-sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
-
# NOHZ
# LOCKDEP
@@ -429,7 +424,9 @@ debugobjects-rt.patch
# JUMPLABEL
jump-label-rt.patch
-# NET
+# NETWORKING
+sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
+net__Make_synchronize-rcu_expedited_conditional-on-non-rt
skbufhead-raw-lock.patch
net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
@@ -579,5 +576,9 @@ md-disable-bcache.patch
# WORKQUEUE SIGH
workqueue-prevent-deadlock-stall.patch
+# TRACING
+sched-introduce-the-27trace_sched_waking-27-tracepoint.patch
+latency_hist-update-sched_wakeup-probe.patch
+
# Add RT to version
localversion.patch
diff --git a/patches/softirq-split-locks.patch b/patches/softirq-split-locks.patch
index 8dd8c636847cfb..9e5d585646577a 100644
--- a/patches/softirq-split-locks.patch
+++ b/patches/softirq-split-locks.patch
@@ -24,39 +24,61 @@ threads.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- include/linux/bottom_half.h | 12
+ include/linux/bottom_half.h | 34 ++
include/linux/interrupt.h | 15 +
- include/linux/preempt_mask.h | 15 -
+ include/linux/preempt_mask.h | 15 +
include/linux/sched.h | 3
init/main.c | 1
- kernel/softirq.c | 518 ++++++++++++++++++++++++++++++++++++-------
+ kernel/softirq.c | 488 ++++++++++++++++++++++++++++++++++++-------
kernel/time/tick-sched.c | 9
net/core/dev.c | 6
- 8 files changed, 485 insertions(+), 94 deletions(-)
+ 8 files changed, 477 insertions(+), 94 deletions(-)
Index: linux-rt-devel/include/linux/bottom_half.h
===================================================================
--- linux-rt-devel.orig/include/linux/bottom_half.h
+++ linux-rt-devel/include/linux/bottom_half.h
-@@ -4,6 +4,17 @@
+@@ -4,6 +4,39 @@
#include <linux/preempt.h>
#include <linux/preempt_mask.h>
+#ifdef CONFIG_PREEMPT_RT_FULL
+
-+extern void local_bh_disable(void);
++extern void __local_bh_disable(void);
+extern void _local_bh_enable(void);
-+extern void local_bh_enable(void);
-+extern void local_bh_enable_ip(unsigned long ip);
-+extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
-+extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt);
++extern void __local_bh_enable(void);
++
++static inline void local_bh_disable(void)
++{
++ __local_bh_disable();
++}
++
++static inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
++{
++ __local_bh_disable();
++}
++
++static inline void local_bh_enable(void)
++{
++ __local_bh_enable();
++}
++
++static inline void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
++{
++ __local_bh_enable();
++}
++
++static inline void local_bh_enable_ip(unsigned long ip)
++{
++ __local_bh_enable();
++}
+
+#else
+
#ifdef CONFIG_TRACE_IRQFLAGS
extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
#else
-@@ -31,5 +42,6 @@ static inline void local_bh_enable(void)
+@@ -31,5 +64,6 @@ static inline void local_bh_enable(void)
{
__local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
}
@@ -415,7 +437,7 @@ Index: linux-rt-devel/kernel/softirq.c
pending = local_softirq_pending();
if (pending) {
-@@ -321,6 +445,276 @@ asmlinkage __visible void do_softirq(voi
+@@ -321,6 +445,246 @@ asmlinkage __visible void do_softirq(voi
}
/*
@@ -528,26 +550,14 @@ Index: linux-rt-devel/kernel/softirq.c
+ }
+}
+
-+static void __local_bh_disable(void)
++void __local_bh_disable(void)
+{
+ if (++current->softirq_nestcnt == 1)
+ migrate_disable();
+}
++EXPORT_SYMBOL(__local_bh_disable);
+
-+void local_bh_disable(void)
-+{
-+ __local_bh_disable();
-+}
-+EXPORT_SYMBOL(local_bh_disable);
-+
-+void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
-+{
-+ __local_bh_disable();
-+ if (cnt & PREEMPT_CHECK_OFFSET)
-+ preempt_disable();
-+}
-+
-+static void __local_bh_enable(void)
++void __local_bh_enable(void)
+{
+ if (WARN_ON(current->softirq_nestcnt == 0))
+ return;
@@ -560,25 +570,7 @@ Index: linux-rt-devel/kernel/softirq.c
+ if (--current->softirq_nestcnt == 0)
+ migrate_enable();
+}
-+
-+void local_bh_enable(void)
-+{
-+ __local_bh_enable();
-+}
-+EXPORT_SYMBOL(local_bh_enable);
-+
-+extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
-+{
-+ __local_bh_enable();
-+ if (cnt & PREEMPT_CHECK_OFFSET)
-+ preempt_enable();
-+}
-+
-+void local_bh_enable_ip(unsigned long ip)
-+{
-+ local_bh_enable();
-+}
-+EXPORT_SYMBOL(local_bh_enable_ip);
++EXPORT_SYMBOL(__local_bh_enable);
+
+int in_serving_softirq(void)
+{
@@ -692,7 +684,7 @@ Index: linux-rt-devel/kernel/softirq.c
* Enter an interrupt context.
*/
void irq_enter(void)
-@@ -331,9 +725,9 @@ void irq_enter(void)
+@@ -331,9 +695,9 @@ void irq_enter(void)
* Prevent raise_softirq from needlessly waking up ksoftirqd
* here, as softirq will be serviced on return from interrupt.
*/
@@ -704,7 +696,7 @@ Index: linux-rt-devel/kernel/softirq.c
}
__irq_enter();
-@@ -341,6 +735,7 @@ void irq_enter(void)
+@@ -341,6 +705,7 @@ void irq_enter(void)
static inline void invoke_softirq(void)
{
@@ -712,7 +704,7 @@ Index: linux-rt-devel/kernel/softirq.c
if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
/*
-@@ -360,6 +755,15 @@ static inline void invoke_softirq(void)
+@@ -360,6 +725,15 @@ static inline void invoke_softirq(void)
} else {
wakeup_softirqd();
}
@@ -728,7 +720,7 @@ Index: linux-rt-devel/kernel/softirq.c
}
static inline void tick_irq_exit(void)
-@@ -396,26 +800,6 @@ void irq_exit(void)
+@@ -396,26 +770,6 @@ void irq_exit(void)
trace_hardirq_exit(); /* must be last! */
}
@@ -755,7 +747,7 @@ Index: linux-rt-devel/kernel/softirq.c
void raise_softirq(unsigned int nr)
{
unsigned long flags;
-@@ -425,12 +809,6 @@ void raise_softirq(unsigned int nr)
+@@ -425,12 +779,6 @@ void raise_softirq(unsigned int nr)
local_irq_restore(flags);
}
@@ -768,7 +760,7 @@ Index: linux-rt-devel/kernel/softirq.c
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
softirq_vec[nr].action = action;
-@@ -733,23 +1111,7 @@ EXPORT_SYMBOL(tasklet_unlock_wait);
+@@ -733,23 +1081,7 @@ EXPORT_SYMBOL(tasklet_unlock_wait);
static int ksoftirqd_should_run(unsigned int cpu)
{
@@ -793,7 +785,7 @@ Index: linux-rt-devel/kernel/softirq.c
}
#ifdef CONFIG_HOTPLUG_CPU
-@@ -831,6 +1193,8 @@ static struct notifier_block cpu_nfb = {
+@@ -831,6 +1163,8 @@ static struct notifier_block cpu_nfb = {
static struct smp_hotplug_thread softirq_threads = {
.store = &ksoftirqd,