author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2016-12-01 17:42:36 +0100
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2016-12-01 17:54:58 +0100
commit     02d978ea9304bb96a3c0d5984c0903e4dd6304f6 (patch)
tree       ef2ca083d92770adf4d927dea42768b9a367d562
parent     38c2f12d14909ce82aeecd63883d2052b3a3a608 (diff)
download   4.12-rt-patches-02d978ea9304bb96a3c0d5984c0903e4dd6304f6.tar.gz
[ANNOUNCE] v4.8.11-rt7
Dear RT folks!

I'm pleased to announce the v4.8.11-rt7 patch set.

Changes since v4.8.11-rt6:

  - A fix for a race in the futex/rtmutex code which was there since the
    very beginning. Reported by David Daney, fixed by Thomas Gleixner.

  - A fix for the kprobe code on ARM by Yang Shi.

  - It is no longer possible to force an expedited RCU grace period on
    -RT. We had one spot in the networking code where it was disabled on
    RT due to the high latencies it caused. Suggested by Luiz Capitulino
    and patched by Julia Cartwright.

  - Expedited RCU grace periods are now forced during boot, which should
    speed up the boot process (even on -RT).

Known issues
  - CPU hotplug got a little better but can deadlock.

The delta patch against 4.8.11-rt6 is appended below and can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.8/incr/patch-4.8.11-rt6-rt7.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.8.11-rt7

The RT patch against 4.8.11 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.8/patch-4.8.11-rt7.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.8/patches-4.8.11-rt7.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-rw-r--r--  patches/arm-kprobe-replace-patch_lock-to-raw-lock.patch                 |  75
-rw-r--r--  patches/localversion.patch                                              |   2
-rw-r--r--  patches/mm-disable-sloub-rt.patch                                       |   4
-rw-r--r--  patches/net__Make_synchronize-rcu_expedited_conditional-on-non-rt.patch |  35
-rw-r--r--  patches/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch        |  29
-rw-r--r--  patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch                  |   4
-rw-r--r--  patches/rcu-update-make-RCU_EXPEDITE_BOOT-default.patch                 |  60
-rw-r--r--  patches/rtmutex-Prevent-dequeue-vs.-unlock-race.patch                   | 172
-rw-r--r--  patches/rtmutex-futex-prepare-rt.patch                                  |  18
-rw-r--r--  patches/sched-disable-rt-group-sched-on-rt.patch                        |   2
-rw-r--r--  patches/series                                                          |   5
-rw-r--r--  patches/slub-disable-SLUB_CPU_PARTIAL.patch                             |   2
12 files changed, 356 insertions, 52 deletions
diff --git a/patches/arm-kprobe-replace-patch_lock-to-raw-lock.patch b/patches/arm-kprobe-replace-patch_lock-to-raw-lock.patch
new file mode 100644
index 0000000000000..e324787558738
--- /dev/null
+++ b/patches/arm-kprobe-replace-patch_lock-to-raw-lock.patch
@@ -0,0 +1,75 @@
+From 6e2639b6d72e1ef9e264aa658db3b6171d9ba12f Mon Sep 17 00:00:00 2001
+From: Yang Shi <yang.shi@linaro.org>
+Date: Thu, 10 Nov 2016 16:17:55 -0800
+Subject: [PATCH] arm: kprobe: replace patch_lock to raw lock
+
+When running kprobes on an -rt kernel, the bug below is caught:
+
+BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:931
+in_atomic(): 1, irqs_disabled(): 128, pid: 14, name: migration/0
+INFO: lockdep is turned off.
+irq event stamp: 238
+hardirqs last enabled at (237): [<80b5aecc>] _raw_spin_unlock_irqrestore+0x88/0x90
+hardirqs last disabled at (238): [<80b56d88>] __schedule+0xec/0x94c
+softirqs last enabled at (0): [<80225584>] copy_process.part.5+0x30c/0x1994
+softirqs last disabled at (0): [< (null)>] (null)
+Preemption disabled at:[<802f2b98>] cpu_stopper_thread+0xc0/0x140
+
+CPU: 0 PID: 14 Comm: migration/0 Tainted: G O 4.8.3-rt2 #1
+Hardware name: Freescale LS1021A
+[<80212e7c>] (unwind_backtrace) from [<8020cd2c>] (show_stack+0x20/0x24)
+[<8020cd2c>] (show_stack) from [<80689e14>] (dump_stack+0xa0/0xcc)
+[<80689e14>] (dump_stack) from [<8025a43c>] (___might_sleep+0x1b8/0x2a4)
+[<8025a43c>] (___might_sleep) from [<80b5b324>] (rt_spin_lock+0x34/0x74)
+[<80b5b324>] (rt_spin_lock) from [<80b5c31c>] (__patch_text_real+0x70/0xe8)
+[<80b5c31c>] (__patch_text_real) from [<80b5c3ac>] (patch_text_stop_machine+0x18/0x20)
+[<80b5c3ac>] (patch_text_stop_machine) from [<802f2920>] (multi_cpu_stop+0xfc/0x134)
+[<802f2920>] (multi_cpu_stop) from [<802f2ba0>] (cpu_stopper_thread+0xc8/0x140)
+[<802f2ba0>] (cpu_stopper_thread) from [<802563a4>] (smpboot_thread_fn+0x1a4/0x354)
+[<802563a4>] (smpboot_thread_fn) from [<80251d38>] (kthread+0x104/0x11c)
+[<80251d38>] (kthread) from [<80207f70>] (ret_from_fork+0x14/0x24)
+
+Since patch_text_stop_machine() is called from stop_machine(), which disables IRQs,
+a sleepable lock must not be used in this atomic context, so replace patch_lock
+with a raw lock.
+
+Signed-off-by: Yang Shi <yang.shi@linaro.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm/kernel/patch.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
+index 69bda1a5707e..1f665acaa6a9 100644
+--- a/arch/arm/kernel/patch.c
++++ b/arch/arm/kernel/patch.c
+@@ -15,7 +15,7 @@ struct patch {
+ unsigned int insn;
+ };
+
+-static DEFINE_SPINLOCK(patch_lock);
++static DEFINE_RAW_SPINLOCK(patch_lock);
+
+ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
+ __acquires(&patch_lock)
+@@ -32,7 +32,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
+ return addr;
+
+ if (flags)
+- spin_lock_irqsave(&patch_lock, *flags);
++ raw_spin_lock_irqsave(&patch_lock, *flags);
+ else
+ __acquire(&patch_lock);
+
+@@ -47,7 +47,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
+ clear_fixmap(fixmap);
+
+ if (flags)
+- spin_unlock_irqrestore(&patch_lock, *flags);
++ raw_spin_unlock_irqrestore(&patch_lock, *flags);
+ else
+ __release(&patch_lock);
+ }
+--
+2.10.2
+
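For readers not steeped in -RT locking: on PREEMPT_RT_FULL a spinlock_t is turned into a sleeping rt_mutex, while a raw_spinlock_t keeps spinning, which is why only the latter may be taken inside a stop_machine() callback as in the patch above. The following is a minimal illustrative sketch of that pattern only; it is hypothetical module code, not part of the patch queue, and all names (example_*) are made up.

#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/cpumask.h>

/* raw_spinlock_t stays a real spinning lock even on PREEMPT_RT_FULL. */
static DEFINE_RAW_SPINLOCK(example_lock);
static int example_value;

/* Runs via stop_machine() with interrupts disabled on all CPUs; only a
 * raw lock is legal here, a spinlock_t would try to sleep on -RT. */
static int example_stop_fn(void *arg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_lock, flags);
	example_value = *(int *)arg;
	raw_spin_unlock_irqrestore(&example_lock, flags);
	return 0;
}

static int __init example_init(void)
{
	int val = 42;

	return stop_machine(example_stop_fn, &val, cpu_online_mask);
}

static void __exit example_exit(void) { }

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");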
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 4c1841b6475d1..bbb08330835de 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt6
++-rt7
diff --git a/patches/mm-disable-sloub-rt.patch b/patches/mm-disable-sloub-rt.patch
index 23a5671fdf712..e5389730ae772 100644
--- a/patches/mm-disable-sloub-rt.patch
+++ b/patches/mm-disable-sloub-rt.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1761,6 +1761,7 @@ choice
+@@ -1748,6 +1748,7 @@ choice
config SLAB
bool "SLAB"
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
select HAVE_HARDENED_USERCOPY_ALLOCATOR
help
The regular slab allocator that is established and known to work
-@@ -1781,6 +1782,7 @@ config SLUB
+@@ -1768,6 +1769,7 @@ config SLUB
config SLOB
depends on EXPERT
bool "SLOB (Simple Allocator)"
diff --git a/patches/net__Make_synchronize-rcu_expedited_conditional-on-non-rt.patch b/patches/net__Make_synchronize-rcu_expedited_conditional-on-non-rt.patch
deleted file mode 100644
index 520b9724d1f7d..0000000000000
--- a/patches/net__Make_synchronize-rcu_expedited_conditional-on-non-rt.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-Date: Tue, 27 Oct 2015 07:31:53 -0500
-From: Josh Cartwright <joshc@ni.com>
-Subject: net: Make synchronize_rcu_expedited() conditional on !RT_FULL
-
-While the use of synchronize_rcu_expedited() might make
-synchronize_net() "faster", it does so at significant cost on RT
-systems, as expediting a grace period forcibly preempts any
-high-priority RT tasks (via the stop_machine() mechanism).
-
-Without this change, we can observe a latency spike up to 30us with
-cyclictest by rapidly unplugging/reestablishing an ethernet link.
-
-Suggested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-Signed-off-by: Josh Cartwright <joshc@ni.com>
-Cc: bigeasy@linutronix.de
-Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
-Acked-by: David S. Miller <davem@davemloft.net>
-Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-Link: http://lkml.kernel.org/r/20151027123153.GG8245@jcartwri.amer.corp.natinst.com
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- net/core/dev.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -7740,7 +7740,7 @@ EXPORT_SYMBOL(free_netdev);
- void synchronize_net(void)
- {
- might_sleep();
-- if (rtnl_is_locked())
-+ if (rtnl_is_locked() && !IS_ENABLED(CONFIG_PREEMPT_RT_FULL))
- synchronize_rcu_expedited();
- else
- synchronize_rcu();
diff --git a/patches/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch b/patches/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
new file mode 100644
index 0000000000000..9f3b4c4a3b482
--- /dev/null
+++ b/patches/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
@@ -0,0 +1,29 @@
+From: Julia Cartwright <julia@ni.com>
+Date: Wed, 12 Oct 2016 11:21:14 -0500
+Subject: [PATCH] rcu: enable rcu_normal_after_boot by default for RT
+
+The forcing of an expedited grace period is an expensive and very
+RT-application unfriendly operation, as it forcibly preempts all running
+tasks on CPUs which are preventing the gp from expiring.
+
+By default, as a policy decision, disable the expediting of grace
+periods (after boot) on configurations which enable PREEMPT_RT_FULL.
+
+Suggested-by: Luiz Capitulino <lcapitulino@redhat.com>
+Signed-off-by: Julia Cartwright <julia@ni.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/rcu/update.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/rcu/update.c
++++ b/kernel/rcu/update.c
+@@ -63,7 +63,7 @@ MODULE_ALIAS("rcupdate");
+ #ifndef CONFIG_TINY_RCU
+ module_param(rcu_expedited, int, 0);
+ module_param(rcu_normal, int, 0);
+-static int rcu_normal_after_boot;
++static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT_FULL);
+ module_param(rcu_normal_after_boot, int, 0);
+ #endif /* #ifndef CONFIG_TINY_RCU */
+
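The net effect of the one-liner above is easiest to see in terms of the knobs it touches: rcu_normal_after_boot is latched into rcu_normal by rcu_end_inkernel_boot() (see the RCU_EXPEDITE_BOOT patch further below), and once rcu_normal is set, expedited requests degrade to normal grace periods. What follows is a compilable toy model of that interaction with simplified names and behaviour; it is not the kernel's actual implementation.

#include <stdio.h>

/* Toy model only: names mirror kernel/rcu/update.c, behaviour is simplified. */
static int rcu_normal;                 /* rcupdate.rcu_normal= */
static int rcu_normal_after_boot = 1;  /* defaults to 1 on PREEMPT_RT_FULL with this patch */

/* Mirrors the hunk above: latch rcu_normal_after_boot into rcu_normal at end of boot. */
static void rcu_end_inkernel_boot(void)
{
	if (rcu_normal_after_boot)
		rcu_normal = 1;
}

/* Simplified stand-in for an expedited grace-period request. */
static void request_expedited_grace_period(void)
{
	if (rcu_normal)
		puts("rcu_normal set: falling back to a normal grace period");
	else
		puts("forcing an expedited grace period");
}

int main(void)
{
	request_expedited_grace_period();   /* still expedited: boot not finished */
	rcu_end_inkernel_boot();
	request_expedited_grace_period();   /* now a normal grace period on -RT */
	return 0;
}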
diff --git a/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch b/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
index b930f2e450919..4c35a15d2d425 100644
--- a/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
+++ b/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
@@ -347,7 +347,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern struct rcu_state rcu_preempt_state;
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
-@@ -295,6 +295,7 @@ int rcu_read_lock_held(void)
+@@ -293,6 +293,7 @@ int rcu_read_lock_held(void)
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
@@ -355,7 +355,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
*
-@@ -321,6 +322,7 @@ int rcu_read_lock_bh_held(void)
+@@ -319,6 +320,7 @@ int rcu_read_lock_bh_held(void)
return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
diff --git a/patches/rcu-update-make-RCU_EXPEDITE_BOOT-default.patch b/patches/rcu-update-make-RCU_EXPEDITE_BOOT-default.patch
new file mode 100644
index 0000000000000..d3fd1f5fb9d0e
--- /dev/null
+++ b/patches/rcu-update-make-RCU_EXPEDITE_BOOT-default.patch
@@ -0,0 +1,60 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 2 Nov 2016 16:45:58 +0100
+Subject: [PATCH] rcu: update: make RCU_EXPEDITE_BOOT default
+
+RCU_EXPEDITE_BOOT should speed up the boot process by enforcing
+synchronize_rcu_expedited() instead of synchronize_rcu() during the boot
+process. There should be no reason why one would not want this, and there
+is no need to worry about real-time latency at this point.
+Therefore make it the default.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ init/Kconfig | 13 -------------
+ kernel/rcu/update.c | 6 ++----
+ 2 files changed, 2 insertions(+), 17 deletions(-)
+
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -771,19 +771,6 @@ config RCU_NOCB_CPU_ALL
+
+ endchoice
+
+-config RCU_EXPEDITE_BOOT
+- bool
+- default n
+- help
+- This option enables expedited grace periods at boot time,
+- as if rcu_expedite_gp() had been invoked early in boot.
+- The corresponding rcu_unexpedite_gp() is invoked from
+- rcu_end_inkernel_boot(), which is intended to be invoked
+- at the end of the kernel-only boot sequence, just before
+- init is exec'ed.
+-
+- Accept the default if unsure.
+-
+ endmenu # "RCU Subsystem"
+
+ config BUILD_BIN2C
+--- a/kernel/rcu/update.c
++++ b/kernel/rcu/update.c
+@@ -130,8 +130,7 @@ bool rcu_gp_is_normal(void)
+ }
+ EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
+
+-static atomic_t rcu_expedited_nesting =
+- ATOMIC_INIT(IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT) ? 1 : 0);
++static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);
+
+ /*
+ * Should normal grace-period primitives be expedited? Intended for
+@@ -179,8 +178,7 @@ EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
+ */
+ void rcu_end_inkernel_boot(void)
+ {
+- if (IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT))
+- rcu_unexpedite_gp();
++ rcu_unexpedite_gp();
+ if (rcu_normal_after_boot)
+ WRITE_ONCE(rcu_normal, 1);
+ }
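To see how this patch and the rcu_normal_after_boot change fit together: grace periods stay expedited while a nesting counter (now started at 1 unconditionally) is non-zero, and rcu_end_inkernel_boot() drops that initial reference just before init runs. Below is a toy model of that counter with simplified, made-up names; it is for illustration only and does not reproduce the kernel's code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Boot starts with one "expedite" reference held (ATOMIC_INIT(1) above). */
static atomic_int expedited_nesting = 1;

static bool gp_is_expedited(void)
{
	return atomic_load(&expedited_nesting) > 0;
}

/* Called just before init is exec'ed; drops the boot-time reference. */
static void end_inkernel_boot(void)
{
	atomic_fetch_sub(&expedited_nesting, 1);
}

int main(void)
{
	printf("during boot: expedited=%d\n", gp_is_expedited());  /* 1 */
	end_inkernel_boot();
	printf("after boot:  expedited=%d\n", gp_is_expedited());  /* 0 */
	return 0;
}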
diff --git a/patches/rtmutex-Prevent-dequeue-vs.-unlock-race.patch b/patches/rtmutex-Prevent-dequeue-vs.-unlock-race.patch
new file mode 100644
index 0000000000000..61cc7ba5f738c
--- /dev/null
+++ b/patches/rtmutex-Prevent-dequeue-vs.-unlock-race.patch
@@ -0,0 +1,172 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 1 Dec 2016 16:47:21 +0100
+Subject: [PATCH] rtmutex: Prevent dequeue vs. unlock race
+
+David reported a futex/rtmutex state corruption. It's caused by the
+following problem:
+
+CPU0 CPU1 CPU2
+
+l->owner=T1
+ rt_mutex_lock(l)
+ lock(l->wait_lock)
+ l->owner = T1 | HAS_WAITERS;
+ enqueue(T2)
+ boost()
+ unlock(l->wait_lock)
+ schedule()
+
+ rt_mutex_lock(l)
+ lock(l->wait_lock)
+ l->owner = T1 | HAS_WAITERS;
+ enqueue(T3)
+ boost()
+ unlock(l->wait_lock)
+ schedule()
+ signal(->T2) signal(->T3)
+ lock(l->wait_lock)
+ dequeue(T2)
+ deboost()
+ unlock(l->wait_lock)
+ lock(l->wait_lock)
+ dequeue(T3)
+ ===> wait list is now empty
+ deboost()
+ unlock(l->wait_lock)
+ lock(l->wait_lock)
+ fixup_rt_mutex_waiters()
+ if (wait_list_empty(l)) {
+ owner = l->owner & ~HAS_WAITERS;
+ l->owner = owner
+ ==> l->owner = T1
+ }
+
+ lock(l->wait_lock)
+rt_mutex_unlock(l) fixup_rt_mutex_waiters()
+ if (wait_list_empty(l)) {
+ owner = l->owner & ~HAS_WAITERS;
+cmpxchg(l->owner, T1, NULL)
+ ===> Success (l->owner = NULL)
+ l->owner = owner
+ ==> l->owner = T1
+ }
+
+That means the problem is caused by fixup_rt_mutex_waiters(), which does the
+RMW to clear the waiters bit unconditionally when there are no waiters in
+the rtmutex's rbtree.
+
+This can be fatal: A concurrent unlock can release the rtmutex in the
+fastpath because the waiters bit is not set. If the cmpxchg() gets in the
+middle of the RMW operation, then the previous owner, which just unlocked
+the rtmutex, is set as the owner again when the write takes place after the
+successful cmpxchg().
+
+The solution is rather trivial: Verify that the owner member of the rtmutex
+has the waiters bit set before clearing it. This does not require a
+cmpxchg() or other atomic operations because the waiters bit can only be
+set and cleared with the rtmutex wait_lock held. It's also safe against the
+fast path unlock attempt. The unlock attempt via cmpxchg() will either see
+the bit set and take the slowpath or see the bit cleared and release it
+atomically in the fastpath.
+
+It's remarkable that the test program provided by David triggers on ARM64
+and MIPS64 really quickly, but it refuses to reproduce on x86-64, while the
+problem exists there as well. That refusal might explain why this was not
+discovered earlier despite the bug existing from day one of the rtmutex
+implementation more than 10 years ago.
+
+Thanks to David for meticulously instrumenting the code and providing the
+information which allowed us to decode this subtle problem.
+
+Fixes: 23f78d4a03c5 ("[PATCH] pi-futex: rt mutex core")
+Cc: stable@vger.kernel.org
+Cc: stable-rt@vger.kernel.org
+Reported-by: David Daney <ddaney@caviumnetworks.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/locking/rtmutex.c | 68 ++++++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 66 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 1ec0f48962b3..2c49d76f96c3 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -65,8 +65,72 @@ static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
+
+ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
+ {
+- if (!rt_mutex_has_waiters(lock))
+- clear_rt_mutex_waiters(lock);
++ unsigned long owner, *p = (unsigned long *) &lock->owner;
++
++ if (rt_mutex_has_waiters(lock))
++ return;
++
++ /*
++ * The rbtree has no waiters enqueued, now make sure that the
++ * lock->owner still has the waiters bit set, otherwise the
++ * following can happen:
++ *
++ * CPU 0 CPU 1 CPU2
++ * l->owner=T1
++ * rt_mutex_lock(l)
++ * lock(l->lock)
++ * l->owner = T1 | HAS_WAITERS;
++ * enqueue(T2)
++ * boost()
++ * unlock(l->lock)
++ * block()
++ *
++ * rt_mutex_lock(l)
++ * lock(l->lock)
++ * l->owner = T1 | HAS_WAITERS;
++ * enqueue(T3)
++ * boost()
++ * unlock(l->lock)
++ * block()
++ * signal(->T2) signal(->T3)
++ * lock(l->lock)
++ * dequeue(T2)
++ * deboost()
++ * unlock(l->lock)
++ * lock(l->lock)
++ * dequeue(T3)
++ * ==> wait list is empty
++ * deboost()
++ * unlock(l->lock)
++ * lock(l->lock)
++ * fixup_rt_mutex_waiters()
++ * if (wait_list_empty(l) {
++ * l->owner = owner
++ * owner = l->owner & ~HAS_WAITERS;
++ * ==> l->owner = T1
++ * }
++ * lock(l->lock)
++ * rt_mutex_unlock(l) fixup_rt_mutex_waiters()
++ * if (wait_list_empty(l) {
++ * owner = l->owner & ~HAS_WAITERS;
++ * cmpxchg(l->owner, T1, NULL)
++ * ===> Success (l->owner = NULL)
++ *
++ * l->owner = owner
++ * ==> l->owner = T1
++ * }
++ *
++ * With the check for the waiter bit in place T3 on CPU2 will not
++ * overwrite. All tasks fiddling with the waiters bit are
++ * serialized by l->lock, so nothing else can modify the waiters
++ * bit. If the bit is set then nothing can change l->owner either
++ * so the simple RMW is safe. The cmpxchg() will simply fail if it
++ * happens in the middle of the RMW because the waiters bit is
++ * still set.
++ */
++ owner = READ_ONCE(*p);
++ if (owner & RT_MUTEX_HAS_WAITERS)
++ WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
+ }
+
+ /*
+--
+2.10.2
+
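The key background for the comment block in the patch above is that RT_MUTEX_HAS_WAITERS lives in the low bit of lock->owner, so the "harmless" bit clear is a read-modify-write of the very word the lock-less unlock fast path cmpxchg()es. The compact userspace illustration below uses toy types and names (toy_rtmutex, HAS_WAITERS) and is not the kernel code; it only mirrors the shape of the fixed check.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define HAS_WAITERS 1UL   /* kernel: RT_MUTEX_HAS_WAITERS, low bit of lock->owner */

struct toy_rtmutex {
	_Atomic uintptr_t owner;   /* owner task pointer ORed with HAS_WAITERS */
};

/*
 * Fixed fixup: clear the waiters bit only if it is still set.  The plain
 * store is safe because (in the kernel) every path that sets or clears the
 * bit holds wait_lock; a concurrent fast-path unlock either still sees the
 * bit and takes the slow path, or has already cmpxchg()ed owner to 0, in
 * which case the check below sees no bit and writes nothing.
 */
static void toy_fixup_waiters(struct toy_rtmutex *l)
{
	uintptr_t owner = atomic_load(&l->owner);

	if (owner & HAS_WAITERS)
		atomic_store(&l->owner, owner & ~HAS_WAITERS);
}

int main(void)
{
	struct toy_rtmutex l = { .owner = 0x1000 | HAS_WAITERS };

	toy_fixup_waiters(&l);
	printf("owner word after fixup: %#lx\n", (unsigned long)atomic_load(&l.owner));
	return 0;
}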
diff --git a/patches/rtmutex-futex-prepare-rt.patch b/patches/rtmutex-futex-prepare-rt.patch
index 8ddbc2f20da47..c4ccea1577768 100644
--- a/patches/rtmutex-futex-prepare-rt.patch
+++ b/patches/rtmutex-futex-prepare-rt.patch
@@ -138,8 +138,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* haven't already.
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -69,6 +69,11 @@ static void fixup_rt_mutex_waiters(struc
- clear_rt_mutex_waiters(lock);
+@@ -133,6 +133,11 @@ static void fixup_rt_mutex_waiters(struc
+ WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
}
+static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
@@ -150,7 +150,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* We can speed up the acquire/release, if there's no debugging state to be
* set up.
-@@ -357,7 +362,8 @@ int max_lock_depth = 1024;
+@@ -421,7 +426,8 @@ int max_lock_depth = 1024;
static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
@@ -160,7 +160,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -493,7 +499,7 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -557,7 +563,7 @@ static int rt_mutex_adjust_prio_chain(st
* reached or the state of the chain has changed while we
* dropped the locks.
*/
@@ -169,7 +169,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
goto out_unlock_pi;
/*
-@@ -907,6 +913,23 @@ static int task_blocks_on_rt_mutex(struc
+@@ -971,6 +977,23 @@ static int task_blocks_on_rt_mutex(struc
return -EDEADLK;
raw_spin_lock(&task->pi_lock);
@@ -193,7 +193,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
__rt_mutex_adjust_prio(task);
waiter->task = task;
waiter->lock = lock;
-@@ -930,7 +953,7 @@ static int task_blocks_on_rt_mutex(struc
+@@ -994,7 +1017,7 @@ static int task_blocks_on_rt_mutex(struc
rt_mutex_enqueue_pi(owner, waiter);
__rt_mutex_adjust_prio(owner);
@@ -202,7 +202,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
chain_walk = 1;
} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
chain_walk = 1;
-@@ -1014,7 +1037,7 @@ static void remove_waiter(struct rt_mute
+@@ -1078,7 +1101,7 @@ static void remove_waiter(struct rt_mute
{
bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
struct task_struct *owner = rt_mutex_owner(lock);
@@ -211,7 +211,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
raw_spin_lock(&current->pi_lock);
rt_mutex_dequeue(lock, waiter);
-@@ -1038,7 +1061,8 @@ static void remove_waiter(struct rt_mute
+@@ -1102,7 +1125,8 @@ static void remove_waiter(struct rt_mute
__rt_mutex_adjust_prio(owner);
/* Store the lock on which owner is blocked or NULL */
@@ -221,7 +221,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
raw_spin_unlock(&owner->pi_lock);
-@@ -1074,7 +1098,7 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -1138,7 +1162,7 @@ void rt_mutex_adjust_pi(struct task_stru
raw_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
diff --git a/patches/sched-disable-rt-group-sched-on-rt.patch b/patches/sched-disable-rt-group-sched-on-rt.patch
index e50bb88320405..165ca217c75af 100644
--- a/patches/sched-disable-rt-group-sched-on-rt.patch
+++ b/patches/sched-disable-rt-group-sched-on-rt.patch
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1054,6 +1054,7 @@ config CFS_BANDWIDTH
+@@ -1041,6 +1041,7 @@ config CFS_BANDWIDTH
config RT_GROUP_SCHED
bool "Group scheduling for SCHED_RR/FIFO"
depends on CGROUP_SCHED
diff --git a/patches/series b/patches/series
index 70680082ca41d..4730d04a30f14 100644
--- a/patches/series
+++ b/patches/series
@@ -37,6 +37,7 @@ iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
lockdep-Quiet-gcc-about-dangerous-__builtin_return_a.patch
x86-apic-get-rid-of-warning-acpi_ioapic_lock-defined.patch
rxrpc-remove-unused-static-variables.patch
+rcu-update-make-RCU_EXPEDITE_BOOT-default.patch
# Wants a different fix for upstream
NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
@@ -44,6 +45,7 @@ NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
############################################################
# Submitted on LKML
############################################################
+rtmutex-Prevent-dequeue-vs.-unlock-race.patch
# SPARC part of early printk consolidation
sparc64-use-generic-rwsem-spinlocks-rt.patch
@@ -93,6 +95,7 @@ signal-revert-ptrace-preempt-magic.patch
# ARM lock annotation
arm-convert-boot-lock-to-raw.patch
+arm-kprobe-replace-patch_lock-to-raw-lock.patch
# PREEMPT_ENABLE_NO_RESCHED
@@ -427,7 +430,6 @@ seqlock-prevent-rt-starvation.patch
# NETWORKING
sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
-net__Make_synchronize-rcu_expedited_conditional-on-non-rt.patch
skbufhead-raw-lock.patch
net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
net-move-xmit_recursion-to-per-task-variable-on-RT.patch
@@ -547,6 +549,7 @@ kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
rcu-disable-rcu-fast-no-hz-on-rt.patch
rcu-Eliminate-softirq-processing-from-rcutree.patch
rcu-make-RCU_BOOST-default-on-RT.patch
+rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
# PREEMPT LAZY
preempt-lazy-support.patch
diff --git a/patches/slub-disable-SLUB_CPU_PARTIAL.patch b/patches/slub-disable-SLUB_CPU_PARTIAL.patch
index d0679b2f9aa26..571a433a3d1e0 100644
--- a/patches/slub-disable-SLUB_CPU_PARTIAL.patch
+++ b/patches/slub-disable-SLUB_CPU_PARTIAL.patch
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1801,7 +1801,7 @@ config SLAB_FREELIST_RANDOM
+@@ -1788,7 +1788,7 @@ config SLAB_FREELIST_RANDOM
config SLUB_CPU_PARTIAL
default y