author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2017-06-27 14:35:40 +0200
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2017-06-27 14:35:40 +0200
commit     13eb647183b33a3096a69994dd6845b82ca34a6b (patch)
tree       b7da0913246368f36b246dd1cf19b4a2c4327d1f
parent     df5eae12d19053a83e99debbb7b9319e9f0ef491 (diff)
download   4.12-rt-patches-13eb647183b33a3096a69994dd6845b82ca34a6b.tar.gz
[ANNOUNCE] v4.11.7-rt3
Dear RT folks!

I'm pleased to announce the v4.11.7-rt3 patch set.

Changes since v4.11.7-rt2:

  - Clearing a swap-slot took a sleeping lock in a preempt-disable region.
    Fixed by dropping the preempt-disable region.

  - The capability check code on arm64 took a mutex in an atomic section.
    The backport of a few patches from upstream made this visible, and the
    fix has now also been backported.

  - The removal of TASK_ALL in the last release uncovered a bug where we
    mixed normal wake ups and wake ups made for waiters of sleeping
    spinlocks. Reported by Mike Galbraith.

  - Lock stealing for RTMutex wasn't working in v4.11. Reported and fixed
    by Mike Galbraith.

  - Code now compiles for RT + !CONFIG_POSIX_TIMERS. Reported by the
    kbuild test robot.

Known issues
  - CPU hotplug got a little better but can deadlock.

The delta patch against v4.11.7-rt2 is appended below and can be found here:
    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.11/incr/patch-4.11.7-rt2-rt3.patch.xz

You can get this release via the git tree at:
    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.11.7-rt3

The RT patch against v4.11.7 can be found here:
    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patch-4.11.7-rt3.patch.xz

The split quilt queue is available at:
    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.7-rt3.tar.xz

Sebastian

diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -115,6 +115,7 @@ struct arm64_cpu_capabilities {
 
 extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
+extern struct static_key_false arm64_const_caps_ready;
 
 bool this_cpu_has_cap(unsigned int cap);
 
@@ -124,7 +125,7 @@ static inline bool cpu_have_feature(unsigned int num)
 }
 
 /* System capability check for constant caps */
-static inline bool cpus_have_const_cap(int num)
+static inline bool __cpus_have_const_cap(int num)
 {
     if (num >= ARM64_NCAPS)
         return false;
@@ -138,6 +139,14 @@ static inline bool cpus_have_cap(unsigned int num)
     return test_bit(num, cpu_hwcaps);
 }
 
+static inline bool cpus_have_const_cap(int num)
+{
+    if (static_branch_likely(&arm64_const_caps_ready))
+        return __cpus_have_const_cap(num);
+    else
+        return cpus_have_cap(num);
+}
+
 static inline void cpus_set_cap(unsigned int num)
 {
     if (num >= ARM64_NCAPS) {
@@ -145,7 +154,6 @@ static inline void cpus_set_cap(unsigned int num)
             num, ARM64_NCAPS);
     } else {
         __set_bit(num, cpu_hwcaps);
-        static_branch_enable(&cpu_hwcap_keys[num]);
     }
 }
 
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -24,6 +24,7 @@
 
 #include <linux/types.h>
 #include <linux/kvm_types.h>
+#include <asm/cpufeature.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
@@ -356,9 +357,12 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
                        unsigned long vector_ptr)
 {
     /*
-     * Call initialization code, and switch to the full blown
-     * HYP code.
+     * Call initialization code, and switch to the full blown HYP code.
+     * If the cpucaps haven't been finalized yet, something has gone very
+     * wrong, and hyp will crash and burn when it uses any
+     * cpus_have_const_cap() wrapper.
      */
+    BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
     __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
 }
 
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -975,8 +975,16 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
  */
 void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
 {
-    for (; caps->matches; caps++)
-        if (caps->enable && cpus_have_cap(caps->capability))
+    for (; caps->matches; caps++) {
+        unsigned int num = caps->capability;
+
+        if (!cpus_have_cap(num))
+            continue;
+
+        /* Ensure cpus_have_const_cap(num) works */
+        static_branch_enable(&cpu_hwcap_keys[num]);
+
+        if (caps->enable) {
             /*
              * Use stop_machine() as it schedules the work allowing
              * us to modify PSTATE, instead of on_each_cpu() which
@@ -984,6 +992,8 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
              * we return.
              */
             stop_machine(caps->enable, NULL, cpu_online_mask);
+        }
+    }
 }
 
 /*
@@ -1086,6 +1096,14 @@ static void __init setup_feature_capabilities(void)
     enable_cpu_capabilities(arm64_features);
 }
 
+DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
+EXPORT_SYMBOL(arm64_const_caps_ready);
+
+static void __init mark_const_caps_ready(void)
+{
+    static_branch_enable(&arm64_const_caps_ready);
+}
+
 /*
  * Check if the current CPU has a given feature capability.
  * Should be called from non-preemptible context.
@@ -1112,6 +1130,7 @@ void __init setup_cpu_features(void)
     /* Set the CPU feature capabilies */
     setup_feature_capabilities();
     enable_errata_workarounds();
+    mark_const_caps_ready();
     setup_elf_hwcaps(arm64_elf_hwcaps);
 
     if (system_supports_32bit_el0())
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -167,7 +167,7 @@ extern struct cred init_cred;
 # define INIT_PERF_EVENTS(tsk)
 #endif
 
-#ifdef CONFIG_PREEMPT_RT_BASE
+#if defined(CONFIG_POSIX_TIMERS) && defined(CONFIG_PREEMPT_RT_BASE)
 # define INIT_TIMER_LIST    .posix_timer_list = NULL,
 #else
 # define INIT_TIMER_LIST
diff --git a/include/linux/sched.h b/include/linux/sched.h
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -797,6 +797,7 @@ struct task_struct {
     raw_spinlock_t          pi_lock;
 
     struct wake_q_node      wake_q;
+    struct wake_q_node      wake_q_sleeper;
 
 #ifdef CONFIG_RT_MUTEXES
     /* PI waiters blocked on a rt_mutex held by this task: */
diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h
--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
@@ -46,8 +46,20 @@ static inline void wake_q_init(struct wake_q_head *head)
     head->lastp = &head->first;
 }
 
-extern void wake_q_add(struct wake_q_head *head,
-               struct task_struct *task);
+extern void __wake_q_add(struct wake_q_head *head,
+             struct task_struct *task, bool sleeper);
+static inline void wake_q_add(struct wake_q_head *head,
+                  struct task_struct *task)
+{
+    __wake_q_add(head, task, false);
+}
+
+static inline void wake_q_add_sleeper(struct wake_q_head *head,
+                      struct task_struct *task)
+{
+    __wake_q_add(head, task, true);
+}
+
 extern void __wake_up_q(struct wake_q_head *head, bool sleeper);
 static inline void wake_up_q(struct wake_q_head *head)
 {
diff --git a/kernel/fork.c b/kernel/fork.c
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -575,6 +575,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
     tsk->splice_pipe = NULL;
     tsk->task_frag.page = NULL;
     tsk->wake_q.next = NULL;
+    tsk->wake_q_sleeper.next = NULL;
 
     account_kernel_stack(tsk, 1);
 
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -236,26 +236,19 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
 }
 #endif
 
-#define STEAL_NORMAL  0
-#define STEAL_LATERAL 1
-
 /*
  * Only use with rt_mutex_waiter_{less,equal}()
  */
-#define task_to_waiter(p)   \
-    &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }
+#define task_to_waiter(p) &(struct rt_mutex_waiter) \
+    { .prio = (p)->prio, .deadline = (p)->dl.deadline, .task = (p) }
 
 static inline int
 rt_mutex_waiter_less(struct rt_mutex_waiter *left,
-             struct rt_mutex_waiter *right, int mode)
+             struct rt_mutex_waiter *right)
 {
-    if (mode == STEAL_NORMAL) {
-        if (left->prio < right->prio)
-            return 1;
-    } else {
-        if (left->prio <= right->prio)
-            return 1;
-    }
+    if (left->prio < right->prio)
+        return 1;
+
     /*
      * If both waiters have dl_prio(), we check the deadlines of the
      * associated tasks.
@@ -287,6 +280,27 @@ rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
     return 1;
 }
 
+#define STEAL_NORMAL  0
+#define STEAL_LATERAL 1
+
+static inline int
+rt_mutex_steal(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, int mode)
+{
+    struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock);
+
+    if (waiter == top_waiter || rt_mutex_waiter_less(waiter, top_waiter))
+        return 1;
+
+    /*
+     * Note that RT tasks are excluded from lateral-steals
+     * to prevent the introduction of an unbounded latency.
+     */
+    if (mode == STEAL_NORMAL || rt_task(waiter->task))
+        return 0;
+
+    return rt_mutex_waiter_equal(waiter, top_waiter);
+}
+
 static void
 rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
 {
@@ -298,7 +312,7 @@ rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
     while (*link) {
         parent = *link;
         entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
-        if (rt_mutex_waiter_less(waiter, entry, STEAL_NORMAL)) {
+        if (rt_mutex_waiter_less(waiter, entry)) {
             link = &parent->rb_left;
         } else {
             link = &parent->rb_right;
@@ -337,7 +351,7 @@ rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
     while (*link) {
         parent = *link;
         entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
-        if (rt_mutex_waiter_less(waiter, entry, STEAL_NORMAL)) {
+        if (rt_mutex_waiter_less(waiter, entry)) {
             link = &parent->rb_left;
         } else {
             link = &parent->rb_right;
@@ -847,6 +861,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
  * @task:   The task which wants to acquire the lock
  * @waiter: The waiter that is queued to the lock's wait tree if the
  *          callsite called task_blocked_on_lock(), otherwise NULL
+ * @mode:   Lock steal mode (STEAL_NORMAL, STEAL_LATERAL)
  */
 static int __try_to_take_rt_mutex(struct rt_mutex *lock,
                   struct task_struct *task,
@@ -886,14 +901,11 @@ static int __try_to_take_rt_mutex(struct rt_mutex *lock,
      */
     if (waiter) {
         /*
-         * If waiter is not the highest priority waiter of
-         * @lock, give up.
+         * If waiter is not the highest priority waiter of @lock,
+         * or its peer when lateral steal is allowed, give up.
          */
-        if (waiter != rt_mutex_top_waiter(lock)) {
-            /* XXX rt_mutex_waiter_less() ? */
+        if (!rt_mutex_steal(lock, waiter, mode))
             return 0;
-        }
-
         /*
          * We can acquire the lock. Remove the waiter from the
          * lock waiters tree.
@@ -910,25 +922,12 @@ static int __try_to_take_rt_mutex(struct rt_mutex *lock,
          * not need to be dequeued.
          */
         if (rt_mutex_has_waiters(lock)) {
-            struct task_struct *pown = rt_mutex_top_waiter(lock)->task;
-
-            if (task != pown)
-                return 0;
-
-            /*
-             * Note that RT tasks are excluded from lateral-steals
-             * to prevent the introduction of an unbounded latency.
-             */
-            if (rt_task(task))
-                mode = STEAL_NORMAL;
             /*
-             * If @task->prio is greater than or equal to
-             * the top waiter priority (kernel view),
-             * @task lost.
+             * If @task->prio is greater than the top waiter
+             * priority (kernel view), or equal to it when a
+             * lateral steal is forbidden, @task lost.
              */
-            if (!rt_mutex_waiter_less(task_to_waiter(task),
-                          rt_mutex_top_waiter(lock),
-                          mode))
+            if (!rt_mutex_steal(lock, task_to_waiter(task), mode))
                 return 0;
             /*
              * The current top waiter stays enqueued. We
@@ -1507,7 +1506,7 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
      */
     preempt_disable();
     if (waiter->savestate)
-        wake_q_add(wake_sleeper_q, waiter->task);
+        wake_q_add_sleeper(wake_sleeper_q, waiter->task);
     else
         wake_q_add(wake_q, waiter->task);
     raw_spin_unlock(&current->pi_lock);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -437,9 +437,15 @@ static bool set_nr_if_polling(struct task_struct *p)
 #endif
 #endif
 
-void wake_q_add(struct wake_q_head *head, struct task_struct *task)
+void __wake_q_add(struct wake_q_head *head, struct task_struct *task,
+          bool sleeper)
 {
-    struct wake_q_node *node = &task->wake_q;
+    struct wake_q_node *node;
+
+    if (sleeper)
+        node = &task->wake_q_sleeper;
+    else
+        node = &task->wake_q;
 
     /*
      * Atomically grab the task, if ->wake_q is !nil already it means
@@ -468,12 +474,17 @@ void __wake_up_q(struct wake_q_head *head, bool sleeper)
     while (node != WAKE_Q_TAIL) {
         struct task_struct *task;
 
-        task = container_of(node, struct task_struct, wake_q);
+        if (sleeper)
+            task = container_of(node, struct task_struct, wake_q_sleeper);
+        else
+            task = container_of(node, struct task_struct, wake_q);
         BUG_ON(!task);
         /* Task can safely be re-inserted now: */
         node = node->next;
-        task->wake_q.next = NULL;
-
+        if (sleeper)
+            task->wake_q_sleeper.next = NULL;
+        else
+            task->wake_q.next = NULL;
         /*
          * wake_up_process() implies a wmb() to pair with the queueing
          * in wake_q_add() so as not to miss wakeups.
diff --git a/localversion-rt b/localversion-rt
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt2
+-rt3
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -267,11 +267,11 @@ int free_swap_slot(swp_entry_t entry)
 {
     struct swap_slots_cache *cache;
 
-    cache = &get_cpu_var(swp_slots);
+    cache = raw_cpu_ptr(&swp_slots);
     if (use_swap_slot_cache && cache->slots_ret) {
         spin_lock_irq(&cache->free_lock);
         /* Swap slots cache may be deactivated before acquiring lock */
-        if (!use_swap_slot_cache) {
+        if (!use_swap_slot_cache || !cache->slots_ret) {
             spin_unlock_irq(&cache->free_lock);
             goto direct_free;
         }
@@ -291,7 +291,6 @@ int free_swap_slot(swp_entry_t entry)
 direct_free:
         swapcache_free_entries(&entry, 1);
     }
-    put_cpu_var(swp_slots);
 
     return 0;
 }

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-rw-r--r--  patches/arm64-cpufeature-don-t-use-mutex-in-bringup-path.patch  169
-rw-r--r--  patches/completion-use-simple-wait-queues.patch  4
-rw-r--r--  patches/cpu-rt-rework-cpu-down.patch  6
-rw-r--r--  patches/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch  6
-rw-r--r--  patches/futex-rtmutex-Cure-RT-double-blocking-issue.patch  4
-rw-r--r--  patches/genirq-update-irq_set_irqchip_state-documentation.patch  2
-rw-r--r--  patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch  2
-rw-r--r--  patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch  2
-rw-r--r--  patches/kernel-locking-use-an-exclusive-wait_q-for-sleeper.patch  141
-rw-r--r--  patches/localversion.patch  2
-rw-r--r--  patches/mm-rt-kmap-atomic-scheduling.patch  2
-rw-r--r--  patches/mm-swap-don-t-disable-preemption-while-taking-the-pe.patch  45
-rw-r--r--  patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch  2
-rw-r--r--  patches/net-wireless-warn-nort.patch  2
-rw-r--r--  patches/posix-timers-thread-posix-cpu-timers-on-rt.patch  2
-rw-r--r--  patches/preempt-lazy-support.patch  22
-rw-r--r--  patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch  8
-rw-r--r--  patches/rt-locking-Reenable-migration-accross-schedule.patch  8
-rw-r--r--  patches/rtmutex-Fix-lock-stealing-logic.patch  161
-rw-r--r--  patches/rtmutex-Provide-locked-slowpath.patch  10
-rw-r--r--  patches/rtmutex-Provide-rt_mutex_lock_state.patch  6
-rw-r--r--  patches/rtmutex-add-a-first-shot-of-ww_mutex.patch  28
-rw-r--r--  patches/series  5
-rw-r--r--  patches/workqueue-distangle-from-rq-lock.patch  12
-rw-r--r--  patches/workqueue-prevent-deadlock-stall.patch  4
25 files changed, 588 insertions, 67 deletions
diff --git a/patches/arm64-cpufeature-don-t-use-mutex-in-bringup-path.patch b/patches/arm64-cpufeature-don-t-use-mutex-in-bringup-path.patch
new file mode 100644
index 00000000000000..ce4f8554592295
--- /dev/null
+++ b/patches/arm64-cpufeature-don-t-use-mutex-in-bringup-path.patch
@@ -0,0 +1,169 @@
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Tue, 16 May 2017 15:18:05 +0100
+Subject: [PATCH] arm64/cpufeature: don't use mutex in bringup path
+
+Commit b2bb439ad99a1497daa392a527c0e52c69915ce9 upstream
+
+Currently, cpus_set_cap() calls static_branch_enable_cpuslocked(), which
+must take the jump_label mutex.
+
+We call cpus_set_cap() in the secondary bringup path, from the idle
+thread where interrupts are disabled. Taking a mutex in this path "is a
+NONO" regardless of whether it's contended, and something we must avoid.
+We didn't spot this until recently, as ___might_sleep() won't warn for
+this case until all CPUs have been brought up.
+
+This patch avoids taking the mutex in the secondary bringup path. The
+poking of static keys is deferred until enable_cpu_capabilities(), which
+runs in a suitable context on the boot CPU. To account for the static
+keys being set later, cpus_have_const_cap() is updated to use another
+static key to check whether the const cap keys have been initialised,
+falling back to the caps bitmap until this is the case.
+
+This means that users of cpus_have_const_cap() should only gain a
+single additional NOP in the fast path once the const caps are
+initialised, but should always see the current cap value.
+
+The hyp code should never dereference the caps array, since the caps are
+initialized before we run the module initcall to initialise hyp. A check
+is added to the hyp init code to document this requirement.
+
+This change will sidestep a number of issues when the upcoming hotplug
+locking rework is merged.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Marc Zyniger <marc.zyngier@arm.com>
+Reviewed-by: Suzuki Poulose <suzuki.poulose@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Cc: Christoffer Dall <christoffer.dall@linaro.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sebastian Sewior <bigeasy@linutronix.de>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm64/include/asm/cpufeature.h | 12 ++++++++++--
+ arch/arm64/include/asm/kvm_host.h | 8 ++++++--
+ arch/arm64/kernel/cpufeature.c | 23 +++++++++++++++++++++--
+ 3 files changed, 37 insertions(+), 6 deletions(-)
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -115,6 +115,7 @@ struct arm64_cpu_capabilities {
+
+ extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
+ extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
++extern struct static_key_false arm64_const_caps_ready;
+
+ bool this_cpu_has_cap(unsigned int cap);
+
+@@ -124,7 +125,7 @@ static inline bool cpu_have_feature(unsi
+ }
+
+ /* System capability check for constant caps */
+-static inline bool cpus_have_const_cap(int num)
++static inline bool __cpus_have_const_cap(int num)
+ {
+ if (num >= ARM64_NCAPS)
+ return false;
+@@ -138,6 +139,14 @@ static inline bool cpus_have_cap(unsigne
+ return test_bit(num, cpu_hwcaps);
+ }
+
++static inline bool cpus_have_const_cap(int num)
++{
++ if (static_branch_likely(&arm64_const_caps_ready))
++ return __cpus_have_const_cap(num);
++ else
++ return cpus_have_cap(num);
++}
++
+ static inline void cpus_set_cap(unsigned int num)
+ {
+ if (num >= ARM64_NCAPS) {
+@@ -145,7 +154,6 @@ static inline void cpus_set_cap(unsigned
+ num, ARM64_NCAPS);
+ } else {
+ __set_bit(num, cpu_hwcaps);
+- static_branch_enable(&cpu_hwcap_keys[num]);
+ }
+ }
+
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -24,6 +24,7 @@
+
+ #include <linux/types.h>
+ #include <linux/kvm_types.h>
++#include <asm/cpufeature.h>
+ #include <asm/kvm.h>
+ #include <asm/kvm_asm.h>
+ #include <asm/kvm_mmio.h>
+@@ -356,9 +357,12 @@ static inline void __cpu_init_hyp_mode(p
+ unsigned long vector_ptr)
+ {
+ /*
+- * Call initialization code, and switch to the full blown
+- * HYP code.
++ * Call initialization code, and switch to the full blown HYP code.
++ * If the cpucaps haven't been finalized yet, something has gone very
++ * wrong, and hyp will crash and burn when it uses any
++ * cpus_have_const_cap() wrapper.
+ */
++ BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
+ __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
+ }
+
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -975,8 +975,16 @@ void update_cpu_capabilities(const struc
+ */
+ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
+ {
+- for (; caps->matches; caps++)
+- if (caps->enable && cpus_have_cap(caps->capability))
++ for (; caps->matches; caps++) {
++ unsigned int num = caps->capability;
++
++ if (!cpus_have_cap(num))
++ continue;
++
++ /* Ensure cpus_have_const_cap(num) works */
++ static_branch_enable(&cpu_hwcap_keys[num]);
++
++ if (caps->enable) {
+ /*
+ * Use stop_machine() as it schedules the work allowing
+ * us to modify PSTATE, instead of on_each_cpu() which
+@@ -984,6 +992,8 @@ void __init enable_cpu_capabilities(cons
+ * we return.
+ */
+ stop_machine(caps->enable, NULL, cpu_online_mask);
++ }
++ }
+ }
+
+ /*
+@@ -1086,6 +1096,14 @@ static void __init setup_feature_capabil
+ enable_cpu_capabilities(arm64_features);
+ }
+
++DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
++EXPORT_SYMBOL(arm64_const_caps_ready);
++
++static void __init mark_const_caps_ready(void)
++{
++ static_branch_enable(&arm64_const_caps_ready);
++}
++
+ /*
+ * Check if the current CPU has a given feature capability.
+ * Should be called from non-preemptible context.
+@@ -1112,6 +1130,7 @@ void __init setup_cpu_features(void)
+ /* Set the CPU feature capabilies */
+ setup_feature_capabilities();
+ enable_errata_workarounds();
++ mark_const_caps_ready();
+ setup_elf_hwcaps(arm64_elf_hwcaps);
+
+ if (system_supports_32bit_el0())
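
For readers unfamiliar with the static-key dance above, here is a minimal
userspace sketch of the ordering this patch establishes: readers consult the
per-capability keys only after a global "ready" flag has been set, and fall
back to the caps bitmap before that. Plain C11 atomics stand in for jump
labels; the names mirror the kernel identifiers but this is illustrative
code, not the kernel implementation.

/* Sketch of the "ready key + bitmap fallback" pattern (not kernel code). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NCAPS 64

static atomic_bool const_caps_ready;   /* set once all keys were "poked"   */
static atomic_bool cap_keys[NCAPS];    /* stand-in for cpu_hwcap_keys[]    */
static unsigned long caps_bitmap;      /* stand-in for the cpu_hwcaps bitmap */

static bool caps_have_cap(int num)     /* slow path: bitmap test */
{
	return num < NCAPS && (caps_bitmap & (1UL << num));
}

static bool caps_have_const_cap(int num)   /* fast path once ready */
{
	if (atomic_load(&const_caps_ready))
		return num < NCAPS && atomic_load(&cap_keys[num]);
	return caps_have_cap(num);
}

static void set_cap(int num)           /* bringup path: bitmap only, no key */
{
	caps_bitmap |= 1UL << num;
}

static void enable_caps(void)          /* later, in a sleepable context */
{
	for (int i = 0; i < NCAPS; i++)
		if (caps_have_cap(i))
			atomic_store(&cap_keys[i], true);
	atomic_store(&const_caps_ready, true);
}

int main(void)
{
	set_cap(3);                                        /* secondary bringup */
	printf("%d\n", caps_have_const_cap(3));            /* 1, via the bitmap */
	enable_caps();
	printf("%d\n", caps_have_const_cap(3));            /* 1, via the keys   */
	return 0;
}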
diff --git a/patches/completion-use-simple-wait-queues.patch b/patches/completion-use-simple-wait-queues.patch
index d7aeb69a61db49..44e26d774c3864 100644
--- a/patches/completion-use-simple-wait-queues.patch
+++ b/patches/completion-use-simple-wait-queues.patch
@@ -276,7 +276,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
EXPORT_SYMBOL(completion_done);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7476,7 +7476,10 @@ void migrate_disable(void)
+@@ -7487,7 +7487,10 @@ void migrate_disable(void)
return;
}
#ifdef CONFIG_SCHED_DEBUG
@@ -288,7 +288,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
if (p->migrate_disable) {
-@@ -7509,7 +7512,10 @@ void migrate_enable(void)
+@@ -7520,7 +7523,10 @@ void migrate_enable(void)
}
#ifdef CONFIG_SCHED_DEBUG
diff --git a/patches/cpu-rt-rework-cpu-down.patch b/patches/cpu-rt-rework-cpu-down.patch
index 4b106ced444aa3..e68ab60434ac9f 100644
--- a/patches/cpu-rt-rework-cpu-down.patch
+++ b/patches/cpu-rt-rework-cpu-down.patch
@@ -56,7 +56,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1346,6 +1346,10 @@ extern int task_can_attach(struct task_s
+@@ -1347,6 +1347,10 @@ extern int task_can_attach(struct task_s
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
@@ -67,7 +67,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#else
static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
-@@ -1356,6 +1360,9 @@ static inline int set_cpus_allowed_ptr(s
+@@ -1357,6 +1361,9 @@ static inline int set_cpus_allowed_ptr(s
return -EINVAL;
return 0;
}
@@ -438,7 +438,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* interrupt affinities.
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1111,6 +1111,84 @@ void do_set_cpus_allowed(struct task_str
+@@ -1122,6 +1122,84 @@ void do_set_cpus_allowed(struct task_str
__do_set_cpus_allowed_tail(p, new_mask);
}
diff --git a/patches/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch b/patches/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
index c574a092c809e5..38a0ece74ccb29 100644
--- a/patches/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
+++ b/patches/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
@@ -78,7 +78,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* intel_pipe_update_start() - start update of a set of display registers
* @crtc: the crtc of which the registers are going to be updated
-@@ -95,7 +98,7 @@ void intel_pipe_update_start(struct inte
+@@ -98,7 +101,7 @@ void intel_pipe_update_start(struct inte
min = vblank_start - intel_usecs_to_scanlines(adjusted_mode, 100);
max = vblank_start - 1;
@@ -87,7 +87,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (min <= 0 || max <= 0)
return;
-@@ -125,11 +128,11 @@ void intel_pipe_update_start(struct inte
+@@ -128,11 +131,11 @@ void intel_pipe_update_start(struct inte
break;
}
@@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
finish_wait(wq, &wait);
-@@ -181,7 +184,7 @@ void intel_pipe_update_end(struct intel_
+@@ -202,7 +205,7 @@ void intel_pipe_update_end(struct intel_
crtc->base.state->event = NULL;
}
diff --git a/patches/futex-rtmutex-Cure-RT-double-blocking-issue.patch b/patches/futex-rtmutex-Cure-RT-double-blocking-issue.patch
index 9ff5501968c559..3501f53fb95d25 100644
--- a/patches/futex-rtmutex-Cure-RT-double-blocking-issue.patch
+++ b/patches/futex-rtmutex-Cure-RT-double-blocking-issue.patch
@@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -2408,6 +2408,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -2407,6 +2407,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
struct hrtimer_sleeper *to,
struct rt_mutex_waiter *waiter)
{
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int ret;
raw_spin_lock_irq(&lock->wait_lock);
-@@ -2419,6 +2420,24 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -2418,6 +2419,24 @@ int rt_mutex_wait_proxy_lock(struct rt_m
* have to fix that up.
*/
fixup_rt_mutex_waiters(lock);
diff --git a/patches/genirq-update-irq_set_irqchip_state-documentation.patch b/patches/genirq-update-irq_set_irqchip_state-documentation.patch
index 59de23114e209d..75131c2118bba5 100644
--- a/patches/genirq-update-irq_set_irqchip_state-documentation.patch
+++ b/patches/genirq-update-irq_set_irqchip_state-documentation.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -2113,7 +2113,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state)
+@@ -2115,7 +2115,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state)
* This call sets the internal irqchip state of an interrupt,
* depending on the value of @which.
*
diff --git a/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch b/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
index c32694faab8a26..18e5ffba5980df 100644
--- a/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
+++ b/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -12113,7 +12113,7 @@ void intel_check_page_flip(struct drm_i9
+@@ -12115,7 +12115,7 @@ void intel_check_page_flip(struct drm_i9
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
struct intel_flip_work *work;
diff --git a/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch b/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
index 5ba4ad58d1d16c..b56f6d991a5ebf 100644
--- a/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
+++ b/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
@@ -81,7 +81,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
-@@ -1340,6 +1348,9 @@ static int
+@@ -1342,6 +1350,9 @@ static int
irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
}
diff --git a/patches/kernel-locking-use-an-exclusive-wait_q-for-sleeper.patch b/patches/kernel-locking-use-an-exclusive-wait_q-for-sleeper.patch
new file mode 100644
index 00000000000000..3474123a12ac4c
--- /dev/null
+++ b/patches/kernel-locking-use-an-exclusive-wait_q-for-sleeper.patch
@@ -0,0 +1,141 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 22 Jun 2017 17:53:34 +0200
+Subject: [PATCH] kernel/locking: use an exclusive wait_q for sleepers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+If a task is queued as a sleeper for a wakeup and never goes to
+schedule() (because it just obtained the lock) then it will receive a
+spurious wake up which is not "bad" but expected. Until that wake
+up happens this task cannot be enqueued for any wake ups handled by the
+WAKE_Q infrastructure (because a task can only be enqueued once). This
+wouldn't be bad if we used the same wakeup mechanism for the wake
+up of sleepers as we do for "normal" wake ups. But we don't…
+
+So.
+ T1 T2 T3
+ spin_lock(x) spin_unlock(x);
+ wake_q_add_sleeper(q1, T1)
+ spin_unlock(x)
+ set_state(TASK_INTERRUPTIBLE)
+ if (!condition)
+ schedule()
+ condition = true
+ wake_q_add(q2, T1)
+ // T1 not added, still enqueued
+ wake_up_q(q2)
+ wake_up_q_sleeper(q1)
+ // T1 not woken up, wrong task state
+
+In order to solve this race this patch adds a wake_q_node for the
+sleeper case.
+
+Reported-by: Mike Galbraith <efault@gmx.de>
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/sched.h | 1 +
+ include/linux/sched/wake_q.h | 16 ++++++++++++++--
+ kernel/fork.c | 1 +
+ kernel/locking/rtmutex.c | 2 +-
+ kernel/sched/core.c | 21 ++++++++++++++++-----
+ 5 files changed, 33 insertions(+), 8 deletions(-)
+
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -800,6 +800,7 @@ struct task_struct {
+ raw_spinlock_t pi_lock;
+
+ struct wake_q_node wake_q;
++ struct wake_q_node wake_q_sleeper;
+
+ #ifdef CONFIG_RT_MUTEXES
+ /* PI waiters blocked on a rt_mutex held by this task: */
+--- a/include/linux/sched/wake_q.h
++++ b/include/linux/sched/wake_q.h
+@@ -46,8 +46,20 @@ static inline void wake_q_init(struct wa
+ head->lastp = &head->first;
+ }
+
+-extern void wake_q_add(struct wake_q_head *head,
+- struct task_struct *task);
++extern void __wake_q_add(struct wake_q_head *head,
++ struct task_struct *task, bool sleeper);
++static inline void wake_q_add(struct wake_q_head *head,
++ struct task_struct *task)
++{
++ __wake_q_add(head, task, false);
++}
++
++static inline void wake_q_add_sleeper(struct wake_q_head *head,
++ struct task_struct *task)
++{
++ __wake_q_add(head, task, true);
++}
++
+ extern void __wake_up_q(struct wake_q_head *head, bool sleeper);
+ static inline void wake_up_q(struct wake_q_head *head)
+ {
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -575,6 +575,7 @@ static struct task_struct *dup_task_stru
+ tsk->splice_pipe = NULL;
+ tsk->task_frag.page = NULL;
+ tsk->wake_q.next = NULL;
++ tsk->wake_q_sleeper.next = NULL;
+
+ account_kernel_stack(tsk, 1);
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1463,7 +1463,7 @@ static void mark_wakeup_next_waiter(stru
+ */
+ preempt_disable();
+ if (waiter->savestate)
+- wake_q_add(wake_sleeper_q, waiter->task);
++ wake_q_add_sleeper(wake_sleeper_q, waiter->task);
+ else
+ wake_q_add(wake_q, waiter->task);
+ raw_spin_unlock(&current->pi_lock);
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -437,9 +437,15 @@ static bool set_nr_if_polling(struct tas
+ #endif
+ #endif
+
+-void wake_q_add(struct wake_q_head *head, struct task_struct *task)
++void __wake_q_add(struct wake_q_head *head, struct task_struct *task,
++ bool sleeper)
+ {
+- struct wake_q_node *node = &task->wake_q;
++ struct wake_q_node *node;
++
++ if (sleeper)
++ node = &task->wake_q_sleeper;
++ else
++ node = &task->wake_q;
+
+ /*
+ * Atomically grab the task, if ->wake_q is !nil already it means
+@@ -468,12 +474,17 @@ void __wake_up_q(struct wake_q_head *hea
+ while (node != WAKE_Q_TAIL) {
+ struct task_struct *task;
+
+- task = container_of(node, struct task_struct, wake_q);
++ if (sleeper)
++ task = container_of(node, struct task_struct, wake_q_sleeper);
++ else
++ task = container_of(node, struct task_struct, wake_q);
+ BUG_ON(!task);
+ /* Task can safely be re-inserted now: */
+ node = node->next;
+- task->wake_q.next = NULL;
+-
++ if (sleeper)
++ task->wake_q_sleeper.next = NULL;
++ else
++ task->wake_q.next = NULL;
+ /*
+ * wake_up_process() implies a wmb() to pair with the queueing
+ * in wake_q_add() so as not to miss wakeups.
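
A minimal sketch of why the extra wake_q_node matters: with a single
intrusive node a task can sit on only one pending-wakeup list at a time, so
a sleeper enqueue would block a later regular enqueue (or the other way
around). With a second node the two wakeup classes no longer interfere.
Illustrative userspace C, not the kernel wake_q implementation (the kernel
uses cmpxchg() where the sketch uses a plain check).

/* Two intrusive nodes per task: one per wakeup class (not kernel code). */
#include <stdbool.h>
#include <stdio.h>

struct wake_q_node { struct wake_q_node *next; };

struct task {
	const char *name;
	struct wake_q_node wake_q;          /* regular wakeups          */
	struct wake_q_node wake_q_sleeper;  /* sleeping-spinlock wakeups */
};

struct wake_q_head { struct wake_q_node *first, **lastp; };

#define WAKE_Q_TAIL ((struct wake_q_node *)0x1)

static void wake_q_init(struct wake_q_head *h)
{
	h->first = WAKE_Q_TAIL;
	h->lastp = &h->first;
}

/* Returns false if the node is already queued somewhere. */
static bool wake_q_add_node(struct wake_q_head *h, struct wake_q_node *n)
{
	if (n->next)
		return false;
	n->next = WAKE_Q_TAIL;
	*h->lastp = n;
	h->lastp = &n->next;
	return true;
}

int main(void)
{
	struct task t = { .name = "T1" };
	struct wake_q_head sleeper_q, normal_q;

	wake_q_init(&sleeper_q);
	wake_q_init(&normal_q);

	/* Queued as a sleeper by the rtmutex unlock path ...              */
	wake_q_add_node(&sleeper_q, &t.wake_q_sleeper);
	/* ... and still eligible for a regular wakeup via its own node.   */
	printf("%s regular enqueue ok: %d\n", t.name,
	       wake_q_add_node(&normal_q, &t.wake_q));
	return 0;
}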
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 279489a1d1455d..e36eb4b6666a7e 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt2
++-rt3
diff --git a/patches/mm-rt-kmap-atomic-scheduling.patch b/patches/mm-rt-kmap-atomic-scheduling.patch
index e6825847fe6258..9748516d525fb7 100644
--- a/patches/mm-rt-kmap-atomic-scheduling.patch
+++ b/patches/mm-rt-kmap-atomic-scheduling.patch
@@ -229,7 +229,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
-@@ -1062,6 +1063,12 @@ struct task_struct {
+@@ -1063,6 +1064,12 @@ struct task_struct {
int softirq_nestcnt;
unsigned int softirqs_raised;
#endif
diff --git a/patches/mm-swap-don-t-disable-preemption-while-taking-the-pe.patch b/patches/mm-swap-don-t-disable-preemption-while-taking-the-pe.patch
new file mode 100644
index 00000000000000..e61ed54e3cf6be
--- /dev/null
+++ b/patches/mm-swap-don-t-disable-preemption-while-taking-the-pe.patch
@@ -0,0 +1,45 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 23 Jun 2017 11:43:30 +0200
+Subject: [PATCH] mm, swap: don't disable preemption while taking the per-CPU
+ cache
+
+get_cpu_var() disables preemption and returns the per-CPU version of the
+variable. Disabling preemption is useful to ensure atomic access to the
+variable within the critical section.
+In this case however, after the per-CPU version of the variable is
+obtained the ->free_lock is acquired. For that reason it seems the raw
+accessor could be used. The only catch is that ->slots_ret has to be
+re-tested under the lock (with preemption disabled it could not have been
+set to NULL in between).
+This popped up during PREEMPT-RT testing because it tries to take
+spinlocks in a preempt disabled section.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/swap_slots.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/mm/swap_slots.c
++++ b/mm/swap_slots.c
+@@ -267,11 +267,11 @@ int free_swap_slot(swp_entry_t entry)
+ {
+ struct swap_slots_cache *cache;
+
+- cache = &get_cpu_var(swp_slots);
++ cache = raw_cpu_ptr(&swp_slots);
+ if (use_swap_slot_cache && cache->slots_ret) {
+ spin_lock_irq(&cache->free_lock);
+ /* Swap slots cache may be deactivated before acquiring lock */
+- if (!use_swap_slot_cache) {
++ if (!use_swap_slot_cache || !cache->slots_ret) {
+ spin_unlock_irq(&cache->free_lock);
+ goto direct_free;
+ }
+@@ -291,7 +291,6 @@ int free_swap_slot(swp_entry_t entry)
+ direct_free:
+ swapcache_free_entries(&entry, 1);
+ }
+- put_cpu_var(swp_slots);
+
+ return 0;
+ }
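
The pattern behind the change is check/lock/re-check: the cheap test is done
without any protection, and everything it depended on is re-tested once the
lock is held, because it may have changed in between. A small userspace
sketch with pthreads; the names are illustrative and do not match the kernel
code.

/* Check/lock/re-check around a cache that may be torn down concurrently. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct cache {
	pthread_mutex_t lock;
	int *slots_ret;             /* may be cleared by a disable path */
};

static bool cache_enabled = true;

static bool try_cached_free(struct cache *c)
{
	if (!cache_enabled || !c->slots_ret)      /* unlocked fast check */
		return false;

	pthread_mutex_lock(&c->lock);
	/* Re-check: the cache may have been deactivated before we got here. */
	if (!cache_enabled || !c->slots_ret) {
		pthread_mutex_unlock(&c->lock);
		return false;
	}
	/* ... hand the entry back to the cache under the lock ... */
	pthread_mutex_unlock(&c->lock);
	return true;
}

int main(void)
{
	static int slots[64];
	struct cache c = { PTHREAD_MUTEX_INITIALIZER, slots };

	printf("cached: %d\n", try_cached_free(&c));   /* 1 */
	cache_enabled = false;
	printf("cached: %d\n", try_cached_free(&c));   /* 0 */
	return 0;
}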
diff --git a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
index 7b8f4e800c5078..762f1bfdc96045 100644
--- a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
+++ b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
@@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1065,6 +1065,9 @@ struct task_struct {
+@@ -1066,6 +1066,9 @@ struct task_struct {
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
#endif
diff --git a/patches/net-wireless-warn-nort.patch b/patches/net-wireless-warn-nort.patch
index 603bf54f690465..782144a7938041 100644
--- a/patches/net-wireless-warn-nort.patch
+++ b/patches/net-wireless-warn-nort.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
-@@ -4224,7 +4224,7 @@ void ieee80211_rx_napi(struct ieee80211_
+@@ -4229,7 +4229,7 @@ void ieee80211_rx_napi(struct ieee80211_
struct ieee80211_supported_band *sband;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
diff --git a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
index 284179e10cb21f..20226ae7032ffa 100644
--- a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
+++ b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
# define INIT_PERF_EVENTS(tsk)
#endif
-+#ifdef CONFIG_PREEMPT_RT_BASE
++#if defined(CONFIG_POSIX_TIMERS) && defined(CONFIG_PREEMPT_RT_BASE)
+# define INIT_TIMER_LIST .posix_timer_list = NULL,
+#else
+# define INIT_TIMER_LIST
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index fe5c3b0c154145..0492f4aadda50e 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -140,7 +140,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1513,6 +1513,44 @@ static inline int test_tsk_need_resched(
+@@ -1514,6 +1514,44 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -233,7 +233,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
default PREEMPT_NONE
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -517,6 +517,48 @@ void resched_curr(struct rq *rq)
+@@ -528,6 +528,48 @@ void resched_curr(struct rq *rq)
trace_sched_wake_idle_without_ipi(cpu);
}
@@ -282,7 +282,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
-@@ -2525,6 +2567,9 @@ int sched_fork(unsigned long clone_flags
+@@ -2536,6 +2578,9 @@ int sched_fork(unsigned long clone_flags
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -292,7 +292,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -3516,6 +3561,7 @@ static void __sched notrace __schedule(b
+@@ -3527,6 +3572,7 @@ static void __sched notrace __schedule(b
next = pick_next_task(rq, prev, &rf);
clear_tsk_need_resched(prev);
@@ -300,7 +300,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_preempt_need_resched();
if (likely(prev != next)) {
-@@ -3667,6 +3713,30 @@ static void __sched notrace preempt_sche
+@@ -3678,6 +3724,30 @@ static void __sched notrace preempt_sche
} while (need_resched());
}
@@ -331,7 +331,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PREEMPT
/*
* this is the entry point to schedule() from in-kernel preemption
-@@ -3681,7 +3751,8 @@ asmlinkage __visible void __sched notrac
+@@ -3692,7 +3762,8 @@ asmlinkage __visible void __sched notrac
*/
if (likely(!preemptible()))
return;
@@ -341,7 +341,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
-@@ -3708,6 +3779,9 @@ asmlinkage __visible void __sched notrac
+@@ -3719,6 +3790,9 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
@@ -351,7 +351,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
/*
* Because the function tracer can trace preempt_count_sub()
-@@ -5537,7 +5611,9 @@ void init_idle(struct task_struct *idle,
+@@ -5548,7 +5622,9 @@ void init_idle(struct task_struct *idle,
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
@@ -362,7 +362,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The idle tasks have their own, simple scheduling class:
*/
-@@ -7512,6 +7588,7 @@ void migrate_disable(void)
+@@ -7523,6 +7599,7 @@ void migrate_disable(void)
/* get_online_cpus(); */
preempt_disable();
@@ -370,7 +370,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pin_current_cpu();
p->migrate_disable = 1;
-@@ -7581,6 +7658,7 @@ void migrate_enable(void)
+@@ -7592,6 +7669,7 @@ void migrate_enable(void)
arg.dest_cpu = dest_cpu;
unpin_current_cpu();
@@ -378,7 +378,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_enable();
stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
tlb_migrate_finish(p->mm);
-@@ -7591,6 +7669,7 @@ void migrate_enable(void)
+@@ -7602,6 +7680,7 @@ void migrate_enable(void)
}
unpin_current_cpu();
/* put_online_cpus(); */
diff --git a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
index c512ed2bfbe370..35ae8a1dcc809f 100644
--- a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+++ b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
(task->flags & PF_FROZEN) == 0 && \
(task->state & TASK_NOLOAD) == 0)
-@@ -1500,6 +1496,51 @@ static inline int test_tsk_need_resched(
+@@ -1501,6 +1497,51 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock_irq(&task->sighand->siglock);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1363,6 +1363,18 @@ int migrate_swap(struct task_struct *cur
+@@ -1374,6 +1374,18 @@ int migrate_swap(struct task_struct *cur
return ret;
}
@@ -134,7 +134,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* wait_task_inactive - wait for a thread to unschedule.
*
-@@ -1407,7 +1419,7 @@ unsigned long wait_task_inactive(struct
+@@ -1418,7 +1430,7 @@ unsigned long wait_task_inactive(struct
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
@@ -143,7 +143,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
cpu_relax();
}
-@@ -1422,7 +1434,8 @@ unsigned long wait_task_inactive(struct
+@@ -1433,7 +1445,8 @@ unsigned long wait_task_inactive(struct
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
diff --git a/patches/rt-locking-Reenable-migration-accross-schedule.patch b/patches/rt-locking-Reenable-migration-accross-schedule.patch
index 3fb05c261c1c6a..7a5f17a30c8b5f 100644
--- a/patches/rt-locking-Reenable-migration-accross-schedule.patch
+++ b/patches/rt-locking-Reenable-migration-accross-schedule.patch
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -981,14 +981,19 @@ static int __try_to_take_rt_mutex(struct
+@@ -980,14 +980,19 @@ static int __try_to_take_rt_mutex(struct
* preemptible spin_lock functions:
*/
static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
@@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
-@@ -1046,7 +1051,8 @@ static int task_blocks_on_rt_mutex(struc
+@@ -1045,7 +1050,8 @@ static int task_blocks_on_rt_mutex(struc
* We store the current state under p->pi_lock in p->saved_state and
* the try_to_wake_up() code handles this accordingly.
*/
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct task_struct *lock_owner, *self = current;
struct rt_mutex_waiter waiter, *top_waiter;
-@@ -1090,8 +1096,13 @@ static void noinline __sched rt_spin_lo
+@@ -1089,8 +1095,13 @@ static void noinline __sched rt_spin_lo
debug_rt_mutex_print_deadlock(&waiter);
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_lock_irqsave(&lock->wait_lock, flags);
-@@ -1149,38 +1160,35 @@ static void noinline __sched rt_spin_lo
+@@ -1148,38 +1159,35 @@ static void noinline __sched rt_spin_lo
void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock)
{
diff --git a/patches/rtmutex-Fix-lock-stealing-logic.patch b/patches/rtmutex-Fix-lock-stealing-logic.patch
new file mode 100644
index 00000000000000..6321ecf740d8dd
--- /dev/null
+++ b/patches/rtmutex-Fix-lock-stealing-logic.patch
@@ -0,0 +1,161 @@
+From: Mike Galbraith <efault@gmx.de>
+Date: Fri, 23 Jun 2017 09:37:14 +0200
+Subject: rtmutex: Fix lock stealing logic
+
+1. When trying to acquire an rtmutex, we first try to grab it without
+queueing the waiter, and explicitly check for that initial attempt
+in the !waiter path of __try_to_take_rt_mutex(). Checking whether
+the lock taker is top waiter before allowing a steal attempt in that
+path is a thinko: the lock taker has not yet blocked.
+
+2. It seems wrong to change the definition of rt_mutex_waiter_less()
+to mean less or perhaps equal when we have an rt_mutex_waiter_equal().
+
+Remove the thinko, restore rt_mutex_waiter_less(), implement and use
+rt_mutex_steal() based upon rt_mutex_waiter_less/equal(), moving all
+qualification criteria into the function itself.
+
+Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Mike Galbraith <efault@gmx.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/locking/rtmutex.c | 75 +++++++++++++++++++++++------------------------
+ 1 file changed, 37 insertions(+), 38 deletions(-)
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -235,26 +235,19 @@ static inline bool unlock_rt_mutex_safe(
+ }
+ #endif
+
+-#define STEAL_NORMAL 0
+-#define STEAL_LATERAL 1
+-
+ /*
+ * Only use with rt_mutex_waiter_{less,equal}()
+ */
+-#define task_to_waiter(p) \
+- &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }
++#define task_to_waiter(p) &(struct rt_mutex_waiter) \
++ { .prio = (p)->prio, .deadline = (p)->dl.deadline, .task = (p) }
+
+ static inline int
+ rt_mutex_waiter_less(struct rt_mutex_waiter *left,
+- struct rt_mutex_waiter *right, int mode)
++ struct rt_mutex_waiter *right)
+ {
+- if (mode == STEAL_NORMAL) {
+- if (left->prio < right->prio)
+- return 1;
+- } else {
+- if (left->prio <= right->prio)
+- return 1;
+- }
++ if (left->prio < right->prio)
++ return 1;
++
+ /*
+ * If both waiters have dl_prio(), we check the deadlines of the
+ * associated tasks.
+@@ -286,6 +279,27 @@ rt_mutex_waiter_equal(struct rt_mutex_wa
+ return 1;
+ }
+
++#define STEAL_NORMAL 0
++#define STEAL_LATERAL 1
++
++static inline int
++rt_mutex_steal(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, int mode)
++{
++ struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock);
++
++ if (waiter == top_waiter || rt_mutex_waiter_less(waiter, top_waiter))
++ return 1;
++
++ /*
++ * Note that RT tasks are excluded from lateral-steals
++ * to prevent the introduction of an unbounded latency.
++ */
++ if (mode == STEAL_NORMAL || rt_task(waiter->task))
++ return 0;
++
++ return rt_mutex_waiter_equal(waiter, top_waiter);
++}
++
+ static void
+ rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
+ {
+@@ -297,7 +311,7 @@ rt_mutex_enqueue(struct rt_mutex *lock,
+ while (*link) {
+ parent = *link;
+ entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
+- if (rt_mutex_waiter_less(waiter, entry, STEAL_NORMAL)) {
++ if (rt_mutex_waiter_less(waiter, entry)) {
+ link = &parent->rb_left;
+ } else {
+ link = &parent->rb_right;
+@@ -336,7 +350,7 @@ rt_mutex_enqueue_pi(struct task_struct *
+ while (*link) {
+ parent = *link;
+ entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
+- if (rt_mutex_waiter_less(waiter, entry, STEAL_NORMAL)) {
++ if (rt_mutex_waiter_less(waiter, entry)) {
+ link = &parent->rb_left;
+ } else {
+ link = &parent->rb_right;
+@@ -846,6 +860,7 @@ static int rt_mutex_adjust_prio_chain(st
+ * @task: The task which wants to acquire the lock
+ * @waiter: The waiter that is queued to the lock's wait tree if the
+ * callsite called task_blocked_on_lock(), otherwise NULL
++ * @mode: Lock steal mode (STEAL_NORMAL, STEAL_LATERAL)
+ */
+ static int __try_to_take_rt_mutex(struct rt_mutex *lock,
+ struct task_struct *task,
+@@ -885,14 +900,11 @@ static int __try_to_take_rt_mutex(struct
+ */
+ if (waiter) {
+ /*
+- * If waiter is not the highest priority waiter of
+- * @lock, give up.
++ * If waiter is not the highest priority waiter of @lock,
++ * or its peer when lateral steal is allowed, give up.
+ */
+- if (waiter != rt_mutex_top_waiter(lock)) {
+- /* XXX rt_mutex_waiter_less() ? */
++ if (!rt_mutex_steal(lock, waiter, mode))
+ return 0;
+- }
+-
+ /*
+ * We can acquire the lock. Remove the waiter from the
+ * lock waiters tree.
+@@ -909,25 +921,12 @@ static int __try_to_take_rt_mutex(struct
+ * not need to be dequeued.
+ */
+ if (rt_mutex_has_waiters(lock)) {
+- struct task_struct *pown = rt_mutex_top_waiter(lock)->task;
+-
+- if (task != pown)
+- return 0;
+-
+- /*
+- * Note that RT tasks are excluded from lateral-steals
+- * to prevent the introduction of an unbounded latency.
+- */
+- if (rt_task(task))
+- mode = STEAL_NORMAL;
+ /*
+- * If @task->prio is greater than or equal to
+- * the top waiter priority (kernel view),
+- * @task lost.
++ * If @task->prio is greater than the top waiter
++ * priority (kernel view), or equal to it when a
++ * lateral steal is forbidden, @task lost.
+ */
+- if (!rt_mutex_waiter_less(task_to_waiter(task),
+- rt_mutex_top_waiter(lock),
+- mode))
++ if (!rt_mutex_steal(lock, task_to_waiter(task), mode))
+ return 0;
+ /*
+ * The current top waiter stays enqueued. We
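
Condensed to plain C, the steal decision above amounts to: a strictly
higher-priority waiter may always steal, while an equal-priority ("lateral")
steal is allowed only for non-RT tasks and only in STEAL_LATERAL mode. A
small illustrative sketch follows (lower numbers mean higher priority, as in
the kernel); it is not the kernel code itself.

/* Standalone model of the rt_mutex_steal() decision (not kernel code). */
#include <stdbool.h>
#include <stdio.h>

#define STEAL_NORMAL  0
#define STEAL_LATERAL 1

struct waiter { int prio; bool rt; };

static bool may_steal(const struct waiter *w, const struct waiter *top, int mode)
{
	if (w->prio < top->prio)              /* strictly higher priority */
		return true;
	if (mode == STEAL_NORMAL || w->rt)    /* no lateral steal for RT  */
		return false;
	return w->prio == top->prio;          /* lateral steal of a peer  */
}

int main(void)
{
	struct waiter top     = { .prio = 20, .rt = false };
	struct waiter sleeper = { .prio = 20, .rt = false };
	struct waiter rt_task = { .prio = 20, .rt = true  };

	printf("%d\n", may_steal(&sleeper, &top, STEAL_LATERAL)); /* 1 */
	printf("%d\n", may_steal(&rt_task, &top, STEAL_LATERAL)); /* 0 */
	printf("%d\n", may_steal(&sleeper, &top, STEAL_NORMAL));  /* 0 */
	return 0;
}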
diff --git a/patches/rtmutex-Provide-locked-slowpath.patch b/patches/rtmutex-Provide-locked-slowpath.patch
index c86e9e1b0a7e75..02c417da3c7f4e 100644
--- a/patches/rtmutex-Provide-locked-slowpath.patch
+++ b/patches/rtmutex-Provide-locked-slowpath.patch
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1752,30 +1752,13 @@ static void ww_mutex_account_lock(struct
+@@ -1751,30 +1751,13 @@ static void ww_mutex_account_lock(struct
}
#endif
@@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_PREEMPT_RT_FULL
if (ww_ctx) {
-@@ -1791,7 +1774,6 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1790,7 +1773,6 @@ rt_mutex_slowlock(struct rt_mutex *lock,
if (try_to_take_rt_mutex(lock, current, NULL)) {
if (ww_ctx)
ww_mutex_account_lock(lock, ww_ctx);
@@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -1801,13 +1783,13 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1800,13 +1782,13 @@ rt_mutex_slowlock(struct rt_mutex *lock,
if (unlikely(timeout))
hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* ww_mutex received EDEADLK, let it become EALREADY */
ret = __mutex_lock_check_stamp(lock, ww_ctx);
BUG_ON(!ret);
-@@ -1816,10 +1798,10 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1815,10 +1797,10 @@ rt_mutex_slowlock(struct rt_mutex *lock,
if (unlikely(ret)) {
__set_current_state(TASK_RUNNING);
if (rt_mutex_has_waiters(lock))
@@ -94,7 +94,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
} else if (ww_ctx) {
ww_mutex_account_lock(lock, ww_ctx);
}
-@@ -1829,6 +1811,36 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1828,6 +1810,36 @@ rt_mutex_slowlock(struct rt_mutex *lock,
* unconditionally. We might have to fix that up.
*/
fixup_rt_mutex_waiters(lock);
diff --git a/patches/rtmutex-Provide-rt_mutex_lock_state.patch b/patches/rtmutex-Provide-rt_mutex_lock_state.patch
index 931c591523aaf7..3be875875aa790 100644
--- a/patches/rtmutex-Provide-rt_mutex_lock_state.patch
+++ b/patches/rtmutex-Provide-rt_mutex_lock_state.patch
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern int rt_mutex_timed_lock(struct rt_mutex *lock,
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -2020,21 +2020,32 @@ rt_mutex_fastunlock(struct rt_mutex *loc
+@@ -2019,21 +2019,32 @@ rt_mutex_fastunlock(struct rt_mutex *loc
}
/**
@@ -61,7 +61,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* @lock: the rt_mutex to be locked
*
* Returns:
-@@ -2043,20 +2054,10 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
+@@ -2042,20 +2053,10 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
*/
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
@@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* rt_mutex_lock_killable - lock a rt_mutex killable
*
-@@ -2066,16 +2067,21 @@ int __sched rt_mutex_futex_trylock(struc
+@@ -2065,16 +2066,21 @@ int __sched rt_mutex_futex_trylock(struc
* Returns:
* 0 on success
* -EINTR when interrupted by a signal
diff --git a/patches/rtmutex-add-a-first-shot-of-ww_mutex.patch b/patches/rtmutex-add-a-first-shot-of-ww_mutex.patch
index 4a16d5e24ba4d4..ea217e46dcefe3 100644
--- a/patches/rtmutex-add-a-first-shot-of-ww_mutex.patch
+++ b/patches/rtmutex-add-a-first-shot-of-ww_mutex.patch
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
#include "rtmutex_common.h"
-@@ -1286,8 +1287,8 @@ int atomic_dec_and_spin_lock(atomic_t *a
+@@ -1285,8 +1286,8 @@ int atomic_dec_and_spin_lock(atomic_t *a
}
EXPORT_SYMBOL(atomic_dec_and_spin_lock);
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
-@@ -1301,6 +1302,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init);
+@@ -1300,6 +1301,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init);
#endif /* PREEMPT_RT_FULL */
@@ -87,7 +87,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
static inline int
try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
struct rt_mutex_waiter *waiter)
-@@ -1581,7 +1616,8 @@ void rt_mutex_init_waiter(struct rt_mute
+@@ -1580,7 +1615,8 @@ void rt_mutex_init_waiter(struct rt_mute
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
struct hrtimer_sleeper *timeout,
@@ -97,7 +97,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
{
int ret = 0;
-@@ -1599,6 +1635,12 @@ static int __sched
+@@ -1598,6 +1634,12 @@ static int __sched
break;
}
@@ -110,7 +110,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
raw_spin_unlock_irq(&lock->wait_lock);
debug_rt_mutex_print_deadlock(waiter);
-@@ -1633,13 +1675,91 @@ static void rt_mutex_handle_deadlock(int
+@@ -1632,13 +1674,91 @@ static void rt_mutex_handle_deadlock(int
}
}
@@ -203,7 +203,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
{
struct rt_mutex_waiter waiter;
unsigned long flags;
-@@ -1657,8 +1777,20 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1656,8 +1776,20 @@ rt_mutex_slowlock(struct rt_mutex *lock,
*/
raw_spin_lock_irqsave(&lock->wait_lock, flags);
@@ -224,7 +224,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
return 0;
}
-@@ -1673,13 +1805,23 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1672,13 +1804,23 @@ rt_mutex_slowlock(struct rt_mutex *lock,
if (likely(!ret))
/* sleep on the mutex */
@@ -250,7 +250,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
/*
-@@ -1809,29 +1951,33 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1808,29 +1950,33 @@ static bool __sched rt_mutex_slowunlock(
*/
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
@@ -288,7 +288,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
static inline int
-@@ -1882,7 +2028,7 @@ void __sched rt_mutex_lock(struct rt_mut
+@@ -1881,7 +2027,7 @@ void __sched rt_mutex_lock(struct rt_mut
{
might_sleep();
@@ -297,7 +297,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
-@@ -1899,7 +2045,7 @@ int __sched rt_mutex_lock_interruptible(
+@@ -1898,7 +2044,7 @@ int __sched rt_mutex_lock_interruptible(
{
might_sleep();
@@ -306,7 +306,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
-@@ -1926,7 +2072,7 @@ int __sched rt_mutex_lock_killable(struc
+@@ -1925,7 +2071,7 @@ int __sched rt_mutex_lock_killable(struc
{
might_sleep();
@@ -315,7 +315,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
-@@ -1950,6 +2096,7 @@ rt_mutex_timed_lock(struct rt_mutex *loc
+@@ -1949,6 +2095,7 @@ rt_mutex_timed_lock(struct rt_mutex *loc
return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
RT_MUTEX_MIN_CHAINWALK,
@@ -323,7 +323,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
-@@ -2248,7 +2395,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -2247,7 +2394,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
raw_spin_lock_irq(&lock->wait_lock);
/* sleep on the mutex */
set_current_state(TASK_INTERRUPTIBLE);
@@ -332,7 +332,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
/*
* try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
* have to fix that up.
-@@ -2315,24 +2462,98 @@ bool rt_mutex_cleanup_proxy_lock(struct
+@@ -2314,24 +2461,98 @@ bool rt_mutex_cleanup_proxy_lock(struct
return cleanup;
}
diff --git a/patches/series b/patches/series
index 79054834412864..6fd16afd3a7d89 100644
--- a/patches/series
+++ b/patches/series
@@ -46,6 +46,8 @@ lockdep-Fix-per-cpu-static-objects.patch
0004-MAINTAINERS-Add-FUTEX-SUBSYSTEM.patch
futex-rt_mutex-Fix-rt_mutex_cleanup_proxy_lock.patch
+arm64-cpufeature-don-t-use-mutex-in-bringup-path.patch
+
###
# get_online_cpus() rework.
# cpus_allowed queue from sched/core
@@ -135,6 +137,7 @@ fs-dcache-init-in_lookup_hashtable.patch
iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch
iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
rxrpc-remove-unused-static-variables.patch
+mm-swap-don-t-disable-preemption-while-taking-the-pe.patch
# Wants a different fix for upstream
NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
@@ -431,6 +434,8 @@ spinlock-types-separate-raw.patch
rtmutex-avoid-include-hell.patch
rtmutex_dont_include_rcu.patch
rt-add-rt-locks.patch
+rtmutex-Fix-lock-stealing-logic.patch
+kernel-locking-use-an-exclusive-wait_q-for-sleeper.patch
rtmutex-add-a-first-shot-of-ww_mutex.patch
rtmutex-Provide-rt_mutex_lock_state.patch
rtmutex-Provide-locked-slowpath.patch
diff --git a/patches/workqueue-distangle-from-rq-lock.patch b/patches/workqueue-distangle-from-rq-lock.patch
index ec7be2fdb22b12..9b2ab733b968a6 100644
--- a/patches/workqueue-distangle-from-rq-lock.patch
+++ b/patches/workqueue-distangle-from-rq-lock.patch
@@ -31,7 +31,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1690,10 +1690,6 @@ static inline void ttwu_activate(struct
+@@ -1701,10 +1701,6 @@ static inline void ttwu_activate(struct
{
activate_task(rq, p, en_flags);
p->on_rq = TASK_ON_RQ_QUEUED;
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2146,58 +2142,6 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2157,58 +2153,6 @@ try_to_wake_up(struct task_struct *p, un
}
/**
@@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
*
-@@ -3485,21 +3429,6 @@ static void __sched notrace __schedule(b
+@@ -3496,21 +3440,6 @@ static void __sched notrace __schedule(b
atomic_inc(&rq->nr_iowait);
delayacct_blkio_start();
}
@@ -123,7 +123,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
switch_count = &prev->nvcsw;
}
-@@ -3564,6 +3493,14 @@ static inline void sched_submit_work(str
+@@ -3575,6 +3504,14 @@ static inline void sched_submit_work(str
{
if (!tsk->state || tsk_is_pi_blocked(tsk))
return;
@@ -138,7 +138,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
-@@ -3572,6 +3509,12 @@ static inline void sched_submit_work(str
+@@ -3583,6 +3520,12 @@ static inline void sched_submit_work(str
blk_schedule_flush_plug(tsk);
}
@@ -151,7 +151,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
asmlinkage __visible void __sched schedule(void)
{
struct task_struct *tsk = current;
-@@ -3582,6 +3525,7 @@ asmlinkage __visible void __sched schedu
+@@ -3593,6 +3536,7 @@ asmlinkage __visible void __sched schedu
__schedule(false);
sched_preempt_enable_no_resched();
} while (need_resched());
diff --git a/patches/workqueue-prevent-deadlock-stall.patch b/patches/workqueue-prevent-deadlock-stall.patch
index 7b7d866735e35c..fc3edf8f21ba75 100644
--- a/patches/workqueue-prevent-deadlock-stall.patch
+++ b/patches/workqueue-prevent-deadlock-stall.patch
@@ -43,7 +43,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3615,9 +3615,8 @@ void __noreturn do_task_dead(void)
+@@ -3626,9 +3626,8 @@ void __noreturn do_task_dead(void)
static inline void sched_submit_work(struct task_struct *tsk)
{
@@ -54,7 +54,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
/*
* If a worker went to sleep, notify and ask workqueue whether
* it wants to wake up a task to maintain concurrency.
-@@ -3625,6 +3624,10 @@ static inline void sched_submit_work(str
+@@ -3636,6 +3635,10 @@ static inline void sched_submit_work(str
if (tsk->flags & PF_WQ_WORKER)
wq_worker_sleeping(tsk);