author      Mike Galbraith <umgwanakikbuti@gmail.com>          2014-05-02 13:13:22 +0200
committer   Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2016-02-13 00:36:10 +0100
commit      b429a3edaee5673132f515c6c68e33118f3a4473 (patch)
tree        f2b3503db35df800fedb0867e9b6893d7ff7be48
parent      70e8ed9e6bd1192866c3f8c049708e28cffc08c2 (diff)
download    rt-linux-b429a3edaee5673132f515c6c68e33118f3a4473.tar.gz
stomp-machine: create lg_global_trylock_relax() primitive
Create lg_global_trylock_relax() for use by the stopper thread when it cannot
schedule, to deal with stop_cpus_lock, which is now an lglock.

Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-rw-r--r--   include/linux/lglock.h        6
-rw-r--r--   include/linux/spinlock_rt.h   1
-rw-r--r--   kernel/locking/lglock.c      25
-rw-r--r--   kernel/locking/rtmutex.c      5
4 files changed, 37 insertions, 0 deletions
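To make the intent of the new primitive concrete before the diff, here is a minimal caller-side sketch. The call site name (queue_stop_cpus_work()) and the lock (stop_cpus_lock) come from the patch description and the comment added to lglock.c; the function body, parameter form, and the example_ name are illustrative assumptions, not code from this commit.

#include <linux/lglock.h>

/*
 * Illustrative sketch only -- not part of this commit.  Shows how the
 * stop-machine path could take stop_cpus_lock with the new primitive:
 * on PREEMPT_RT_FULL it busy-waits via trylock + cpu_relax() instead
 * of sleeping, which is what a stopper context that cannot schedule
 * needs; on !PREEMPT_RT_FULL it simply maps to lg_global_lock().
 */
static void example_queue_stop_cpus_work(struct lglock *stop_cpus_lock)
{
	/* Never sleeps, so it is safe where scheduling is not allowed. */
	lg_global_trylock_relax(stop_cpus_lock);

	/* ... queue the per-cpu stop work here (omitted) ... */

	/* Released like any other globally held lglock. */
	lg_global_unlock(stop_cpus_lock);
}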
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index 492005d40f7cf..6f035f635d0ec 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -82,6 +82,12 @@ void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2);
void lg_global_lock(struct lglock *lg);
void lg_global_unlock(struct lglock *lg);

+#ifndef CONFIG_PREEMPT_RT_FULL
+#define lg_global_trylock_relax(name) lg_global_lock(name)
+#else
+void lg_global_trylock_relax(struct lglock *lg);
+#endif
+
#else
/* When !CONFIG_SMP, map lglock to spinlock */
#define lglock spinlock
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
index ca08d3b2d9e88..f757096b230c7 100644
--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -34,6 +34,7 @@ extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
*/
extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
+extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock);

#define spin_lock(lock) \
do { \
diff --git a/kernel/locking/lglock.c b/kernel/locking/lglock.c
index 390dfc188f28a..d8be4fcc14f8f 100644
--- a/kernel/locking/lglock.c
+++ b/kernel/locking/lglock.c
@@ -127,3 +127,28 @@ void lg_global_unlock(struct lglock *lg)
preempt_enable_nort();
}
EXPORT_SYMBOL(lg_global_unlock);
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * HACK: If you use this, you get to keep the pieces.
+ * Used in queue_stop_cpus_work() when stop machinery
+ * is called from inactive CPU, so we can't schedule.
+ */
+# define lg_do_trylock_relax(l) \
+ do { \
+ while (!__rt_spin_trylock(l)) \
+ cpu_relax(); \
+ } while (0)
+
+void lg_global_trylock_relax(struct lglock *lg)
+{
+ int i;
+
+ lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
+ for_each_possible_cpu(i) {
+ lg_lock_ptr *lock;
+ lock = per_cpu_ptr(lg->lock, i);
+ lg_do_trylock_relax(lock);
+ }
+}
+#endif
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index d2b2c38ba56c5..a6f5326e40564 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1151,6 +1151,11 @@ void __lockfunc rt_spin_unlock_wait(spinlock_t *lock)
}
EXPORT_SYMBOL(rt_spin_unlock_wait);

+int __lockfunc __rt_spin_trylock(struct rt_mutex *lock)
+{
+ return rt_mutex_trylock(lock);
+}
+
int __lockfunc rt_spin_trylock(spinlock_t *lock)
{
int ret = rt_mutex_trylock(&lock->lock);