commit 981eabf0b6a49a7f12897941df1aff6f319bba36
tree   52a1ee5b2c39101d9f03adcd71c19fc21235cdb9
parent 3c101a1fb986111b04098b27140788580cfa7f03
Author:     Thomas Gleixner <tglx@linutronix.de>
AuthorDate: 2011-06-25 09:21:04 +0200
Commit:     Sebastian Andrzej Siewior <bigeasy@linutronix.de>
CommitDate: 2016-02-13 00:34:55 +0100
sched: Add saved_state for tasks blocked on sleeping locks
Spinlocks are state preserving in !RT. RT changes the state when a task
gets blocked on a lock. So we need to remember the state before the lock
contention. If a regular wakeup (not an rtmutex-related wakeup) happens,
the saved_state is updated to running. When the lock sleep is done, the
saved state is restored.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
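To make the mechanism concrete, here is a minimal sketch of the intended
consumer. It is not part of this patch: sleeping_lock_slowpath() and the
try_take_lock() predicate are hypothetical stand-ins for the rtmutex slow
path in the RT tree, which saves the task state under pi_lock before
blocking and restores it once the lock is taken.

static void sleeping_lock_slowpath(void)
{
	struct task_struct *self = current;
	unsigned long flags;

	/* Remember the pre-contention state under pi_lock. */
	raw_spin_lock_irqsave(&self->pi_lock, flags);
	self->saved_state = self->state;
	__set_current_state(TASK_UNINTERRUPTIBLE);
	raw_spin_unlock_irqrestore(&self->pi_lock, flags);

	for (;;) {
		/* try_take_lock() is a hypothetical acquisition attempt. */
		if (try_take_lock())
			break;
		schedule();
		/* Woken to retry; prepare to block again if still contended. */
		raw_spin_lock_irqsave(&self->pi_lock, flags);
		__set_current_state(TASK_UNINTERRUPTIBLE);
		raw_spin_unlock_irqrestore(&self->pi_lock, flags);
	}

	/*
	 * Restore the saved state. If a regular wakeup arrived while we
	 * were blocked, try_to_wake_up() has already set saved_state to
	 * TASK_RUNNING, so we do not go back to sleep by mistake.
	 */
	raw_spin_lock_irqsave(&self->pi_lock, flags);
	__set_current_state(self->saved_state);
	self->saved_state = TASK_RUNNING;
	raw_spin_unlock_irqrestore(&self->pi_lock, flags);
}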
 include/linux/sched.h |  2 ++
 kernel/sched/core.c   | 31 ++++++++++++++++++++++++++++++-
 kernel/sched/sched.h  |  1 +
 3 files changed, 33 insertions(+), 1 deletion(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5f6dc5bba0ca3d..86c567cfe2ccdf 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1378,6 +1378,7 @@ struct tlbflush_unmap_batch {
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
+ volatile long saved_state; /* saved state for "spinlock sleepers" */
void *stack;
atomic_t usage;
unsigned int flags; /* per process flags, defined below */
@@ -2483,6 +2484,7 @@ extern void xtime_update(unsigned long ticks);
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
+extern int wake_up_lock_sleeper(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b09fefa8ea16e4..900dca3928a457 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1949,8 +1949,25 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
*/
smp_mb__before_spinlock();
raw_spin_lock_irqsave(&p->pi_lock, flags);
- if (!(p->state & state))
+ if (!(p->state & state)) {
+ /*
+ * The task might be running due to a spinlock sleeper
+ * wakeup. Check the saved state and set it to running
+ * if the wakeup condition is true.
+ */
+ if (!(wake_flags & WF_LOCK_SLEEPER)) {
+ if (p->saved_state & state)
+ p->saved_state = TASK_RUNNING;
+ }
goto out;
+ }
+
+ /*
+ * If this is a regular wakeup, then we can unconditionally
+ * clear the saved state of a "lock sleeper".
+ */
+ if (!(wake_flags & WF_LOCK_SLEEPER))
+ p->saved_state = TASK_RUNNING;
trace_sched_waking(p);
@@ -2083,6 +2100,18 @@ int wake_up_process(struct task_struct *p)
}
EXPORT_SYMBOL(wake_up_process);
+/**
+ * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock"
+ * @p: The process to be woken up.
+ *
+ * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate
+ * the nature of the wakeup.
+ */
+int wake_up_lock_sleeper(struct task_struct *p)
+{
+ return try_to_wake_up(p, TASK_ALL, WF_LOCK_SLEEPER);
+}
+
int wake_up_state(struct task_struct *p, unsigned int state)
{
return try_to_wake_up(p, state, 0);
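A hedged usage sketch of the new helper (again not part of the patch; the
function below is a hypothetical stand-in for the sleeping-lock unlock
path): lock wakeups go through wake_up_lock_sleeper() so the waiter's
saved_state survives, while every other wakeup keeps using
wake_up_process()/wake_up_state() and therefore resets saved_state to
TASK_RUNNING.

static void sleeping_lock_wake_top_waiter(struct task_struct *waiter)
{
	/*
	 * WF_LOCK_SLEEPER tells try_to_wake_up() to leave
	 * waiter->saved_state untouched: this wakeup only ends the
	 * lock sleep, it is not a "real" wakeup of the task.
	 */
	wake_up_lock_sleeper(waiter);
}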
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b242775bf670e1..ceae7a810f60e8 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1100,6 +1100,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
#define WF_FORK 0x02 /* child wakeup after fork */
#define WF_MIGRATED 0x4 /* internal use, task got migrated */
+#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */
/*
* To aid in avoiding the subversion of "niceness" due to uneven distribution