author     Thomas Gleixner <tglx@linutronix.de>                2015-11-15 18:40:17 +0100
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2016-02-13 00:36:24 +0100
commit     3382a172b546cea7778e0c937f6786af1735b147 (patch)
tree       b288f784a13e9f220a1e39f1bc7d3a16fbe778fa
parent     93b0cba2ec75deff1b3a664d2fac9ad8b8292d8f (diff)
irqwork: Move irq safe work to irq context
On architectures where arch_irq_work_has_interrupt() returns false, we
end up running the irq safe work from the softirq context. That
results in a potential deadlock in the scheduler irq work which expects
that function to be called with interrupts disabled.

Split the irq_work_tick() function into a hard and soft variant. Call
the hard variant from the tick interrupt and add the soft variant to
the timer softirq.

Reported-and-tested-by: Yanjiang Jin <yanjiang.jin@windriver.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable-rt@vger.kernel.org
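[ Illustration only, not part of the patch: a minimal userspace model of
  the hard/soft split, assuming only the names visible in the diff below.
  The list contents and the "ctx" labels are made up for the example.
  irq_work_tick() keeps draining the raised list from hard interrupt
  context (and, on non-RT, the lazy list as well), while the new
  irq_work_tick_soft() drains the lazy list from the timer softirq on
  PREEMPT_RT_FULL. ]

	/*
	 * Sketch only -- a userspace model, not kernel code. Only the
	 * irq_work_tick()/irq_work_tick_soft() split mirrors the patch;
	 * everything else here is a stand-in.
	 */
	#include <stdio.h>

	#define PREEMPT_RT_FULL 1	/* model an RT kernel config */

	static const char *raised_list[] = { "scheduler irq work", NULL };
	static const char *lazy_list[]   = { "deferrable irq work", NULL };

	static void irq_work_run_list(const char *ctx, const char **list)
	{
		for (int i = 0; list[i]; i++)
			printf("[%s] %s\n", ctx, list[i]);
	}

	/* Hard variant: runs from the tick interrupt, irqs disabled. */
	static void irq_work_tick(void)
	{
		irq_work_run_list("hardirq", raised_list);
		if (!PREEMPT_RT_FULL)	/* non-RT drains lazy work here too */
			irq_work_run_list("hardirq", lazy_list);
	}

	/* Soft variant: runs from TIMER_SOFTIRQ on PREEMPT_RT_FULL. */
	static void irq_work_tick_soft(void)
	{
		if (PREEMPT_RT_FULL)
			irq_work_run_list("softirq", lazy_list);
	}

	int main(void)
	{
		irq_work_tick();	/* stands in for update_process_times() */
		irq_work_tick_soft();	/* stands in for run_timer_softirq() */
		return 0;
	}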
 include/linux/irq_work.h | 6 ++++++
 kernel/irq_work.c        | 9 +++++++++
 kernel/time/timer.c      | 6 ++----
 3 files changed, 17 insertions(+), 4 deletions(-)
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 0e427a9997f3bd..2543aab05daa05 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -52,4 +52,10 @@ static inline bool irq_work_needs_cpu(void) { return false; }
 static inline void irq_work_run(void) { }
 #endif
 
+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
+void irq_work_tick_soft(void);
+#else
+static inline void irq_work_tick_soft(void) { }
+#endif
+
 #endif /* _LINUX_IRQ_WORK_H */
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 0ddaf1e66d8c92..2899ba0d23d175 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -200,8 +200,17 @@ void irq_work_tick(void)
 
 	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
 		irq_work_run_list(raised);
+
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL))
+		irq_work_run_list(this_cpu_ptr(&lazy_list));
+}
+
+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
+void irq_work_tick_soft(void)
+{
 	irq_work_run_list(this_cpu_ptr(&lazy_list));
 }
+#endif
 
 /*
  * Synchronize against the irq_work @entry, ensures the entry is not
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 606580f85ac568..fee8682c209e3f 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1484,7 +1484,7 @@ void update_process_times(int user_tick)
 	scheduler_tick();
 	run_local_timers();
 	rcu_check_callbacks(user_tick);
-#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL)
+#if defined(CONFIG_IRQ_WORK)
 	if (in_irq())
 		irq_work_tick();
 #endif
@@ -1498,9 +1498,7 @@ static void run_timer_softirq(struct softirq_action *h)
 {
 	struct tvec_base *base = this_cpu_ptr(&tvec_bases);
 
-#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
-	irq_work_tick();
-#endif
+	irq_work_tick_soft();
 
 	if (time_after_eq(jiffies, base->timer_jiffies))
 		__run_timers(base);