author    Thomas Gleixner <tglx@linutronix.de>  2012-01-31 13:01:27 +0100
committer Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2016-02-13 00:35:00 +0100
commit    fecde7ae00445f4d6ae9c60c0a8893023fa0d4cd (patch)
tree      1e377dff52306da00447ce5547acccae3d876486
parent    3c25a8b5718ca866c5984e0492c5070503bb3aa6 (diff)
genirq: Allow disabling of softirq processing in irq thread context
The processing of softirqs in irq thread context is a performance gain for the non-rt workloads of a system, but it's counterproductive for interrupts which are explicitly related to the realtime workload. Allow such interrupts to prevent softirq processing in their thread context.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
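As a usage sketch (hypothetical driver code; my_hardirq_fn, my_thread_fn, my_setup_irq and "my-rt-dev" are illustrative names, not part of this patch), a realtime-critical driver would pass the new flag when requesting its threaded handler:

#include <linux/interrupt.h>

/* Hard irq part: just acknowledge the device and kick the thread. */
static irqreturn_t my_hardirq_fn(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/* Thread part: does all device work inline; must not raise softirqs. */
static irqreturn_t my_thread_fn(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int my_setup_irq(unsigned int irq, void *dev_id)
{
	/* IRQF_NO_SOFTIRQ_CALL keeps softirq processing out of this
	 * interrupt's thread. */
	return request_threaded_irq(irq, my_hardirq_fn, my_thread_fn,
				    IRQF_NO_SOFTIRQ_CALL, "my-rt-dev",
				    dev_id);
}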
-rw-r--r--  include/linux/interrupt.h |  2
-rw-r--r--  include/linux/irq.h       |  4
-rw-r--r--  kernel/irq/manage.c       | 13
-rw-r--r--  kernel/irq/settings.h     | 12
-rw-r--r--  kernel/softirq.c          |  9
5 files changed, 38 insertions(+), 2 deletions(-)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index c2c447f2de5c55..79a9622b5a38c2 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -61,6 +61,7 @@
* interrupt handler after suspending interrupts. For system
* wakeup devices users need to implement wakeup detection in
* their interrupt handlers.
+ * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT)
*/
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
@@ -74,6 +75,7 @@
#define IRQF_NO_THREAD 0x00010000
#define IRQF_EARLY_RESUME 0x00020000
#define IRQF_COND_SUSPEND 0x00040000
+#define IRQF_NO_SOFTIRQ_CALL 0x00080000
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 3c1c96786248cb..311d3f06145278 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -72,6 +72,7 @@ enum irqchip_irq_state;
* IRQ_IS_POLLED - Always polled by another interrupt. Exclude
* it from the spurious interrupt detection
* mechanism and from core side polling.
+ * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT)
* IRQ_DISABLE_UNLAZY - Disable lazy irq disable
*/
enum {
@@ -99,13 +100,14 @@ enum {
IRQ_PER_CPU_DEVID = (1 << 17),
IRQ_IS_POLLED = (1 << 18),
IRQ_DISABLE_UNLAZY = (1 << 19),
+ IRQ_NO_SOFTIRQ_CALL = (1 << 20),
};
#define IRQF_MODIFY_MASK \
(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
- IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY)
+ IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_NO_SOFTIRQ_CALL)
#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 8842388560a44f..5b1ad63d6a6607 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -940,7 +940,15 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
local_bh_disable();
ret = action->thread_fn(action->irq, action->dev_id);
irq_finalize_oneshot(desc, action);
- local_bh_enable();
+ /*
+ * Interrupts which have real time requirements can be set up
+ * to avoid softirq processing in the thread handler. This is
+ * safe as these interrupts do not raise soft interrupts.
+ */
+ if (irq_settings_no_softirq_call(desc))
+ _local_bh_enable();
+ else
+ local_bh_enable();
return ret;
}
@@ -1390,6 +1398,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
}
+ if (new->flags & IRQF_NO_SOFTIRQ_CALL)
+ irq_settings_set_no_softirq_call(desc);
+
/* Set default affinity mask once everything is setup */
setup_affinity(desc, mask);
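The comment in irq_forced_thread_fn() above is the contract behind the flag: a thread handler run with IRQF_NO_SOFTIRQ_CALL must not raise softirqs, because _local_bh_enable() deliberately skips softirq processing on the way out. A hedged counter-example of what is off-limits (my_tasklet and my_tasklet_fn are illustrative names; tasklet_schedule() raises TASKLET_SOFTIRQ):

#include <linux/interrupt.h>

static void my_tasklet_fn(unsigned long data)
{
	/* deferred work */
}
static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);

/* NOT suitable for IRQF_NO_SOFTIRQ_CALL: this handler defers work to
 * a softirq which its own bottom-half re-enable will not process. */
static irqreturn_t bad_thread_fn(int irq, void *dev_id)
{
	tasklet_schedule(&my_tasklet);
	return IRQ_HANDLED;
}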
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index 320579d8909100..2df2d4445b1ee3 100644
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -16,6 +16,7 @@ enum {
_IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
_IRQ_IS_POLLED = IRQ_IS_POLLED,
_IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY,
+ _IRQ_NO_SOFTIRQ_CALL = IRQ_NO_SOFTIRQ_CALL,
_IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
};
@@ -30,6 +31,7 @@ enum {
#define IRQ_PER_CPU_DEVID GOT_YOU_MORON
#define IRQ_IS_POLLED GOT_YOU_MORON
#define IRQ_DISABLE_UNLAZY GOT_YOU_MORON
+#define IRQ_NO_SOFTIRQ_CALL GOT_YOU_MORON
#undef IRQF_MODIFY_MASK
#define IRQF_MODIFY_MASK GOT_YOU_MORON
@@ -40,6 +42,16 @@ irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
}
+static inline bool irq_settings_no_softirq_call(struct irq_desc *desc)
+{
+ return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL;
+}
+
+static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc)
+{
+ desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL;
+}
+
static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
{
return desc->status_use_accessors & _IRQ_PER_CPU;
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 0e373e97d0c7d3..13549d0b1df944 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -578,6 +578,15 @@ void __local_bh_enable(void)
}
EXPORT_SYMBOL(__local_bh_enable);
+void _local_bh_enable(void)
+{
+ if (WARN_ON(current->softirq_nestcnt == 0))
+ return;
+ if (--current->softirq_nestcnt == 0)
+ migrate_enable();
+}
+EXPORT_SYMBOL(_local_bh_enable);
+
int in_serving_softirq(void)
{
return current->flags & PF_IN_SOFTIRQ;
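For context, _local_bh_enable() added above is the stripped-down sibling of the RT tree's local_bh_enable(): both drop softirq_nestcnt and re-enable migration, but only the latter runs the softirqs the current task has raised before doing so. A hedged reconstruction of that counterpart from RT trees of this era (not part of this patch; details may vary between RT releases):

void local_bh_enable(void)
{
	if (WARN_ON(current->softirq_nestcnt == 0))
		return;

	local_irq_disable();
	/* Process softirqs raised by this task before dropping the count. */
	if (current->softirq_nestcnt == 1 && current->softirqs_raised)
		do_current_softirqs();
	local_irq_enable();

	if (--current->softirq_nestcnt == 0)
		migrate_enable();
}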