From f8ae916b4a177f6852fd37fea0ad9e0a09a866c5 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 13 Jul 2010 15:41:55 +0200
Subject: [PATCH] net: iptables: Fix xt_info locking

commit 5bbbedcfeec4fb13f514cabf4383b62c2e141f76 in tip.

xt_info locking is an open-coded rw_lock which works fine in mainline,
but on RT it's racy. Replace it with a real rwlock.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
---
 include/linux/netfilter/x_tables.h |   34 +++++++++++++++++++++++++---------
 net/netfilter/x_tables.c           |    5 +++++
 2 files changed, 30 insertions(+), 9 deletions(-)

diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index dcc03d7..ae2ef0f 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -464,7 +464,11 @@ extern void xt_free_table_info(struct xt_table_info *info);
  *  necessary for reading the counters.
  */
 struct xt_info_lock {
+#ifndef CONFIG_PREEMPT_RT
 	spinlock_t lock;
+#else
+	rwlock_t lock;
+#endif
 	unsigned char readers;
 };
 DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
@@ -491,11 +495,14 @@ static inline int xt_info_rdlock_bh(void)
 	preempt_disable_rt();
 	cpu = smp_processor_id();
 	lock = &per_cpu(xt_info_locks, cpu);
-	if (likely(!lock->readers++)) {
-		preempt_enable_rt();
-		spin_lock(&lock->lock);
-	} else
-		preempt_enable_rt();
+
+#ifndef CONFIG_PREEMPT_RT
+	if (likely(!lock->readers++))
+		spin_lock(&lock->lock);
+#else
+	preempt_enable_rt();
+	read_lock(&lock->lock);
+#endif
 	return cpu;
 }
 
@@ -503,13 +510,14 @@ static inline void xt_info_rdunlock_bh(int cpu)
 {
 	struct xt_info_lock *lock = &per_cpu(xt_info_locks, cpu);
 
-	preempt_disable_rt();
-
+#ifndef CONFIG_PREEMPT_RT
 	if (likely(!--lock->readers)) {
 		preempt_enable_rt();
 		spin_unlock(&lock->lock);
-	} else
-		preempt_enable_rt();
+	}
+#else
+	read_unlock(&lock->lock);
+#endif
 
 	local_bh_enable();
 }
@@ -521,12 +529,20 @@ static inline void xt_info_rdunlock_bh(int cpu)
  */
 static inline void xt_info_wrlock(unsigned int cpu)
 {
+#ifndef CONFIG_PREEMPT_RT
 	spin_lock(&per_cpu(xt_info_locks, cpu).lock);
+#else
+	write_lock(&per_cpu(xt_info_locks, cpu).lock);
+#endif
 }
 
 static inline void xt_info_wrunlock(unsigned int cpu)
 {
+#ifndef CONFIG_PREEMPT_RT
 	spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
+#else
+	write_unlock(&per_cpu(xt_info_locks, cpu).lock);
+#endif
 }
 
 /*
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 665f5be..da3955e 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1244,7 +1244,12 @@ static int __init xt_init(void)
 
 	for_each_possible_cpu(i) {
 		struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);
+
+#ifndef CONFIG_PREEMPT_RT
 		spin_lock_init(&lock->lock);
+#else
+		rwlock_init(&lock->lock);
+#endif
 		lock->readers = 0;
 	}
 
-- 
1.7.0.4