From c8d3ff7efeba6ccaf2fe36d07c5f53fc81e864ae Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Sun, 26 Jul 2009 12:23:12 +0200
Subject: [PATCH] net: fix the xtables smp_processor_id assumptions for -rt

commit 21ece08cde53262625c46bd64eb66a81be5b458c in tip.

Signed-off-by: Thomas Gleixner
Signed-off-by: Paul Gortmaker
---
 include/linux/netfilter/x_tables.h |    4 ++--
 net/ipv4/netfilter/arp_tables.c    |    5 +++--
 net/ipv4/netfilter/ip_tables.c     |    9 +++++----
 net/ipv6/netfilter/ip6_tables.c    |   11 +++++++----
 4 files changed, 17 insertions(+), 12 deletions(-)

diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 84c7c92..5c588ca 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -487,14 +487,14 @@ static inline void xt_info_rdlock_bh(void)
 	struct xt_info_lock *lock;
 
 	local_bh_disable();
-	lock = &__get_cpu_var(xt_info_locks);
+	lock = &__raw_get_cpu_var(xt_info_locks);
 	if (likely(!lock->readers++))
 		spin_lock(&lock->lock);
 }
 
 static inline void xt_info_rdunlock_bh(void)
 {
-	struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);
+	struct xt_info_lock *lock = &__raw_get_cpu_var(xt_info_locks);
 
 	if (likely(!--lock->readers))
 		spin_unlock(&lock->lock);
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index f3c60dc..9438d18 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -717,7 +717,7 @@ static void get_counters(const struct xt_table_info *t,
 	struct arpt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu;
+	unsigned int curcpu = NR_CPUS;
 
 	/* Instead of clearing (by a previous call to memset())
 	 * the counters and using adds, we set the counters
@@ -727,6 +727,7 @@ static void get_counters(const struct xt_table_info *t,
 	 * if new softirq were to run and call ipt_do_table
 	 */
 	local_bh_disable();
+#ifndef CONFIG_PREEMPT_RT
 	curcpu = smp_processor_id();
 
 	i = 0;
@@ -735,7 +736,7 @@ static void get_counters(const struct xt_table_info *t,
 			    iter->counters.pcnt);
 		++i;
 	}
-
+#endif
 	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
 			continue;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index b29c66d..83501d8 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -357,7 +357,7 @@ ipt_do_table(struct sk_buff *skb,
 	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
 	xt_info_rdlock_bh();
 	private = table->private;
-	table_base = private->entries[smp_processor_id()];
+	table_base = private->entries[raw_smp_processor_id()];
 
 	e = get_entry(table_base, private->hook_entry[hook]);
 
@@ -905,7 +905,7 @@ get_counters(const struct xt_table_info *t,
 	struct ipt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu;
+	unsigned int curcpu = NR_CPUS;
 
 	/* Instead of clearing (by a previous call to memset())
 	 * the counters and using adds, we set the counters
@@ -915,6 +915,7 @@ get_counters(const struct xt_table_info *t,
 	 * if new softirq were to run and call ipt_do_table
 	 */
 	local_bh_disable();
+#ifndef CONFIG_PREEMPT_RT
 	curcpu = smp_processor_id();
 
 	i = 0;
@@ -923,7 +924,7 @@ get_counters(const struct xt_table_info *t,
 			    iter->counters.pcnt);
 		++i;
 	}
-
+#endif
 	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
 			continue;
@@ -1397,7 +1398,7 @@ do_add_counters(struct net *net, const void __user *user,
 
 	i = 0;
 	/* Choose the copy that is on our node */
-	curcpu = smp_processor_id();
+	curcpu = raw_smp_processor_id();
 	loc_cpu_entry = private->entries[curcpu];
 	xt_info_wrlock(curcpu);
 	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 053133a..66c78d0 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -935,7 +935,7 @@ get_counters(const struct xt_table_info *t,
 	struct ip6t_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu;
+	unsigned int curcpu = NR_CPUS;
 
 	/* Instead of clearing (by a previous call to memset())
 	 * the counters and using adds, we set the counters
@@ -945,6 +945,8 @@ get_counters(const struct xt_table_info *t,
 	 * if new softirq were to run and call ipt_do_table
 	 */
 	local_bh_disable();
+
+#ifndef CONFIG_PREEMPT_RT
 	curcpu = smp_processor_id();
 
 	i = 0;
@@ -953,7 +955,7 @@ get_counters(const struct xt_table_info *t,
 			    iter->counters.pcnt);
 		++i;
 	}
-
+#endif
 	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
 			continue;
@@ -974,12 +976,13 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
 	unsigned int countersize;
 	struct xt_counters *counters;
 	const struct xt_table_info *private = table->private;
+	int node = cpu_to_node(raw_smp_processor_id());
 
 	/* We need atomic snapshot of counters: rest doesn't change
 	   (other than comefrom, which userspace doesn't care
 	   about). */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc_node(countersize, numa_node_id());
+	counters = vmalloc_node(countersize, node);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1429,7 +1432,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
 
 	i = 0;
 	/* Choose the copy that is on our node */
-	curcpu = smp_processor_id();
+	curcpu = raw_smp_processor_id();
 	xt_info_wrlock(curcpu);
 	loc_cpu_entry = private->entries[curcpu];
 	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
-- 
1.7.0.4