on_each_cpu() is tidier, and handles preemption.

Replace the open-coded rcu_barrier_func(NULL) + smp_call_function()
pair in rcu_barrier() with on_each_cpu(), which runs the callback on
every CPU (including the local one) with preemption disabled.  That
lets rcu_barrier_func() drop its private get_cpu()/put_cpu_no_resched()
handling and use plain smp_processor_id() instead.

Signed-off-by: Andrew Morton
---

 25-akpm/kernel/rcupdate.c |    9 +++++----
 1 files changed, 5 insertions(+), 4 deletions(-)

diff -puN kernel/rcupdate.c~reiser4-rcu-barrier-fix kernel/rcupdate.c
--- 25/kernel/rcupdate.c~reiser4-rcu-barrier-fix	Wed Aug 18 16:53:57 2004
+++ 25-akpm/kernel/rcupdate.c	Wed Aug 18 16:57:42 2004
@@ -139,16 +139,18 @@ static void rcu_barrier_callback(struct
 		complete(&rcu_barrier_completion);
 }
 
+/*
+ * Called with preemption disabled, and from cross-cpu IRQ context.
+ */
 static void rcu_barrier_func(void *notused)
 {
-	int cpu = get_cpu();
+	int cpu = smp_processor_id();
 	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
 	struct rcu_head *head;
 
 	head = &rdp->barrier;
 	atomic_inc(&rcu_barrier_cpu_count);
 	call_rcu(head, rcu_barrier_callback);
-	put_cpu_no_resched();
 }
 
 /**
@@ -161,8 +163,7 @@ void rcu_barrier(void)
 	down(&rcu_barrier_sema);
 	init_completion(&rcu_barrier_completion);
 	atomic_set(&rcu_barrier_cpu_count, 0);
-	rcu_barrier_func(NULL);
-	smp_call_function(rcu_barrier_func, NULL, 0, 1);
+	on_each_cpu(rcu_barrier_func, NULL, 0, 1);
 	wait_for_completion(&rcu_barrier_completion);
 	up(&rcu_barrier_sema);
 }
_
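
For reference, here is roughly what on_each_cpu() looked like in kernels
of this vintage -- a simplified sketch based on the 2.6.8-era
include/linux/smp.h, not a verbatim copy.  The point is that preemption
is disabled across both the cross-CPU and the local invocation, which is
what makes the bare smp_processor_id() in rcu_barrier_func() safe:

	/*
	 * Simplified sketch of the 2.6.8-era on_each_cpu() helper
	 * (the real version in include/linux/smp.h also handles the
	 * !CONFIG_SMP build).
	 */
	static inline int on_each_cpu(void (*func)(void *info), void *info,
				      int retry, int wait)
	{
		int ret;

		preempt_disable();	/* pin us to this CPU... */
		/* ...run func on every other online CPU via IPI... */
		ret = smp_call_function(func, info, retry, wait);
		func(info);		/* ...then on the local CPU */
		preempt_enable();
		return ret;
	}

With that helper, the old rcu_barrier_func(NULL) call (which otherwise
ran preemptibly, hence the get_cpu()/put_cpu_no_resched() pair) is
subsumed by the func(info) invocation under preempt_disable().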