path: root/kernel/sched.c
author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2011-04-07 14:09:42 +0200
committer  Ingo Molnar <mingo@elte.hu>                2011-04-11 12:58:16 +0200
commit     c4a8849af939082052d8117f9ea3e170a99ff232 (patch)
tree       845dac046d187c58643f987aac4a4eebeebbb88f /kernel/sched.c
parent     f4ad9bd208c98f32a6f9136618e0b8bebe3fb370 (diff)
download   linux-c4a8849af939082052d8117f9ea3e170a99ff232.tar.gz
sched: Remove obsolete arch_ prefixes
Non-weak static functions clearly are not arch-specific, so remove the
arch_ prefix.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110407122941.820460566@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
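The rationale turns on overridability: the arch_ prefix conventionally marks a hook that an architecture may supply its own version of, typically by overriding a weak default, whereas a static function is confined to its translation unit and can never be replaced by arch code, so the prefix is misleading on it. Below is a minimal standalone sketch of that distinction (not part of this patch; the names arch_setup_hook and setup_domains are hypothetical and the example uses the plain GCC weak attribute rather than the kernel's __weak macro):

#include <stdio.h>

/* Weak default: another object file (e.g. an arch-specific one) providing a
 * strong arch_setup_hook() would silently replace this definition at link time. */
__attribute__((weak)) void arch_setup_hook(void)
{
	printf("generic arch_setup_hook()\n");
}

/* Static function: file-local, no other translation unit can override it,
 * so an arch_ prefix here would suggest a flexibility that does not exist. */
static void setup_domains(void)
{
	printf("setup_domains(): always this file-local definition\n");
}

int main(void)
{
	arch_setup_hook();	/* may be the weak default or an arch override */
	setup_domains();	/* always resolves to the local static definition */
	return 0;
}

On an ELF target, linking in an object that defines a strong arch_setup_hook() changes the first call's behaviour, while setup_domains() is unaffected; the renamed functions in this patch are all in the second category.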
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 48013633d792f..d3e183c85f49d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -231,7 +231,7 @@ static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
#endif
/*
- * sched_domains_mutex serializes calls to arch_init_sched_domains,
+ * sched_domains_mutex serializes calls to init_sched_domains,
* detach_destroy_domains and partition_sched_domains.
*/
static DEFINE_MUTEX(sched_domains_mutex);
@@ -7670,7 +7670,7 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
* For now this just excludes isolated cpus, but could be used to
* exclude other special cases in the future.
*/
-static int arch_init_sched_domains(const struct cpumask *cpu_map)
+static int init_sched_domains(const struct cpumask *cpu_map)
{
int err;
@@ -7687,7 +7687,7 @@ static int arch_init_sched_domains(const struct cpumask *cpu_map)
return err;
}
-static void arch_destroy_sched_domains(const struct cpumask *cpu_map,
+static void destroy_sched_domains(const struct cpumask *cpu_map,
struct cpumask *tmpmask)
{
free_sched_groups(cpu_map, tmpmask);
@@ -7706,7 +7706,7 @@ static void detach_destroy_domains(const struct cpumask *cpu_map)
for_each_cpu(i, cpu_map)
cpu_attach_domain(NULL, &def_root_domain, i);
synchronize_sched();
- arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask));
+ destroy_sched_domains(cpu_map, to_cpumask(tmpmask));
}
/* handle null as "default" */
@@ -7815,7 +7815,7 @@ match2:
}
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-static void arch_reinit_sched_domains(void)
+static void reinit_sched_domains(void)
{
get_online_cpus();
@@ -7848,7 +7848,7 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
else
sched_mc_power_savings = level;
- arch_reinit_sched_domains();
+ reinit_sched_domains();
return count;
}
@@ -7974,7 +7974,7 @@ void __init sched_init_smp(void)
#endif
get_online_cpus();
mutex_lock(&sched_domains_mutex);
- arch_init_sched_domains(cpu_active_mask);
+ init_sched_domains(cpu_active_mask);
cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
if (cpumask_empty(non_isolated_cpus))
cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);