diff options
author | Stephen Rothwell <sfr@canb.auug.org.au> | 2024-05-01 15:28:42 +1000 |
---|---|---|
committer | Stephen Rothwell <sfr@canb.auug.org.au> | 2024-05-01 15:28:42 +1000 |
commit | 7f9b96eda5aa4e689169143a8c07c9a905c494fb (patch) | |
tree | 2b78c60e7c48b4bc01197db97fcda82c7880f85e | |
parent | 92e855734328bbccf9262927c6b32a13c765b087 (diff) | |
parent | 36a71c558b3e47e2fb19fafc9cf1aa6564211eee (diff) | |
download | linux-next-7f9b96eda5aa4e689169143a8c07c9a905c494fb.tar.gz |
Merge branch 'bitmap-for-next' of https://github.com/norov/linux.git
Notice: this object is not reachable from any branch.
-rw-r--r-- | include/linux/cpumask.h | 10 | ||||
-rw-r--r-- | kernel/sched/topology.c | 6 |
2 files changed, 12 insertions, 4 deletions
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index d66cdfdfcaa150..23686bed441d00 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -386,6 +386,16 @@ unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int sta for_each_or_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits) /** + * for_each_cpu_from - iterate over CPUs present in @mask, from @cpu to the end of @mask. + * @cpu: the (optionally unsigned) integer iterator + * @mask: the cpumask pointer + * + * After the loop, cpu is >= nr_cpu_ids. + */ +#define for_each_cpu_from(cpu, mask) \ + for_each_set_bit_from(cpu, cpumask_bits(mask), small_cpumask_bits) + +/** * cpumask_any_but - return a "random" in a cpumask, but not this one. * @mask: the cpumask to search * @cpu: the cpu to ignore. diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 6835598316561c..1d6eefa4032e53 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -2353,7 +2353,7 @@ static struct sched_domain *build_sched_domain(struct sched_domain_topology_leve static bool topology_span_sane(struct sched_domain_topology_level *tl, const struct cpumask *cpu_map, int cpu) { - int i; + int i = cpu + 1; /* NUMA levels are allowed to overlap */ if (tl->flags & SDTL_OVERLAP) @@ -2365,9 +2365,7 @@ static bool topology_span_sane(struct sched_domain_topology_level *tl, * breaking the sched_group lists - i.e. a later get_group() pass * breaks the linking done for an earlier span. */ - for_each_cpu(i, cpu_map) { - if (i == cpu) - continue; + for_each_cpu_from(i, cpu_map) { /* * We should 'and' all those masks with 'cpu_map' to exactly * match the topology we're about to build, but that can only |