author      Paul Gortmaker <paul.gortmaker@windriver.com>  2017-10-02 12:18:55 -0400
committer   Paul Gortmaker <paul.gortmaker@windriver.com>  2017-10-02 12:18:55 -0400
commit      580832a8efcbb48d6c2d8ece0540f259f4b2b213 (patch)
tree        be840ac446936537d1bdbcadc46de655ffd5f239
parent      f9b050bc48ca3f88d962a95fd05d50393d09bd2b (diff)
download    4.12-rt-patches-580832a8efcbb48d6c2d8ece0540f259f4b2b213.tar.gz
refresh kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
-rw-r--r--  patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch  68
1 file changed, 34 insertions(+), 34 deletions(-)
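
For orientation before the diff: the patch being refreshed converts scheduler callers from reading a task's embedded CPU-affinity mask directly to going through a pointer, as the hunks below show (e.g. cpumask_test_cpu(dest_cpu, p->cpus_ptr)). A minimal userspace sketch of that access-pattern change follows; the field names cpus_allowed, cpus_ptr, and nr_cpus_allowed come from the hunks, while the toy cpumask_t, the helper, and the struct layout are simplified stand-ins, not the kernel's definitions.

/* Toy illustration of the cpus_allowed -> cpus_ptr access-pattern change.
 * Types and helpers are simplified assumptions, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long cpumask_t;          /* one bit per CPU, toy version */

struct task_struct {
	cpumask_t cpus_allowed;           /* the task's own mask */
	const cpumask_t *cpus_ptr;        /* normally &p->cpus_allowed; code that
					   * needs a temporary mask only swaps
					   * this pointer */
	int nr_cpus_allowed;
};

static bool cpumask_test_cpu(int cpu, const cpumask_t *mask)
{
	return (*mask >> cpu) & 1UL;
}

/* Old style: dereference the embedded mask directly. */
static bool can_run_old(struct task_struct *p, int cpu)
{
	return cpumask_test_cpu(cpu, &p->cpus_allowed);
}

/* New style (what the patch converts callers to): go through cpus_ptr. */
static bool can_run_new(struct task_struct *p, int cpu)
{
	return cpumask_test_cpu(cpu, p->cpus_ptr);
}

int main(void)
{
	struct task_struct p = { .cpus_allowed = 0x3UL, .nr_cpus_allowed = 2 };
	p.cpus_ptr = &p.cpus_allowed;     /* default: point at the own mask */

	printf("old: cpu2 allowed? %d\n", can_run_old(&p, 2));
	printf("new: cpu2 allowed? %d\n", can_run_new(&p, 2));
	return 0;
}

The refresh itself only rebases this patch onto a newer 4.12-rt baseline, so the diff below mostly updates hunk offsets and index lines rather than changing what the patch does.
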
diff --git a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
index 248e6bb41cddb..525bb35496529 100644
--- a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
+++ b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
@@ -1,4 +1,4 @@
-From 1160ade4f9182a801727627adcfd73f1ad6f6659 Mon Sep 17 00:00:00 2001
+From 6246e636c994a42d829368b839c3c2974ca83a9e Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 4 Apr 2017 12:50:16 +0200
Subject: [PATCH] kernel: sched: Provide a pointer to the valid CPU mask
@@ -298,7 +298,7 @@ index 2c487e0879d5..f760c3ac8fc6 100644
.mm = NULL, \
.active_mm = &init_mm, \
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 84b2910e6d5d..2de83101e548 100644
+index 6a96d7392844..8214168f9be2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -535,7 +535,8 @@ struct task_struct {
@@ -321,7 +321,7 @@ index 84b2910e6d5d..2de83101e548 100644
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
-index 0f41292be0fb..5bb6d48cfece 100644
+index f6501f4f6040..cc7a3e699f87 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -2092,7 +2092,7 @@ static void cpuset_fork(struct task_struct *task)
@@ -347,10 +347,10 @@ index b30196a00b0d..5d8e25b1a0de 100644
* One for us, one for whoever does the "release_task()" (usually
* parent)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 1199a4275095..1ce68555bf2c 100644
+index 47a7bdb8bd52..67fee2d66540 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -986,7 +986,7 @@ static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_
+@@ -984,7 +984,7 @@ static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
return rq;
/* Affinity changed (again). */
@@ -358,7 +358,7 @@ index 1199a4275095..1ce68555bf2c 100644
+ if (!cpumask_test_cpu(dest_cpu, p->cpus_ptr))
return rq;
- rq = move_queued_task(rq, p, dest_cpu);
+ update_rq_clock(rq);
@@ -1012,7 +1012,7 @@ static int migration_cpu_stop(void *data)
local_irq_disable();
/*
@@ -386,7 +386,7 @@ index 1199a4275095..1ce68555bf2c 100644
goto out;
if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
-@@ -1264,10 +1264,10 @@ static int migrate_swap_stop(void *data)
+@@ -1270,10 +1270,10 @@ static int migrate_swap_stop(void *data)
if (task_cpu(arg->src_task) != arg->src_cpu)
goto unlock;
@@ -399,7 +399,7 @@ index 1199a4275095..1ce68555bf2c 100644
goto unlock;
__migrate_swap_task(arg->src_task, arg->dst_cpu);
-@@ -1308,10 +1308,10 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p)
+@@ -1314,10 +1314,10 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p)
if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
goto out;
@@ -412,7 +412,7 @@ index 1199a4275095..1ce68555bf2c 100644
goto out;
trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
-@@ -1455,7 +1455,7 @@ void kick_process(struct task_struct *p)
+@@ -1461,7 +1461,7 @@ void kick_process(struct task_struct *p)
EXPORT_SYMBOL_GPL(kick_process);
/*
@@ -421,7 +421,7 @@ index 1199a4275095..1ce68555bf2c 100644
*
* A few notes on cpu_active vs cpu_online:
*
-@@ -1495,14 +1495,14 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
+@@ -1501,14 +1501,14 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
for_each_cpu(dest_cpu, nodemask) {
if (!cpu_active(dest_cpu))
continue;
@@ -438,7 +438,7 @@ index 1199a4275095..1ce68555bf2c 100644
if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
continue;
if (!cpu_online(dest_cpu))
-@@ -1547,7 +1547,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
+@@ -1553,7 +1553,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
}
/*
@@ -447,7 +447,7 @@ index 1199a4275095..1ce68555bf2c 100644
*/
static inline
int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
-@@ -1557,11 +1557,11 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
+@@ -1563,11 +1563,11 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
if (p->nr_cpus_allowed > 1)
cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
else
@@ -461,7 +461,7 @@ index 1199a4275095..1ce68555bf2c 100644
* CPU.
*
* Since this is common to all placement strategies, this lives here.
-@@ -1569,7 +1569,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
+@@ -1575,7 +1575,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
* [ this allows ->select_task() to simply return task_cpu(p) and
* not worry about this generic constraint ]
*/
@@ -470,7 +470,7 @@ index 1199a4275095..1ce68555bf2c 100644
!cpu_online(cpu)))
cpu = select_fallback_rq(task_cpu(p), p);
-@@ -2543,7 +2543,7 @@ void wake_up_new_task(struct task_struct *p)
+@@ -2544,7 +2544,7 @@ void wake_up_new_task(struct task_struct *p)
#ifdef CONFIG_SMP
/*
* Fork balancing, do it here and not earlier because:
@@ -479,7 +479,7 @@ index 1199a4275095..1ce68555bf2c 100644
* - any previously selected CPU might disappear through hotplug
*
* Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
-@@ -4315,7 +4315,7 @@ static int __sched_setscheduler(struct task_struct *p,
+@@ -4316,7 +4316,7 @@ static int __sched_setscheduler(struct task_struct *p,
* the entire root_domain to become SCHED_DEADLINE. We
* will also fail if there's no bandwidth available.
*/
@@ -488,7 +488,7 @@ index 1199a4275095..1ce68555bf2c 100644
rq->rd->dl_bw.bw == 0) {
task_rq_unlock(rq, p, &rf);
return -EPERM;
-@@ -4909,7 +4909,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
+@@ -4910,7 +4910,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
goto out_unlock;
raw_spin_lock_irqsave(&p->pi_lock, flags);
@@ -497,7 +497,7 @@ index 1199a4275095..1ce68555bf2c 100644
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
out_unlock:
-@@ -5469,7 +5469,7 @@ int task_can_attach(struct task_struct *p,
+@@ -5474,7 +5474,7 @@ int task_can_attach(struct task_struct *p,
* allowed nodes is unnecessary. Thus, cpusets are not
* applicable for such threads. This prevents checking for
* success of set_cpus_allowed_ptr() on all attached tasks
@@ -506,7 +506,7 @@ index 1199a4275095..1ce68555bf2c 100644
*/
if (p->flags & PF_NO_SETAFFINITY) {
ret = -EINVAL;
-@@ -5525,7 +5525,7 @@ int migrate_task_to(struct task_struct *p, int target_cpu)
+@@ -5530,7 +5530,7 @@ int migrate_task_to(struct task_struct *p, int target_cpu)
if (curr_cpu == target_cpu)
return 0;
@@ -515,7 +515,7 @@ index 1199a4275095..1ce68555bf2c 100644
return -EINVAL;
/* TODO: This is not properly updating schedstats */
-@@ -5665,7 +5665,7 @@ static void migrate_tasks(struct rq *dead_rq)
+@@ -5667,7 +5667,7 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf)
next->sched_class->put_prev_task(rq, next);
/*
@@ -591,10 +591,10 @@ index a2ce59015642..ce9233cd1126 100644
!dl_task(task) ||
!task_on_rq_queued(task))) {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index dea138964b91..e136330f861f 100644
+index a903276fcb62..a87950eaa2f2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -1553,7 +1553,7 @@ static void task_numa_compare(struct task_numa_env *env,
+@@ -1547,7 +1547,7 @@ static void task_numa_compare(struct task_numa_env *env,
*/
if (cur) {
/* Skip this swap candidate if cannot move to the source cpu */
@@ -603,7 +603,7 @@ index dea138964b91..e136330f861f 100644
goto unlock;
/*
-@@ -1663,7 +1663,7 @@ static void task_numa_find_cpu(struct task_numa_env *env,
+@@ -1657,7 +1657,7 @@ static void task_numa_find_cpu(struct task_numa_env *env,
for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
/* Skip this CPU if the source task cannot migrate */
@@ -612,7 +612,7 @@ index dea138964b91..e136330f861f 100644
continue;
env->dst_cpu = cpu;
-@@ -5460,7 +5460,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
+@@ -5485,7 +5485,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
/* Skip over this group if it has no CPUs allowed */
if (!cpumask_intersects(sched_group_cpus(group),
@@ -621,7 +621,7 @@ index dea138964b91..e136330f861f 100644
continue;
local_group = cpumask_test_cpu(this_cpu,
-@@ -5580,7 +5580,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
+@@ -5605,7 +5605,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
return cpumask_first(sched_group_cpus(group));
/* Traverse only the allowed CPUs */
@@ -630,7 +630,7 @@ index dea138964b91..e136330f861f 100644
if (idle_cpu(i)) {
struct rq *rq = cpu_rq(i);
struct cpuidle_state *idle = idle_get_state(rq);
-@@ -5719,7 +5719,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
+@@ -5744,7 +5744,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
if (!test_idle_cores(target, false))
return -1;
@@ -639,7 +639,7 @@ index dea138964b91..e136330f861f 100644
for_each_cpu_wrap(core, cpus, target, wrap) {
bool idle = true;
-@@ -5753,7 +5753,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
+@@ -5778,7 +5778,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
return -1;
for_each_cpu(cpu, cpu_smt_mask(target)) {
@@ -648,7 +648,7 @@ index dea138964b91..e136330f861f 100644
continue;
if (idle_cpu(cpu))
return cpu;
-@@ -5805,7 +5805,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
+@@ -5830,7 +5830,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
time = local_clock();
for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
@@ -657,7 +657,7 @@ index dea138964b91..e136330f861f 100644
continue;
if (idle_cpu(cpu))
break;
-@@ -5960,7 +5960,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
+@@ -5985,7 +5985,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
if (sd_flag & SD_BALANCE_WAKE) {
record_wakee(p);
want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
@@ -666,7 +666,7 @@ index dea138964b91..e136330f861f 100644
}
rcu_read_lock();
-@@ -6693,14 +6693,14 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
+@@ -6718,14 +6718,14 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
/*
* We do not migrate tasks that are:
* 1) throttled_lb_pair, or
@@ -683,7 +683,7 @@ index dea138964b91..e136330f861f 100644
int cpu;
schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
-@@ -6720,7 +6720,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
+@@ -6745,7 +6745,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
/* Prevent to re-select dst_cpu via env's cpus */
for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
@@ -692,7 +692,7 @@ index dea138964b91..e136330f861f 100644
env->flags |= LBF_DST_PINNED;
env->new_dst_cpu = cpu;
break;
-@@ -7254,7 +7254,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
+@@ -7287,7 +7287,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
/*
* Group imbalance indicates (and tries to solve) the problem where balancing
@@ -701,7 +701,7 @@ index dea138964b91..e136330f861f 100644
*
* Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
* cpumask covering 1 cpu of the first group and 3 cpus of the second group.
-@@ -7828,7 +7828,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
+@@ -7862,7 +7862,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
/*
* If the busiest group is imbalanced the below checks don't
* work because they assume all things are equal, which typically
@@ -710,7 +710,7 @@ index dea138964b91..e136330f861f 100644
*/
if (busiest->group_type == group_imbalanced)
goto force_balance;
-@@ -8213,7 +8213,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
+@@ -8249,7 +8249,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
* if the curr task on busiest cpu can't be
* moved to this_cpu
*/
@@ -720,7 +720,7 @@ index dea138964b91..e136330f861f 100644
flags);
env.flags |= LBF_ALL_PINNED;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
-index 9f3e40226dec..872eba9f2174 100644
+index 979b7341008a..a05a54efc0ee 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1591,7 +1591,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)