diff options
author | Paul Gortmaker <paul.gortmaker@windriver.com> | 2016-08-29 18:09:38 -0400 |
---|---|---|
committer | Paul Gortmaker <paul.gortmaker@windriver.com> | 2016-08-29 18:09:38 -0400 |
commit | 2bb0b61dbf1c181992572c450ef0610a92e3f571 (patch) | |
tree | da7998931116082929cd2a33075ba66ac1d3309e | |
parent | 21ff871160054395f1b4d876659950a5d7cc081a (diff) | |
download | 4.8-rt-patches-2bb0b61dbf1c181992572c450ef0610a92e3f571.tar.gz |
mmdrop: refresh (tag: rt-v4.6-10965-ge12fab28df1d)
-rw-r--r-- | patches/sched-Move-mmdrop-to-RCU-on-RT.patch | 26 |
1 files changed, 13 insertions, 13 deletions
diff --git a/patches/sched-Move-mmdrop-to-RCU-on-RT.patch b/patches/sched-Move-mmdrop-to-RCU-on-RT.patch index d162909ab322fa..9dbf61c224566c 100644 --- a/patches/sched-Move-mmdrop-to-RCU-on-RT.patch +++ b/patches/sched-Move-mmdrop-to-RCU-on-RT.patch @@ -1,4 +1,4 @@ -From e451b0c9724c3aed378b619b3d1f1a47ffbaae6b Mon Sep 17 00:00:00 2001 +From 17ef899c75aa15aeb7818b3b1fec7a84518d10e5 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner <tglx@linutronix.de> Date: Mon, 6 Jun 2011 12:20:33 +0200 Subject: [PATCH] sched: Move mmdrop to RCU on RT @@ -9,7 +9,7 @@ we want to do in task switch and oder atomic contexts. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h -index d553855503e6..45f59c2398e3 100644 +index ca3e517980a0..986c97b1a5bf 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -11,6 +11,7 @@ @@ -31,10 +31,10 @@ index d553855503e6..45f59c2398e3 100644 /* address of the bounds directory */ void __user *bd_addr; diff --git a/include/linux/sched.h b/include/linux/sched.h -index 0f496c2da1b9..b53efc525e61 100644 +index 8f50f34904f4..89319443d322 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -2752,6 +2752,7 @@ extern struct mm_struct * mm_alloc(void); +@@ -2757,6 +2757,7 @@ extern struct mm_struct * mm_alloc(void); /* mmdrop drops the mm and the page tables */ extern void __mmdrop(struct mm_struct *); @@ -42,7 +42,7 @@ index 0f496c2da1b9..b53efc525e61 100644 static inline void mmdrop(struct mm_struct *mm) { if (unlikely(atomic_dec_and_test(&mm->mm_count))) -@@ -2763,6 +2764,17 @@ static inline bool mmget_not_zero(struct mm_struct *mm) +@@ -2768,6 +2769,17 @@ static inline bool mmget_not_zero(struct mm_struct *mm) return atomic_inc_not_zero(&mm->mm_users); } @@ -59,12 +59,12 @@ index 0f496c2da1b9..b53efc525e61 100644 + /* mmput gets rid of the mappings and all user-space */ extern void mmput(struct mm_struct *); - /* same as above but performs the slow path from the 
async kontext. Can + #ifdef CONFIG_MMU diff --git a/kernel/fork.c b/kernel/fork.c -index 04b34ce10164..7c4a21f54acd 100644 +index bec1b14130e7..113771be0045 100644 --- a/kernel/fork.c +++ b/kernel/fork.c -@@ -696,6 +696,19 @@ struct mm_struct *mm_alloc(void) +@@ -701,6 +701,19 @@ struct mm_struct *mm_alloc(void) return mm_init(mm, current); } @@ -85,10 +85,10 @@ index 04b34ce10164..7c4a21f54acd 100644 * Called when the last reference to the mm * is dropped: either by a lazy thread or by diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 55e398b5fdd9..07d582b038a3 100644 +index 37b89c4a46db..56f08d6edcd0 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -2727,8 +2727,12 @@ static struct rq *finish_task_switch(struct task_struct *prev) +@@ -2731,8 +2731,12 @@ static struct rq *finish_task_switch(struct task_struct *prev) finish_arch_post_lock_switch(); fire_sched_in_preempt_notifiers(current); @@ -102,7 +102,7 @@ index 55e398b5fdd9..07d582b038a3 100644 if (unlikely(prev_state == TASK_DEAD)) { if (prev->sched_class->task_dead) prev->sched_class->task_dead(prev); -@@ -5439,6 +5443,8 @@ void sched_setnuma(struct task_struct *p, int nid) +@@ -5443,6 +5447,8 @@ void sched_setnuma(struct task_struct *p, int nid) #endif /* CONFIG_NUMA_BALANCING */ #ifdef CONFIG_HOTPLUG_CPU @@ -111,7 +111,7 @@ index 55e398b5fdd9..07d582b038a3 100644 /* * Ensures that the idle task is using init_mm right before its cpu goes * offline. -@@ -5453,7 +5459,11 @@ void idle_task_exit(void) +@@ -5457,7 +5463,11 @@ void idle_task_exit(void) switch_mm_irqs_off(mm, &init_mm, current); finish_arch_post_lock_switch(); } @@ -124,7 +124,7 @@ index 55e398b5fdd9..07d582b038a3 100644 } /* -@@ -7324,6 +7334,10 @@ int sched_cpu_dying(unsigned int cpu) +@@ -7328,6 +7338,10 @@ int sched_cpu_dying(unsigned int cpu) BUG_ON(rq->nr_running != 1); raw_spin_unlock_irqrestore(&rq->lock, flags); calc_load_migrate(rq); |