author     Andy Lutomirski <luto@kernel.org>   2021-11-05 21:20:47 -0700
committer  Andy Lutomirski <luto@kernel.org>   2021-11-05 21:20:47 -0700
commit     54b675d9b28d9a56289d06a813250472bc621f40 (patch)
tree       d3f2f68e1f83399edb2df20b788d23c84233ae55
parent     0304e8588dd4cf6d73a7553b9913e80758f3f399 (diff)
download   linux-sched/bad_lazymm.tar.gz

[HACK] demonstrate lazy tlb issues (sched/bad_lazymm)
-rw-r--r--  arch/Kconfig         1
-rw-r--r--  kernel/sched/core.c  7
2 files changed, 8 insertions, 0 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index cca27f1b5d0edb..19f273642d8fa6 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -442,6 +442,7 @@ config ARCH_WANT_IRQS_OFF_ACTIVATE_MM
 config MMU_LAZY_TLB_REFCOUNT
 	def_bool y
 	depends on !MMU_LAZY_TLB_SHOOTDOWN
+	depends on !X86
 
 # This option allows MMU_LAZY_TLB_REFCOUNT=n. It ensures no CPUs are using an
 # mm as a lazy tlb beyond its last reference count, by shooting down these
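
For orientation: MMU_LAZY_TLB_REFCOUNT controls whether a CPU that keeps an mm around only as a lazy TLB also holds a real reference on it. A minimal sketch of how such a switch is typically consumed, assuming grab/drop helpers along the lines of the mmgrab_lazy_tlb() named in the context_switch() comment below (the exact helpers in this branch may differ):

	/* Sketch only: lazy-TLB references pin the mm only when refcounting is enabled. */
	static inline void mmgrab_lazy_tlb(struct mm_struct *mm)
	{
		if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT))
			mmgrab(mm);	/* hold a real reference while mm is only a lazy TLB */
	}

	static inline void mmdrop_lazy_tlb(struct mm_struct *mm)
	{
		if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT))
			mmdrop(mm);	/* drop the lazy-TLB reference again */
	}

With the hunk above, x86 is forced to MMU_LAZY_TLB_REFCOUNT=n, presumably without selecting MMU_LAZY_TLB_SHOOTDOWN, so a lazy mm is no longer kept alive by either mechanism; that appears to be the failure mode this hack is meant to expose, and the KASAN check below is there to catch it.
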
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 25dd795497e818..c5a0c1e92524cf 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4903,6 +4903,13 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	arch_start_context_switch(prev);
 
 	/*
+	 * Sanity check: if something went wrong and the previous mm was
+	 * freed while we were still using it, KASAN might not notice
+	 * without help.
+	 */
+	kasan_check_byte(prev->active_mm);
+
+	/*
 	 * kernel -> kernel   lazy + transfer active
 	 *   user -> kernel   lazy + mmgrab_lazy_tlb() active
 	 *
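
The return value of kasan_check_byte() is deliberately ignored here; the call exists to make KASAN inspect prev->active_mm at a well-defined point, since on the lazy-TLB path the borrowed mm may never be touched by a compiler-instrumented load or store that would trip KASAN on its own. A minimal sketch of the same pattern in isolation, assuming a KASAN-enabled build (check_still_alive() is an illustrative name, not a kernel function):

	#include <linux/kasan.h>
	#include <linux/printk.h>

	/* Sketch only: explicitly ask KASAN whether an object is still valid. */
	static void check_still_alive(const void *obj)
	{
		/*
		 * kasan_check_byte() emits a KASAN report and returns false if
		 * the first byte of @obj is poisoned (for example, freed); it
		 * returns true with no side effects while the memory is valid.
		 */
		if (!kasan_check_byte(obj))
			pr_warn("object %px was already freed\n", obj);
	}

In context_switch() even the warning is unnecessary: the KASAN report itself is the desired output if the previous mm was freed while this CPU was still using it as a lazy TLB.
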