author     Frederic Weisbecker <frederic@kernel.org>   2024-04-21 23:34:50 +0200
committer  Frederic Weisbecker <frederic@kernel.org>   2024-04-21 23:47:39 +0200
commit     4bf6e6afb803e5862efb2cc8f2585d10e2f597f2
tree       f30e1533469f26453bfc6b5fb52e50ac5566a62f
parent     b5ed7ff8dcb5e3c63e9e3efd32f62ee871ba3f1e
mm: Drain LRUs upon resume to userspace

LRUs can be drained in several ways. One of them queues a work at any
time, on any target CPU, and can therefore disturb isolated workloads,
whether they run in nohz_full mode or not.

Prevent that by draining the LRUs upon resume to userspace, using the
isolated task work framework.

Note that this is inherently racy against lru_add_drain_all() remotely
queueing the per-CPU drain work, so the undesired disturbance is only
prevented *most of the time*.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
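
The isolated task work framework referenced above is introduced earlier
in this series and is not visible in this diff. Below is a minimal
sketch of the generic task_work mechanism it plausibly builds on,
deferring a callback to the return-to-userspace path; the
queue_on_resume() and my_resume_callback() names are hypothetical:

#include <linux/sched.h>
#include <linux/task_work.h>

static struct callback_head my_resume_work;

/* Runs in the queueing task's context, on its way back to userspace. */
static void my_resume_callback(struct callback_head *head)
{
	/* e.g. drain this CPU's LRU batches before entering userspace */
}

static int queue_on_resume(void)
{
	init_task_work(&my_resume_work, my_resume_callback);
	/* TWA_RESUME defers the callback until the task resumes userspace. */
	return task_work_add(current, &my_resume_work, TWA_RESUME);
}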
 include/linux/swap.h     | 1 +
 kernel/sched/isolation.c | 1 +
 mm/swap.c                | 5 ++++-
 3 files changed, 6 insertions(+), 1 deletion(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index f53d608daa013..283ad33eb60d4 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -383,6 +383,7 @@ extern void lru_add_drain(void);
 extern void lru_add_drain_cpu(int cpu);
 extern void lru_add_drain_cpu_zone(struct zone *zone);
 extern void lru_add_drain_all(void);
+extern void lru_add_and_bh_lrus_drain(void);
 void folio_deactivate(struct folio *folio);
 void folio_mark_lazyfree(struct folio *folio);
 extern void swap_setup(void);
diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c
index fcc1e91ede071..462076fa9150f 100644
--- a/kernel/sched/isolation.c
+++ b/kernel/sched/isolation.c
@@ -243,6 +243,7 @@ __setup("isolcpus=", housekeeping_isolcpus_setup);
 #if defined(CONFIG_NO_HZ_FULL) && defined(CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK)
 static void isolated_task_work(struct callback_head *head)
 {
+	lru_add_and_bh_lrus_drain();
 }
 
 int __isolated_task_work_queue(void)
diff --git a/mm/swap.c b/mm/swap.c
index 500a09a48dfd3..e4dc998a470eb 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -37,6 +37,7 @@
 #include <linux/page_idle.h>
 #include <linux/local_lock.h>
 #include <linux/buffer_head.h>
+#include <linux/sched/isolation.h>
 
 #include "internal.h"
 
@@ -531,6 +532,8 @@ void folio_add_lru(struct folio *folio)
 	fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);
 	folio_batch_add_and_move(fbatch, folio, lru_add_fn);
 	local_unlock(&cpu_fbatches.lock);
+
+	isolated_task_work_queue();
 }
 EXPORT_SYMBOL(folio_add_lru);
 
@@ -775,7 +778,7 @@ void lru_add_drain(void)
  * the same cpu. It shouldn't be a problem in !SMP case since
  * the core is only one and the locks will disable preemption.
  */
-static void lru_add_and_bh_lrus_drain(void)
+void lru_add_and_bh_lrus_drain(void)
 {
 	local_lock(&cpu_fbatches.lock);
 	lru_add_drain_cpu(smp_processor_id());
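
Why only *most of the time*: lru_add_drain_all() flushes remote CPUs'
batches by queueing a drain work on each of them, which can target an
isolated CPU at any moment. A simplified sketch of that pattern follows,
not the actual mm/swap.c code (which also skips CPUs with nothing
pending):

#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>

static DEFINE_PER_CPU(struct work_struct, drain_work);

/* Executes on the target CPU: this is the disturbance for isolated CPUs. */
static void drain_fn(struct work_struct *work)
{
	/* drain this CPU's LRU batches */
}

static void drain_all_cpus_sketch(void)
{
	int cpu;

	cpus_read_lock();
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(&drain_work, cpu);

		INIT_WORK(work, drain_fn);
		/*
		 * This may land on an isolated CPU after it has already
		 * drained on resume to userspace, re-disturbing it.
		 */
		schedule_work_on(cpu, work);
	}
	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(&drain_work, cpu));
	cpus_read_unlock();
}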