about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@linux.dev>2022-12-19 14:47:42 -0500
committerKent Overstreet <kent.overstreet@linux.dev>2022-12-19 14:47:42 -0500
commite2670a38d1ad6038d64687cb1d585349508e06d7 (patch)
tree070543582e146c33280a5dbeadbc9c8c79ac2005
parent71111771690f244d13650c73d52ff601ad914d95 (diff)
downloadbcachefs-tools-e2670a38d1ad6038d64687cb1d585349508e06d7.tar.gz
Change memory reclaim
- Spin up a background thread to call the shrinkers every 1 second.
- Memory allocations will only call reclaim after a failed allocation, not every single time.

This will be a major performance boost on allocation-intensive workloads.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
-rw-r--r--include/linux/slab.h56
-rw-r--r--linux/shrinker.c29
2 files changed, 63 insertions, 22 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 17fe235e..cf48570c 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -20,12 +20,10 @@
static inline void *kmalloc(size_t size, gfp_t flags)
{
- unsigned i = 0;
+ unsigned i;
void *p;
- do {
- run_shrinkers(flags, i != 0);
-
+ for (i = 0; i < 10; i++) {
if (size) {
size_t alignment = min(rounddown_pow_of_two(size), (size_t)PAGE_SIZE);
alignment = max(sizeof(void *), alignment);
@@ -34,9 +32,15 @@ static inline void *kmalloc(size_t size, gfp_t flags)
} else {
p = malloc(0);
}
- if (p && (flags & __GFP_ZERO))
- memset(p, 0, size);
- } while (!p && i++ < 10);
+
+ if (p) {
+ if (flags & __GFP_ZERO)
+ memset(p, 0, size);
+ break;
+ }
+
+ run_shrinkers(flags, true);
+ }
return p;
}
@@ -93,16 +97,20 @@ static inline void *krealloc_array(void *p, size_t new_n, size_t new_size, gfp_t
static inline struct page *alloc_pages(gfp_t flags, unsigned int order)
{
size_t size = PAGE_SIZE << order;
- unsigned i = 0;
+ unsigned i;
void *p;
- do {
- run_shrinkers(flags, i != 0);
-
+ for (i = 0; i < 10; i++) {
p = aligned_alloc(PAGE_SIZE, size);
- if (p && (flags & __GFP_ZERO))
- memset(p, 0, size);
- } while (!p && i++ < 10);
+
+ if (p) {
+ if (flags & __GFP_ZERO)
+ memset(p, 0, size);
+ break;
+ }
+
+ run_shrinkers(flags, true);
+ }
return p;
}
@@ -193,20 +201,24 @@ static inline struct kmem_cache *kmem_cache_create(size_t obj_size)
#define vfree(p) free(p)
-static inline void *__vmalloc(unsigned long size, gfp_t gfp_mask)
+static inline void *__vmalloc(unsigned long size, gfp_t flags)
{
- unsigned i = 0;
+ unsigned i;
void *p;
size = round_up(size, PAGE_SIZE);
- do {
- run_shrinkers(gfp_mask, i != 0);
-
+ for (i = 0; i < 10; i++) {
p = aligned_alloc(PAGE_SIZE, size);
- if (p && gfp_mask & __GFP_ZERO)
- memset(p, 0, size);
- } while (!p && i++ < 10);
+
+ if (p) {
+ if (flags & __GFP_ZERO)
+ memset(p, 0, size);
+ break;
+ }
+
+ run_shrinkers(flags, true);
+ }
return p;
}
diff --git a/linux/shrinker.c b/linux/shrinker.c
index 23e288d8..0b5715b3 100644
--- a/linux/shrinker.c
+++ b/linux/shrinker.c
@@ -1,6 +1,7 @@
#include <stdio.h>
+#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
@@ -126,3 +127,31 @@ void run_shrinkers(gfp_t gfp_mask, bool allocation_failed)
}
mutex_unlock(&shrinker_lock);
}
+
+static int shrinker_thread(void *arg)
+{
+ while (!kthread_should_stop()) {
+ sleep(1);
+ run_shrinkers(GFP_KERNEL, false);
+ }
+
+ return 0;
+}
+
+struct task_struct *shrinker_task;
+
+__attribute__((constructor(103)))
+static void shrinker_thread_init(void)
+{
+ shrinker_task = kthread_run(shrinker_thread, NULL, "shrinkers");
+ BUG_ON(IS_ERR(shrinker_task));
+}
+
+__attribute__((destructor(103)))
+static void shrinker_thread_exit(void)
+{
+ int ret = kthread_stop(shrinker_task);
+ BUG_ON(ret);
+
+ shrinker_task = NULL;
+}