author	Glauber Costa <glommer@parallels.com>	2012-04-16 13:54:27 -0300
committer	Glauber Costa <glommer@parallels.com>	2012-10-23 14:46:55 +0400
commit	c98470c4f96480e34cd3907674dd246ff6f83867
tree	4294497746597a682037c5915113b5a15176fa22
parent	e2f67e12bcc995869684c84e20c2cc3e35e9eda1
sl[au]b: always get the cache from its page in kmem_cache_free
struct page already has this information. If we start chaining caches, this
information will always be more trustworthy than whatever is passed into
the function.

With this patch, we are also modifying __kmem_cache_create's signature, so
slub doesn't need to derive the page pointer again (even though it should
be cache-hot...).

[ v3: added parent testing with VM_BUG_ON ]
[ v4: made it faster when kmemcg is not in use ]
[ v6: moved it to slab_common.c ]

Signed-off-by: Glauber Costa <glommer@parallels.com>
CC: Christoph Lameter <cl@linux.com>
CC: Pekka Enberg <penberg@cs.helsinki.fi>
CC: Michal Hocko <mhocko@suse.cz>
CC: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
CC: Johannes Weiner <hannes@cmpxchg.org>
CC: Suleiman Souhlal <suleiman@google.com>
CC: Tejun Heo <tj@kernel.org>
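For readers outside mm/: every slab object lives on a page whose struct page
records the owning cache in page->slab_cache, so the cache can always be
recovered from the object pointer alone. The sketch below models that mapping
in plain userspace C; the types, names, and the fixed-size page array are
hypothetical stand-ins, not the kernel's real structures:

	#include <stdint.h>
	#include <stdio.h>

	struct kmem_cache { const char *name; };
	struct page { struct kmem_cache *slab_cache; };

	#define PAGE_SHIFT 12
	#define NPAGES 1024

	static struct page pages[NPAGES];	/* toy stand-in for the memmap */

	/* analogue of virt_to_head_page(): object address -> its page */
	static struct page *obj_to_page(void *x)
	{
		return &pages[((uintptr_t)x >> PAGE_SHIFT) % NPAGES];
	}

	/*
	 * The point of the patch: read the cache back from the page that
	 * actually holds the object instead of trusting the caller.
	 */
	static struct kmem_cache *translate_cache(struct kmem_cache *claimed,
						  void *x)
	{
		struct kmem_cache *actual = obj_to_page(x)->slab_cache;

		if (actual != claimed)
			fprintf(stderr, "wrong cache: %s but object is from %s\n",
				claimed->name, actual->name);
		return actual;
	}

	int main(void)
	{
		struct kmem_cache a = { "task_struct" }, b = { "dentry" };
		void *obj = (void *)0x5000UL;		/* "object" on page 5 */

		obj_to_page(obj)->slab_cache = &a;	/* the page knows its cache */
		translate_cache(&b, obj);		/* caller lied: warns, returns &a */
		return 0;
	}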
-rw-r--r--include/linux/memcontrol.h4
-rw-r--r--mm/slab.c4
-rw-r--r--mm/slab.h46
-rw-r--r--mm/slob.c4
-rw-r--r--mm/slub.c13
5 files changed, 56 insertions, 15 deletions
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 92fc47a112d0a2..f1ecb4f0ff02e0 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -534,6 +534,10 @@ static inline void sock_release_memcg(struct sock *sk)
{
}
+static inline bool memcg_kmem_enabled(void)
+{
+ return false;
+}
static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
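This stub is the !CONFIG_MEMCG_KMEM half, so on kernels without kmemcg
memcg_kmem_enabled() is a compile-time constant false. That is what keeps the
new check in mm/slab.h below cheap: with the stub in place, the guard at the
top of translate_cache() effectively reduces to the following (a sketch of
what the optimizer sees, not literal source):

	/* kmemcg compiled out: only the debug flag is tested, and the
	 * page lookup runs solely for SLAB_DEBUG_FREE caches */
	if (likely(!(s->flags & SLAB_DEBUG_FREE)))
		return s;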
diff --git a/mm/slab.c b/mm/slab.c
index bceffccedca0c6..afaa272f0bec2a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -87,8 +87,10 @@
*/
#include <linux/slab.h>
-#include "slab.h"
+#include <linux/memcontrol.h>
#include <linux/mm.h>
+#include "slab.h"
+
#include <linux/poison.h>
#include <linux/swap.h>
#include <linux/cache.h>
diff --git a/mm/slab.h b/mm/slab.h
index da3692e26b4401..be25a02f964055 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -108,6 +108,13 @@ static inline bool cache_match_memcg(struct kmem_cache *cachep,
return (is_root_cache(cachep) && !memcg) ||
(cachep->memcg_params->memcg == memcg);
}
+
+static inline bool slab_equal_or_root(struct kmem_cache *s,
+ struct kmem_cache *p)
+{
+ return (p == s) ||
+ (s->memcg_params && (p == s->memcg_params->root_cache));
+}
#else
static inline bool is_root_cache(struct kmem_cache *s)
{
@@ -119,8 +126,41 @@ static inline bool cache_match_memcg(struct kmem_cache *cachep,
{
return true;
}
+
+static inline bool slab_equal_or_root(struct kmem_cache *s,
+ struct kmem_cache *p)
+{
+ return true;
+}
#endif
+static inline struct kmem_cache *translate_cache(struct kmem_cache *s, void *x)
+{
+ struct kmem_cache *cachep;
+ struct page *page;
+
+ /*
+ * When kmemcg is not being used, both assignments should return the
+ * same value, but we don't want to pay the assignment price in that
+ * case. If kmemcg is not compiled in, the compiler should be smart
+ * enough to elide even the assignment; in that case slab_equal_or_root
+ * will also be a compile-time constant.
+ */
+ if (!memcg_kmem_enabled() && likely(!(s->flags & SLAB_DEBUG_FREE)))
+ return s;
+
+ page = virt_to_head_page(x);
+ cachep = page->slab_cache;
+
+ if (!slab_equal_or_root(cachep, s)) {
+ pr_err("%s: Wrong slab cache. %s but object is from %s\n",
+ __func__, s->name, cachep->name);
+ WARN_ON_ONCE(1);
+ return NULL;
+ }
+
+ return cachep;
+}
/*
* What goes below for kmem_cache_free is not pretty. But because this
* is an extremely hot path, we would like to avoid function calls as
@@ -142,7 +182,11 @@ static inline bool cache_match_memcg(struct kmem_cache *cachep,
#define KMEM_CACHE_FREE(allocator_fn) \
void kmem_cache_free(struct kmem_cache *s, void *x) \
{ \
- allocator_fn(s, x); \
+ struct kmem_cache *cachep; \
+ cachep = translate_cache(s, x); \
+ if (!cachep) \
+ return; \
+ allocator_fn(cachep, x); \
trace_kmem_cache_free(_RET_IP_, x); \
} \
EXPORT_SYMBOL(kmem_cache_free)
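To make the wrapper concrete: the KMEM_CACHE_FREE(__kmem_cache_free)
instantiation at the bottom of mm/slub.c (see the slub.c hunk below) expands
to roughly the following; this is just the macro above written out, not new
code:

	void kmem_cache_free(struct kmem_cache *s, void *x)
	{
		struct kmem_cache *cachep;

		/* resolve the real cache from the object's page */
		cachep = translate_cache(s, x);
		if (!cachep)
			return;		/* translate_cache() already warned */
		__kmem_cache_free(cachep, x);
		trace_kmem_cache_free(_RET_IP_, x);
	}
	EXPORT_SYMBOL(kmem_cache_free);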
diff --git a/mm/slob.c b/mm/slob.c
index 15803712b2c2f9..b1acd0beddb7c3 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -59,9 +59,11 @@
#include <linux/kernel.h>
#include <linux/slab.h>
-#include "slab.h"
+#include <linux/memcontrol.h>
#include <linux/mm.h>
+#include "slab.h"
+
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
#include <linux/init.h>
diff --git a/mm/slub.c b/mm/slub.c
index 686555e85b03c7..9969b0cec6f828 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2609,18 +2609,7 @@ redo:
static __always_inline void __kmem_cache_free(struct kmem_cache *s, void *x)
{
- struct page *page;
-
- page = virt_to_head_page(x);
-
- if (kmem_cache_debug(s) && page->slab_cache != s) {
- pr_err("kmem_cache_free: Wrong slab cache. %s but object"
- " is from %s\n", page->slab_cache->name, s->name);
- WARN_ON_ONCE(1);
- return;
- }
-
- slab_free(s, page, x, _RET_IP_);
+ slab_free(s, virt_to_head_page(x), x, _RET_IP_);
}
KMEM_CACHE_FREE(__kmem_cache_free);
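With the wrong-cache check hoisted into translate_cache(), slub's
__kmem_cache_free() shrinks to a single slab_free() call, and slab and slob
inherit the same detection through the shared kmem_cache_free() wrapper
instead of each allocator carrying (or lacking) its own copy.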