author     Paul Gortmaker <paul.gortmaker@windriver.com>   2011-02-06 22:58:57 -0500
committer  Paul Gortmaker <paul.gortmaker@windriver.com>   2011-02-06 22:58:57 -0500
commit     f75544ca39ce50d90e619a50eecc0a81a08b59e9 (patch)
tree       5845cf2b90af9a96687c723566cc3d1ef0e95d10
parent     badb2157987663aea0ecb1de42afbc399e77434b (diff)
download   rt-patches-f75544ca39ce50d90e619a50eecc0a81a08b59e9.tar.gz
refresh mm-Restructure-slab-to-use-percpu-locked-data-struct.patch  (v2.6.33-5411-g64096c1)
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
-rw-r--r--   mm-Restructure-slab-to-use-percpu-locked-data-struct.patch   152
1 file changed, 76 insertions(+), 76 deletions(-)
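
For context on what the refreshed patch does: it replaces the slab allocator's implicit "interrupts disabled means the per-CPU cache is safe" rule with explicitly per-CPU locked data, which is why the hunks below thread an extra this_cpu argument through free_block(), __cache_free(), cache_grow() and friends. The userspace sketch that follows illustrates only that general pattern under made-up names (percpu_cache, cache_lock()/cache_unlock()); it is not the patch's actual slab_irq_*/array_cache implementation.

/*
 * Illustrative only: a userspace approximation of the per-CPU-locked
 * pattern.  The real patch keeps the kernel's array_cache/kmem_list3
 * structures and RT locking primitives; every name here is hypothetical.
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define NR_CPUS 8

struct percpu_cache {
	pthread_mutex_t lock;	/* stands in for the per-CPU lock taken on RT */
	int avail;		/* objects currently cached for this CPU      */
	void *entry[16];
};

static struct percpu_cache caches[NR_CPUS];

/* Lock "this CPU's" cache and tell the caller which CPU that was. */
static int cache_lock(void)
{
	int cpu = sched_getcpu();

	if (cpu < 0)
		cpu = 0;
	cpu %= NR_CPUS;
	pthread_mutex_lock(&caches[cpu].lock);
	return cpu;
}

static void cache_unlock(int cpu)
{
	pthread_mutex_unlock(&caches[cpu].lock);
}

/* Inner helper takes the CPU index explicitly instead of assuming irqs off. */
static void *cache_alloc_one(int this_cpu)
{
	struct percpu_cache *pc = &caches[this_cpu];

	return pc->avail ? pc->entry[--pc->avail] : NULL;
}

void *cache_alloc(void)
{
	int this_cpu = cache_lock();		/* was: local_irq_save()    */
	void *obj = cache_alloc_one(this_cpu);

	cache_unlock(this_cpu);			/* was: local_irq_restore() */
	return obj;
}

int main(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		pthread_mutex_init(&caches[i].lock, NULL);
	printf("alloc -> %p (cache starts empty)\n", cache_alloc());
	return 0;
}

Compile with cc -pthread. The point is only that the lock both protects and identifies the per-CPU data, so the CPU index can be handed down the call chain the way the refreshed patch hands this_cpu around.
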
diff --git a/mm-Restructure-slab-to-use-percpu-locked-data-struct.patch b/mm-Restructure-slab-to-use-percpu-locked-data-struct.patch
index 2e518e6..122cb9e 100644
--- a/mm-Restructure-slab-to-use-percpu-locked-data-struct.patch
+++ b/mm-Restructure-slab-to-use-percpu-locked-data-struct.patch
@@ -1,4 +1,4 @@
-From c9648d74893628a12dd677e871f763b92abb357b Mon Sep 17 00:00:00 2001
+From 1684df2ad25964a7fa192cefac10ea28b7f6946a Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:44:11 -0500
Subject: [PATCH] mm: Restructure slab to use percpu locked data structures
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
diff --git a/mm/slab.c b/mm/slab.c
-index 7451bda..b99df7f 100644
+index a9f325b..3164b8d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -121,6 +121,120 @@
@@ -164,7 +164,7 @@ index 7451bda..b99df7f 100644
}
static inline struct kmem_cache *__find_general_cachep(size_t size,
-@@ -942,7 +1057,7 @@ static int transfer_objects(struct array_cache *to,
+@@ -941,7 +1056,7 @@ static int transfer_objects(struct array_cache *to,
#ifndef CONFIG_NUMA
#define drain_alien_cache(cachep, alien) do { } while (0)
@@ -173,7 +173,7 @@ index 7451bda..b99df7f 100644
static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
-@@ -953,27 +1068,28 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
+@@ -952,27 +1067,28 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
{
}
@@ -207,7 +207,7 @@ index 7451bda..b99df7f 100644
static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
-@@ -1014,7 +1130,8 @@ static void free_alien_cache(struct array_cache **ac_ptr)
+@@ -1011,7 +1127,8 @@ static void free_alien_cache(struct array_cache **ac_ptr)
}
static void __drain_alien_cache(struct kmem_cache *cachep,
@@ -217,7 +217,7 @@ index 7451bda..b99df7f 100644
{
struct kmem_list3 *rl3 = cachep->nodelists[node];
-@@ -1028,7 +1145,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
+@@ -1025,7 +1142,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
if (rl3->shared)
transfer_objects(rl3->shared, ac, ac->limit);
@@ -226,7 +226,7 @@ index 7451bda..b99df7f 100644
ac->avail = 0;
spin_unlock(&rl3->list_lock);
}
-@@ -1037,38 +1154,42 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
+@@ -1034,38 +1151,42 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
/*
* Called from cache_reap() to regularly drain alien caches round robin.
*/
@@ -277,7 +277,7 @@ index 7451bda..b99df7f 100644
{
struct slab *slabp = virt_to_slab(objp);
int nodeid = slabp->nodeid;
-@@ -1076,7 +1197,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+@@ -1073,7 +1194,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
struct array_cache *alien = NULL;
int node;
@@ -286,7 +286,7 @@ index 7451bda..b99df7f 100644
/*
* Make sure we are not freeing a object from another node to the array
-@@ -1092,20 +1213,20 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+@@ -1089,20 +1210,20 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
spin_lock(&alien->lock);
if (unlikely(alien->avail == alien->limit)) {
STATS_INC_ACOVERFLOW(cachep);
@@ -310,7 +310,7 @@ index 7451bda..b99df7f 100644
{
struct kmem_cache *cachep;
struct kmem_list3 *l3 = NULL;
-@@ -1116,6 +1237,7 @@ static void __cpuinit cpuup_canceled(long cpu)
+@@ -1113,6 +1234,7 @@ static void __cpuinit cpuup_canceled(long cpu)
struct array_cache *nc;
struct array_cache *shared;
struct array_cache **alien;
@@ -318,7 +318,7 @@ index 7451bda..b99df7f 100644
/* cpu is dead; no one can alloc from it. */
nc = cachep->array[cpu];
-@@ -1130,7 +1252,7 @@ static void __cpuinit cpuup_canceled(long cpu)
+@@ -1127,7 +1249,7 @@ static void __cpuinit cpuup_canceled(long cpu)
/* Free limit for this kmem_list3 */
l3->free_limit -= cachep->batchcount;
if (nc)
@@ -327,7 +327,7 @@ index 7451bda..b99df7f 100644
if (!cpumask_empty(mask)) {
spin_unlock_irq(&l3->list_lock);
-@@ -1140,7 +1262,7 @@ static void __cpuinit cpuup_canceled(long cpu)
+@@ -1137,7 +1259,7 @@ static void __cpuinit cpuup_canceled(long cpu)
shared = l3->shared;
if (shared) {
free_block(cachep, shared->entry,
@@ -336,7 +336,7 @@ index 7451bda..b99df7f 100644
l3->shared = NULL;
}
-@@ -1156,6 +1278,7 @@ static void __cpuinit cpuup_canceled(long cpu)
+@@ -1153,6 +1275,7 @@ static void __cpuinit cpuup_canceled(long cpu)
}
free_array_cache:
kfree(nc);
@@ -344,7 +344,7 @@ index 7451bda..b99df7f 100644
}
/*
* In the previous loop, all the objects were freed to
-@@ -1170,7 +1293,7 @@ free_array_cache:
+@@ -1167,7 +1290,7 @@ free_array_cache:
}
}
@@ -353,7 +353,7 @@ index 7451bda..b99df7f 100644
{
struct kmem_cache *cachep;
struct kmem_list3 *l3 = NULL;
-@@ -1280,10 +1403,19 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
+@@ -1277,10 +1400,19 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
long cpu = (long)hcpu;
int err = 0;
@@ -373,7 +373,7 @@ index 7451bda..b99df7f 100644
err = cpuup_prepare(cpu);
mutex_unlock(&cache_chain_mutex);
break;
-@@ -1323,10 +1455,14 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
+@@ -1320,10 +1452,14 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
mutex_lock(&cache_chain_mutex);
@@ -388,7 +388,7 @@ index 7451bda..b99df7f 100644
return err ? NOTIFY_BAD : NOTIFY_OK;
}
-@@ -1513,32 +1649,34 @@ void __init kmem_cache_init(void)
+@@ -1510,32 +1646,34 @@ void __init kmem_cache_init(void)
/* 4) Replace the bootstrap head arrays */
{
struct array_cache *ptr;
@@ -430,7 +430,7 @@ index 7451bda..b99df7f 100644
}
/* 5) Replace the bootstrap kmem_list3's */
{
-@@ -1705,7 +1843,7 @@ static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
+@@ -1702,7 +1840,7 @@ static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
*addr++ = 0x12345678;
*addr++ = caller;
@@ -439,7 +439,7 @@ index 7451bda..b99df7f 100644
size -= 3 * sizeof(unsigned long);
{
unsigned long *sptr = &caller;
-@@ -1895,6 +2033,10 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab
+@@ -1892,6 +2030,10 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab
}
#endif
@@ -450,7 +450,7 @@ index 7451bda..b99df7f 100644
/**
* slab_destroy - destroy and release all objects in a slab
* @cachep: cache pointer being destroyed
-@@ -1904,7 +2046,8 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab
+@@ -1901,7 +2043,8 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab
* Before calling the slab must have been unlinked from the cache. The
* cache-lock is not held/needed.
*/
@@ -460,7 +460,7 @@ index 7451bda..b99df7f 100644
{
void *addr = slabp->s_mem - slabp->colouroff;
-@@ -1918,8 +2061,12 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
+@@ -1915,8 +2058,12 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
call_rcu(&slab_rcu->head, kmem_rcu_free);
} else {
kmem_freepages(cachep, addr);
@@ -475,7 +475,7 @@ index 7451bda..b99df7f 100644
}
}
-@@ -2016,6 +2163,8 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
+@@ -2013,6 +2160,8 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
{
@@ -484,7 +484,7 @@ index 7451bda..b99df7f 100644
if (g_cpucache_up == FULL)
return enable_cpucache(cachep, gfp);
-@@ -2059,10 +2208,12 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
+@@ -2056,10 +2205,12 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
jiffies + REAPTIMEOUT_LIST3 +
((unsigned long)cachep) % REAPTIMEOUT_LIST3;
@@ -501,7 +501,7 @@ index 7451bda..b99df7f 100644
cachep->batchcount = 1;
cachep->limit = BOOT_CPUCACHE_ENTRIES;
return 0;
-@@ -2374,19 +2525,19 @@ EXPORT_SYMBOL(kmem_cache_create);
+@@ -2371,19 +2522,19 @@ EXPORT_SYMBOL(kmem_cache_create);
#if DEBUG
static void check_irq_off(void)
{
@@ -528,7 +528,7 @@ index 7451bda..b99df7f 100644
#endif
}
-@@ -2401,34 +2552,67 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
+@@ -2398,34 +2549,67 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
#else
#define check_irq_off() do { } while(0)
#define check_irq_on() do { } while(0)
@@ -603,7 +603,7 @@ index 7451bda..b99df7f 100644
check_irq_on();
for_each_online_node(node) {
l3 = cachep->nodelists[node];
-@@ -2453,16 +2637,16 @@ static int drain_freelist(struct kmem_cache *cache,
+@@ -2450,16 +2634,16 @@ static int drain_freelist(struct kmem_cache *cache,
struct kmem_list3 *l3, int tofree)
{
struct list_head *p;
@@ -623,7 +623,7 @@ index 7451bda..b99df7f 100644
goto out;
}
-@@ -2471,13 +2655,9 @@ static int drain_freelist(struct kmem_cache *cache,
+@@ -2468,13 +2652,9 @@ static int drain_freelist(struct kmem_cache *cache,
BUG_ON(slabp->inuse);
#endif
list_del(&slabp->list);
@@ -639,7 +639,7 @@ index 7451bda..b99df7f 100644
nr_freed++;
}
out:
-@@ -2741,8 +2921,8 @@ static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
+@@ -2738,8 +2918,8 @@ static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
* Grow (by 1) the number of slabs within a cache. This is called by
* kmem_cache_alloc() when there are no active objs left in a cache.
*/
@@ -650,7 +650,7 @@ index 7451bda..b99df7f 100644
{
struct slab *slabp;
size_t offset;
-@@ -2770,8 +2950,7 @@ static int cache_grow(struct kmem_cache *cachep,
+@@ -2767,8 +2947,7 @@ static int cache_grow(struct kmem_cache *cachep,
offset *= cachep->colour_off;
@@ -660,7 +660,7 @@ index 7451bda..b99df7f 100644
/*
* The test for missing atomic flag is performed here, rather than
-@@ -2800,8 +2979,8 @@ static int cache_grow(struct kmem_cache *cachep,
+@@ -2797,8 +2976,8 @@ static int cache_grow(struct kmem_cache *cachep,
cache_init_objs(cachep, slabp);
@@ -671,7 +671,7 @@ index 7451bda..b99df7f 100644
check_irq_off();
spin_lock(&l3->list_lock);
-@@ -2814,8 +2993,7 @@ static int cache_grow(struct kmem_cache *cachep,
+@@ -2811,8 +2990,7 @@ static int cache_grow(struct kmem_cache *cachep,
opps1:
kmem_freepages(cachep, objp);
failed:
@@ -681,7 +681,7 @@ index 7451bda..b99df7f 100644
return 0;
}
-@@ -2937,7 +3115,8 @@ bad:
+@@ -2934,7 +3112,8 @@ bad:
#define check_slabp(x,y) do { } while(0)
#endif
@@ -691,7 +691,7 @@ index 7451bda..b99df7f 100644
{
int batchcount;
struct kmem_list3 *l3;
-@@ -2947,7 +3126,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
+@@ -2944,7 +3123,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
retry:
check_irq_off();
node = numa_node_id();
@@ -700,7 +700,7 @@ index 7451bda..b99df7f 100644
batchcount = ac->batchcount;
if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
/*
-@@ -2957,7 +3136,7 @@ retry:
+@@ -2954,7 +3133,7 @@ retry:
*/
batchcount = BATCHREFILL_LIMIT;
}
@@ -709,7 +709,7 @@ index 7451bda..b99df7f 100644
BUG_ON(ac->avail > 0 || !l3);
spin_lock(&l3->list_lock);
-@@ -2980,7 +3159,7 @@ retry:
+@@ -2979,7 +3158,7 @@ retry:
slabp = list_entry(entry, struct slab, list);
check_slabp(cachep, slabp);
@@ -718,7 +718,7 @@ index 7451bda..b99df7f 100644
/*
* The slab was either on partial or free list so
-@@ -2994,8 +3173,9 @@ retry:
+@@ -2993,8 +3172,9 @@ retry:
STATS_INC_ACTIVE(cachep);
STATS_SET_HIGH(cachep);
@@ -730,7 +730,7 @@ index 7451bda..b99df7f 100644
}
check_slabp(cachep, slabp);
-@@ -3014,10 +3194,10 @@ alloc_done:
+@@ -3013,10 +3193,10 @@ alloc_done:
if (unlikely(!ac->avail)) {
int x;
@@ -743,8 +743,8 @@ index 7451bda..b99df7f 100644
if (!x && ac->avail == 0) /* no objects in sight? abort */
return NULL;
-@@ -3104,26 +3284,27 @@ static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
- return should_failslab(obj_size(cachep), flags);
+@@ -3103,26 +3283,27 @@ static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
+ return should_failslab(obj_size(cachep), flags, cachep->flags);
}
-static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
@@ -775,7 +775,7 @@ index 7451bda..b99df7f 100644
}
/*
* To avoid a false negative, if an object that is in one of the
-@@ -3142,7 +3323,8 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+@@ -3141,7 +3322,8 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
* If we are in_interrupt, then process context, including cpusets and
* mempolicy, may not apply and should not be used for allocation policy.
*/
@@ -785,7 +785,7 @@ index 7451bda..b99df7f 100644
{
int nid_alloc, nid_here;
-@@ -3154,7 +3336,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
+@@ -3153,7 +3335,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
else if (current->mempolicy)
nid_alloc = slab_node(current->mempolicy);
if (nid_alloc != nid_here)
@@ -794,7 +794,7 @@ index 7451bda..b99df7f 100644
return NULL;
}
-@@ -3166,7 +3348,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
+@@ -3165,7 +3347,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
* allocator to do its reclaim / fallback magic. We then insert the
* slab into the proper nodelist and then allocate from it.
*/
@@ -803,7 +803,7 @@ index 7451bda..b99df7f 100644
{
struct zonelist *zonelist;
gfp_t local_flags;
-@@ -3194,7 +3376,8 @@ retry:
+@@ -3193,7 +3375,8 @@ retry:
cache->nodelists[nid] &&
cache->nodelists[nid]->free_objects) {
obj = ____cache_alloc_node(cache,
@@ -813,7 +813,7 @@ index 7451bda..b99df7f 100644
if (obj)
break;
}
-@@ -3207,20 +3390,21 @@ retry:
+@@ -3206,20 +3389,21 @@ retry:
* We may trigger various forms of reclaim on the allowed
* set and go into memory reserves if necessary.
*/
@@ -842,7 +842,7 @@ index 7451bda..b99df7f 100644
if (!obj)
/*
* Another processor may allocate the
-@@ -3241,7 +3425,7 @@ retry:
+@@ -3240,7 +3424,7 @@ retry:
* A interface to enable slab creation on nodeid
*/
static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
@@ -851,7 +851,7 @@ index 7451bda..b99df7f 100644
{
struct list_head *entry;
struct slab *slabp;
-@@ -3289,11 +3473,11 @@ retry:
+@@ -3288,11 +3472,11 @@ retry:
must_grow:
spin_unlock(&l3->list_lock);
@@ -865,7 +865,7 @@ index 7451bda..b99df7f 100644
done:
return obj;
-@@ -3316,6 +3500,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
+@@ -3315,6 +3499,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
void *caller)
{
unsigned long save_flags;
@@ -873,7 +873,7 @@ index 7451bda..b99df7f 100644
void *ptr;
flags &= gfp_allowed_mask;
-@@ -3326,32 +3511,33 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
+@@ -3325,32 +3510,33 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
return NULL;
cache_alloc_debugcheck_before(cachep, flags);
@@ -914,7 +914,7 @@ index 7451bda..b99df7f 100644
ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
flags);
-@@ -3366,33 +3552,33 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
+@@ -3365,33 +3551,33 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
}
static __always_inline void *
@@ -956,7 +956,7 @@ index 7451bda..b99df7f 100644
}
#endif /* CONFIG_NUMA */
-@@ -3401,6 +3587,7 @@ static __always_inline void *
+@@ -3400,6 +3586,7 @@ static __always_inline void *
__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
{
unsigned long save_flags;
@@ -964,7 +964,7 @@ index 7451bda..b99df7f 100644
void *objp;
flags &= gfp_allowed_mask;
-@@ -3411,9 +3598,9 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
+@@ -3410,9 +3597,9 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
return NULL;
cache_alloc_debugcheck_before(cachep, flags);
@@ -977,7 +977,7 @@ index 7451bda..b99df7f 100644
objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
flags);
-@@ -3432,7 +3619,7 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
+@@ -3431,7 +3618,7 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
* Caller needs to acquire correct kmem_list's list_lock
*/
static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
@@ -986,7 +986,7 @@ index 7451bda..b99df7f 100644
{
int i;
struct kmem_list3 *l3;
-@@ -3461,7 +3648,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
+@@ -3460,7 +3647,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
* a different cache, refer to comments before
* alloc_slabmgmt.
*/
@@ -995,7 +995,7 @@ index 7451bda..b99df7f 100644
} else {
list_add(&slabp->list, &l3->slabs_free);
}
-@@ -3475,11 +3662,12 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
+@@ -3474,11 +3661,12 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
}
}
@@ -1010,7 +1010,7 @@ index 7451bda..b99df7f 100644
batchcount = ac->batchcount;
#if DEBUG
-@@ -3501,7 +3689,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
+@@ -3500,7 +3688,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
}
}
@@ -1019,7 +1019,7 @@ index 7451bda..b99df7f 100644
free_done:
#if STATS
{
-@@ -3530,9 +3718,9 @@ free_done:
+@@ -3529,9 +3717,9 @@ free_done:
* Release an obj back to its cache. If the obj has a constructed state, it must
* be in this state _before_ it is released. Called with disabled ints.
*/
@@ -1031,7 +1031,7 @@ index 7451bda..b99df7f 100644
check_irq_off();
kmemleak_free_recursive(objp, cachep->flags);
-@@ -3547,7 +3735,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
+@@ -3546,7 +3734,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
* variable to skip the call, which is mostly likely to be present in
* the cache.
*/
@@ -1040,7 +1040,7 @@ index 7451bda..b99df7f 100644
return;
if (likely(ac->avail < ac->limit)) {
-@@ -3556,7 +3744,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
+@@ -3555,7 +3743,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
return;
} else {
STATS_INC_FREEMISS(cachep);
@@ -1049,7 +1049,7 @@ index 7451bda..b99df7f 100644
ac->entry[ac->avail++] = objp;
}
}
-@@ -3755,13 +3943,14 @@ EXPORT_SYMBOL(__kmalloc);
+@@ -3754,13 +3942,14 @@ EXPORT_SYMBOL(__kmalloc);
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
unsigned long flags;
@@ -1067,7 +1067,7 @@ index 7451bda..b99df7f 100644
trace_kmem_cache_free(_RET_IP_, objp);
}
-@@ -3780,18 +3969,19 @@ void kfree(const void *objp)
+@@ -3779,18 +3968,19 @@ void kfree(const void *objp)
{
struct kmem_cache *c;
unsigned long flags;
@@ -1090,7 +1090,7 @@ index 7451bda..b99df7f 100644
}
EXPORT_SYMBOL(kfree);
-@@ -3812,7 +4002,7 @@ EXPORT_SYMBOL_GPL(kmem_cache_name);
+@@ -3811,7 +4001,7 @@ EXPORT_SYMBOL_GPL(kmem_cache_name);
*/
static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
{
@@ -1099,7 +1099,7 @@ index 7451bda..b99df7f 100644
struct kmem_list3 *l3;
struct array_cache *new_shared;
struct array_cache **new_alien = NULL;
-@@ -3840,11 +4030,11 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
+@@ -3839,11 +4029,11 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
if (l3) {
struct array_cache *shared = l3->shared;
@@ -1113,7 +1113,7 @@ index 7451bda..b99df7f 100644
l3->shared = new_shared;
if (!l3->alien) {
-@@ -3853,7 +4043,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
+@@ -3852,7 +4042,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
}
l3->free_limit = (1 + nr_cpus_node(node)) *
cachep->batchcount + cachep->num;
@@ -1122,7 +1122,7 @@ index 7451bda..b99df7f 100644
kfree(shared);
free_alien_cache(new_alien);
continue;
-@@ -3900,24 +4090,36 @@ struct ccupdate_struct {
+@@ -3899,24 +4089,36 @@ struct ccupdate_struct {
struct array_cache *new[NR_CPUS];
};
@@ -1164,7 +1164,7 @@ index 7451bda..b99df7f 100644
new = kzalloc(sizeof(*new), gfp);
if (!new)
-@@ -3935,7 +4137,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
+@@ -3934,7 +4136,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
}
new->cachep = cachep;
@@ -1173,7 +1173,7 @@ index 7451bda..b99df7f 100644
check_irq_on();
cachep->batchcount = batchcount;
-@@ -3946,9 +4148,12 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
+@@ -3945,9 +4147,12 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
struct array_cache *ccold = new->new[i];
if (!ccold)
continue;
@@ -1189,7 +1189,7 @@ index 7451bda..b99df7f 100644
kfree(ccold);
}
kfree(new);
-@@ -4013,29 +4218,31 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
+@@ -4012,29 +4217,31 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
* Drain an array if it contains any elements taking the l3 lock only if
* necessary. Note that the l3 listlock also protects the array_cache
* if drain_array() is used on the shared array.
@@ -1228,7 +1228,7 @@ index 7451bda..b99df7f 100644
}
/**
-@@ -4052,10 +4259,11 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
+@@ -4051,10 +4258,11 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
*/
static void cache_reap(struct work_struct *w)
{
@@ -1241,7 +1241,7 @@ index 7451bda..b99df7f 100644
if (!mutex_trylock(&cache_chain_mutex))
/* Give up. Setup the next iteration. */
-@@ -4071,9 +4279,12 @@ static void cache_reap(struct work_struct *w)
+@@ -4070,9 +4278,12 @@ static void cache_reap(struct work_struct *w)
*/
l3 = searchp->nodelists[node];
@@ -1256,7 +1256,7 @@ index 7451bda..b99df7f 100644
/*
* These are racy checks but it does not matter
-@@ -4084,7 +4295,7 @@ static void cache_reap(struct work_struct *w)
+@@ -4083,7 +4294,7 @@ static void cache_reap(struct work_struct *w)
l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
@@ -1265,7 +1265,7 @@ index 7451bda..b99df7f 100644
if (l3->free_touched)
l3->free_touched = 0;
-@@ -4103,7 +4314,8 @@ next:
+@@ -4102,7 +4313,8 @@ next:
next_reap_node();
out:
/* Set up the next iteration */
@@ -1275,7 +1275,7 @@ index 7451bda..b99df7f 100644
}
#ifdef CONFIG_SLABINFO
-@@ -4162,7 +4374,7 @@ static int s_show(struct seq_file *m, void *p)
+@@ -4161,7 +4373,7 @@ static int s_show(struct seq_file *m, void *p)
unsigned long num_slabs, free_objects = 0, shared_avail = 0;
const char *name;
char *error = NULL;
@@ -1284,7 +1284,7 @@ index 7451bda..b99df7f 100644
struct kmem_list3 *l3;
active_objs = 0;
-@@ -4173,7 +4385,7 @@ static int s_show(struct seq_file *m, void *p)
+@@ -4172,7 +4384,7 @@ static int s_show(struct seq_file *m, void *p)
continue;
check_irq_on();
@@ -1293,7 +1293,7 @@ index 7451bda..b99df7f 100644
list_for_each_entry(slabp, &l3->slabs_full, list) {
if (slabp->inuse != cachep->num && !error)
-@@ -4198,7 +4410,7 @@ static int s_show(struct seq_file *m, void *p)
+@@ -4197,7 +4409,7 @@ static int s_show(struct seq_file *m, void *p)
if (l3->shared)
shared_avail += l3->shared->avail;
@@ -1302,7 +1302,7 @@ index 7451bda..b99df7f 100644
}
num_slabs += active_slabs;
num_objs = num_slabs * cachep->num;
-@@ -4408,7 +4620,7 @@ static int leaks_show(struct seq_file *m, void *p)
+@@ -4407,7 +4619,7 @@ static int leaks_show(struct seq_file *m, void *p)
struct kmem_list3 *l3;
const char *name;
unsigned long *n = m->private;
@@ -1311,7 +1311,7 @@ index 7451bda..b99df7f 100644
int i;
if (!(cachep->flags & SLAB_STORE_USER))
-@@ -4426,13 +4638,13 @@ static int leaks_show(struct seq_file *m, void *p)
+@@ -4425,13 +4637,13 @@ static int leaks_show(struct seq_file *m, void *p)
continue;
check_irq_on();