author    Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2016-01-15 16:33:34 +0100
committer Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2016-02-13 00:36:32 +0100
commit    097db04d4710f9a4038045d6467bbcce1a21442a (patch)
tree      df2fc26fc0512efde7159822a4ddcc78e83c3d8c
parent    1ff51b10e6cabbf717c58a99b42ad520935bcf36 (diff)
net/core: protect users of napi_alloc_cache against reentrance
On -RT, code running in BH cannot be migrated to another CPU, so CPU-local
variables remain local. However, the code can be preempted, and another task
may then enter BH on the same CPU and access the same napi_alloc_cache
variable. This patch ensures that each user of napi_alloc_cache takes a local
lock. Since the per-CPU pointer must not be dereferenced once the lock is
dropped, __napi_alloc_skb() snapshots nc->pfmemalloc while still holding the
lock.

Cc: stable-rt@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
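A minimal sketch of the pattern this patch applies, using the locallock
primitives visible in the diff below (DEFINE_LOCAL_IRQ_LOCK(),
get_locked_var(), put_locked_var()); the cache structure and function names
here are hypothetical and not taken from the patch:

#include <linux/locallock.h>	/* provided by the -RT patchset */
#include <linux/percpu.h>

/* hypothetical stand-in for a per-CPU cache like page_frag_cache */
struct example_cache {
	unsigned int offset;
};

static DEFINE_PER_CPU(struct example_cache, example_cache);
static DEFINE_LOCAL_IRQ_LOCK(example_cache_lock);

static void example_user(void)
{
	struct example_cache *c;

	/*
	 * On mainline, this_cpu_ptr() is safe in BH context because
	 * BH is not preemptible there.  On -RT, BH processing can be
	 * preempted and another task may run on the same CPU, so the
	 * per-CPU data must stay protected for the whole access.
	 */
	c = &get_locked_var(example_cache_lock, example_cache);
	c->offset++;		/* any use of the per-CPU cache */
	put_locked_var(example_cache_lock, example_cache);
}

On non-RT builds the locallock primitives are expected to fall back to the
plain per-CPU accessors, so the locked variant should not add overhead to
mainline configurations.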
 net/core/skbuff.c | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2086f9fe4532e4..53f10c2d7718fa 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -351,6 +351,7 @@ EXPORT_SYMBOL(build_skb);
 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
 static DEFINE_PER_CPU(struct page_frag_cache, napi_alloc_cache);
 static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
+static DEFINE_LOCAL_IRQ_LOCK(napi_alloc_cache_lock);
 
 static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
@@ -380,9 +381,13 @@ EXPORT_SYMBOL(netdev_alloc_frag);
 
 static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
-	struct page_frag_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	struct page_frag_cache *nc;
+	void *data;
 
-	return __alloc_page_frag(nc, fragsz, gfp_mask);
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+	data = __alloc_page_frag(nc, fragsz, gfp_mask);
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+	return data;
 }
 
 void *napi_alloc_frag(unsigned int fragsz)
@@ -476,9 +481,10 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
 				 gfp_t gfp_mask)
 {
-	struct page_frag_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	struct page_frag_cache *nc;
 	struct sk_buff *skb;
 	void *data;
+	bool pfmemalloc;
 
 	len += NET_SKB_PAD + NET_IP_ALIGN;
 
@@ -496,7 +502,11 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
 	if (sk_memalloc_socks())
 		gfp_mask |= __GFP_MEMALLOC;
 
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	data = __alloc_page_frag(nc, len, gfp_mask);
+	pfmemalloc = nc->pfmemalloc;
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+
 	if (unlikely(!data))
 		return NULL;
 
@@ -507,7 +517,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
 	}
 
 	/* use OR instead of assignment to avoid clearing of bits in mask */
-	if (nc->pfmemalloc)
+	if (pfmemalloc)
 		skb->pfmemalloc = 1;
 	skb->head_frag = 1;
 