author    Linus Torvalds <torvalds@linux-foundation.org>  2017-10-03 10:05:12 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-10-03 10:05:12 -0700
commit    ff93026d51935b671b4ac131886f0914f8b6189a
tree      03bbcdb1eb122fbe830e0517e570112aaf2e5412
parent    c1de1591d927bc1e55095d642b3c3cc2e54fc1cd
parent    1fa4df3e688902d033dfda796eb83ae6ad8d0488
Merge branch 'for-4.14-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
Pull percpu fixes from Tejun Heo:
 "Rather important fixes this time.

   - The new percpu area allocator had a subtle bug in how it iterates
     the memory regions and could skip viable areas, which led to
     allocation failures for module static percpu variables. Dennis
     fixed the bug and another non-critical one in stat calculation.

   - Mark noticed that the generic implementations of percpu local
     atomic reads aren't properly protected against irqs and there's a
     (slim) chance for split reads on some 32bit systems. Generic
     implementations are updated to disable irq when read size is
     larger than ulong size. This may have made some 32bit archs which
     can do atomic local 64bit accesses generate sub-optimal code. We
     need to find them out and implement arch-specific overrides"

* 'for-4.14-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu:
  percpu: fix iteration to prevent skipping over block
  percpu: fix starting offset for chunk statistics traversal
  percpu: make this_cpu_generic_read() atomic w.r.t. interrupts
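The second item is a classic torn-read hazard. A minimal userspace sketch of it, assuming a 32-bit target where a 64-bit load compiles to two 32-bit loads; all names here (demo_counter, demo_irq_handler, demo_read) are invented for illustration and are not kernel code:

/* Hypothetical illustration of a torn 64-bit read on a 32-bit CPU. */
#include <stdint.h>

static volatile uint64_t demo_counter;	/* stands in for a 64-bit percpu stat */

/* Imagine this running as an interrupt handler. */
void demo_irq_handler(void)
{
	demo_counter++;	/* e.g. 0x00000000ffffffff -> 0x0000000100000000 */
}

/* On a 32-bit arch this read is two loads (low half, then high half).
 * If the irq fires between them, the caller can observe
 * 0x00000001ffffffff -- a value the counter never held.  Disabling
 * irqs around the read, as __this_cpu_generic_read_noirq() now does,
 * closes that window. */
uint64_t demo_read(void)
{
	return demo_counter;
}

This is why the generic path now drops to raw_local_irq_save()/raw_local_irq_restore() whenever the operand is wider than a native word.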
 include/asm-generic/percpu.h | 24 ++++++++++++++++++++++--
 mm/percpu-stats.c            |  2 +-
 mm/percpu.c                  |  4 ++++
 3 files changed, 27 insertions(+), 3 deletions(-)
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 0504ef8f3aa31d..976f8ac26665b3 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -115,15 +115,35 @@ do { \
 	(__ret); \
 })

-#define this_cpu_generic_read(pcp) \
+#define __this_cpu_generic_read_nopreempt(pcp) \
 ({ \
 	typeof(pcp) __ret; \
 	preempt_disable_notrace(); \
-	__ret = raw_cpu_generic_read(pcp); \
+	__ret = READ_ONCE(*raw_cpu_ptr(&(pcp))); \
 	preempt_enable_notrace(); \
 	__ret; \
 })

+#define __this_cpu_generic_read_noirq(pcp) \
+({ \
+	typeof(pcp) __ret; \
+	unsigned long __flags; \
+	raw_local_irq_save(__flags); \
+	__ret = raw_cpu_generic_read(pcp); \
+	raw_local_irq_restore(__flags); \
+	__ret; \
+})
+
+#define this_cpu_generic_read(pcp) \
+({ \
+	typeof(pcp) __ret; \
+	if (__native_word(pcp)) \
+		__ret = __this_cpu_generic_read_nopreempt(pcp); \
+	else \
+		__ret = __this_cpu_generic_read_noirq(pcp); \
+	__ret; \
+})
+
 #define this_cpu_generic_to_op(pcp, val, op) \
 do { \
 	unsigned long __flags; \
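The new this_cpu_generic_read() keeps the cheap preempt-only path for native-word-sized operands and only pays for irq disabling on wider ones. A small hypothetical userspace program showing which path the size test selects, assuming __native_word() keeps its usual kernel definition (true for sizes of char, short, int, and long):

/* Hypothetical demo of the size dispatch; native_word() mirrors the
 * kernel's __native_word() from compiler.h. */
#include <stdio.h>

#define native_word(t) \
	(sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
	 sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))

int main(void)
{
	/* On a 32-bit build (sizeof(long) == 4) a 64-bit operand fails
	 * the test and would take the irq-disabled path; on 64-bit it
	 * passes and keeps the preempt-only path. */
	printf("int:       %s\n", native_word(int) ? "nopreempt" : "noirq");
	printf("long long: %s\n", native_word(long long) ? "nopreempt" : "noirq");
	return 0;
}

On a 64-bit arch both lines print "nopreempt", so the change costs nothing there; the note in the merge message about sub-optimal code concerns only 32-bit archs that can already do atomic local 64-bit accesses.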
diff --git a/mm/percpu-stats.c b/mm/percpu-stats.c
index 6142484e88f79c..7a58460bfd27a3 100644
--- a/mm/percpu-stats.c
+++ b/mm/percpu-stats.c
@@ -73,7 +73,7 @@ static void chunk_map_stats(struct seq_file *m, struct pcpu_chunk *chunk,
 			last_alloc + 1 : 0;

 	as_len = 0;
-	start = chunk->start_offset;
+	start = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;

 	/*
 	 * If a bit is set in the allocation map, the bound_map identifies
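This one-liner is a units fix: chunk->start_offset counts bytes, while the stats traversal indexes allocation-map slots of PCPU_MIN_ALLOC_SIZE bytes each. A tiny hypothetical check of the conversion, assuming the usual PCPU_MIN_ALLOC_SIZE of 4 bytes:

/* Hypothetical sanity check of the byte -> map-slot conversion. */
#include <assert.h>

#define PCPU_MIN_ALLOC_SIZE 4	/* assumed: 1 << PCPU_MIN_ALLOC_SHIFT */

int main(void)
{
	int start_offset = 64;	/* bytes of reserved space at chunk start */

	/* Before the fix the traversal began at slot 64 (a byte count
	 * misused as a slot index); after it, at slot 16. */
	assert(start_offset / PCPU_MIN_ALLOC_SIZE == 16);
	return 0;
}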
diff --git a/mm/percpu.c b/mm/percpu.c
index 59d44d61f5f198..aa121cef76de33 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -353,6 +353,8 @@ static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
 					block->contig_hint_start);
 			return;
 		}
+		/* reset to satisfy the second predicate above */
+		block_off = 0;

 		*bits = block->right_free;
 		*bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
@@ -407,6 +409,8 @@ static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
 			*bit_off = pcpu_block_off_to_off(i, block->first_free);
 			return;
 		}
+		/* reset to satisfy the second predicate above */
+		block_off = 0;

 		*bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
 				 align);
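Both hunks cure the same loop-carried-state bug: block_off is derived from the caller's *bit_off before the per-block loop, so it is only meaningful for the first block examined, yet the "second predicate" rechecks it on every iteration. Without the reset, a stale nonzero block_off makes every later block look as if it were entered mid-way, and viable free regions get skipped -- the allocation failures for module static percpu variables mentioned in the merge message. A stripped-down sketch of the pattern, with invented names and sizes:

/* Hypothetical sketch of the stale-offset pattern the fix resets. */
#include <stdio.h>

#define NBLOCKS		4
#define BLOCK_BITS	64

int main(void)
{
	int bit_off = 70;			/* caller's starting bit */
	int block_off = bit_off % BLOCK_BITS;	/* 6: valid for block 1 only */

	for (int i = bit_off / BLOCK_BITS; i < NBLOCKS; i++) {
		/* Only a block entered mid-way should skip its leading
		 * bits.  Without the reset, block_off would stay 6 for
		 * the following blocks too, hiding their first 6 bits. */
		printf("block %d: skip first %d bits\n", i, block_off);

		block_off = 0;	/* the fix: later blocks scan from bit 0 */
	}
	return 0;
}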