author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2018-03-23 09:29:05 +1100
committer	Michael Ellerman <mpe@ellerman.id.au>	2018-03-23 14:14:31 +1100
commit		aff6f8cb3e2170b9e58b0932bce7bfb492775e23 (patch)
tree		70b8f5db0da67c2a359166e793539dc1bdd9cdbd
parent		ff6781fd1bb404d8a551c02c35c70cec1da17ff1 (diff)
powerpc/mm: Add tracking of the number of coprocessors using a context
Currently, when using coprocessors (which use the Nest MMU), we simply
increment the active_cpus count to force all TLB invalidations to
become broadcast.

Unfortunately, due to an erratum in POWER9, we will need to know more
specifically that coprocessors are in use. This maintains a separate
copros counter in the MMU context for that purpose.

NB. The commit mentioned in the fixes tag below is not at fault for the
bug we're fixing in this commit and the next, but this fix applies on
top of the infrastructure it introduced.

Fixes: 03b8abedf4f4 ("cxl: Enable global TLBIs for cxl contexts")
Cc: stable@vger.kernel.org # v4.15+
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Tested-by: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
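[Editor's illustration] The counting scheme the patch relies on can be modelled in plain userspace C: only the first coprocessor user promotes the context to broadcast invalidations (modelled here by bumping active_cpus), and only the last remove demotes it again. Everything in the sketch below (struct ctx, dec_if_positive(), the helper names) is invented for illustration and merely mirrors the semantics of the kernel's atomic_inc_return() and atomic_dec_if_positive(); it is not the kernel code itself.

#include <stdatomic.h>
#include <stdio.h>

/* Stand-ins for the two MMU context fields touched by the patch. */
struct ctx {
	atomic_int active_cpus;
	atomic_int copros;
};

/*
 * Models atomic_dec_if_positive(): decrement only if the value is
 * positive; return the new value, or a negative number if nothing
 * was decremented (i.e. the counter was already zero).
 */
static int dec_if_positive(atomic_int *v)
{
	int c = atomic_load(v);

	while (c > 0 && !atomic_compare_exchange_weak(v, &c, c - 1))
		;
	return c - 1;
}

static void add_copro(struct ctx *ctx)
{
	/* Only the first copro user bumps active_cpus. */
	if (atomic_fetch_add(&ctx->copros, 1) + 1 == 1)
		atomic_fetch_add(&ctx->active_cpus, 1);
}

static void remove_copro(struct ctx *ctx)
{
	int c = dec_if_positive(&ctx->copros);

	if (c < 0)		/* imbalance: more removes than adds */
		fprintf(stderr, "copro count underflow\n");
	else if (c == 0)	/* last user: flush, then drop active_cpus */
		atomic_fetch_sub(&ctx->active_cpus, 1);
}

int main(void)
{
	struct ctx ctx = { 0 };

	add_copro(&ctx);	/* active_cpus: 0 -> 1 */
	add_copro(&ctx);	/* active_cpus stays at 1 */
	remove_copro(&ctx);	/* still one copro user */
	remove_copro(&ctx);	/* last user: active_cpus back to 0 */
	printf("copros=%d active_cpus=%d\n",
	       atomic_load(&ctx.copros), atomic_load(&ctx.active_cpus));
	return 0;
}

Two balanced add/remove pairs leave the context back in its non-broadcast state, which is exactly the behaviour the helpers below implement for the real mm.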
-rw-r--r--	arch/powerpc/include/asm/book3s/64/mmu.h	3
-rw-r--r--	arch/powerpc/include/asm/mmu_context.h	18
-rw-r--r--	arch/powerpc/mm/mmu_context_book3s64.c	1
3 files changed, 17 insertions, 5 deletions
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 0abeb0e2d616d5..37671feb2bf60f 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -87,6 +87,9 @@ typedef struct {
 	/* Number of bits in the mm_cpumask */
 	atomic_t active_cpus;
 
+	/* Number of users of the external (Nest) MMU */
+	atomic_t copros;
+
 	/* NPU NMMU context */
 	struct npu_context *npu_context;
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 051b3d63afe34b..3a15b6db950175 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -92,15 +92,23 @@ static inline void dec_mm_active_cpus(struct mm_struct *mm)
 static inline void mm_context_add_copro(struct mm_struct *mm)
 {
 	/*
-	 * On hash, should only be called once over the lifetime of
-	 * the context, as we can't decrement the active cpus count
-	 * and flush properly for the time being.
+	 * If any copro is in use, increment the active CPU count
+	 * in order to force TLB invalidations to be global as to
+	 * propagate to the Nest MMU.
 	 */
-	inc_mm_active_cpus(mm);
+	if (atomic_inc_return(&mm->context.copros) == 1)
+		inc_mm_active_cpus(mm);
 }
 
 static inline void mm_context_remove_copro(struct mm_struct *mm)
 {
+	int c;
+
+	c = atomic_dec_if_positive(&mm->context.copros);
+
+	/* Detect imbalance between add and remove */
+	WARN_ON(c < 0);
+
 	/*
 	 * Need to broadcast a global flush of the full mm before
 	 * decrementing active_cpus count, as the next TLBI may be
@@ -111,7 +119,7 @@ static inline void mm_context_remove_copro(struct mm_struct *mm)
 	 * for the time being. Invalidations will remain global if
 	 * used on hash.
 	 */
-	if (radix_enabled()) {
+	if (c == 0 && radix_enabled()) {
 		flush_all_mm(mm);
 		dec_mm_active_cpus(mm);
 	}
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index 929d9ef7083f1a..3f980baade4c19 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -173,6 +173,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	mm_iommu_init(mm);
 #endif
 	atomic_set(&mm->context.active_cpus, 0);
+	atomic_set(&mm->context.copros, 0);
 
 	return 0;
 }
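[Editor's usage note] The two helpers are meant to be called in balanced pairs by whichever coprocessor driver attaches to an mm (cxl, per the Fixes tag, being the existing user of this infrastructure). The sketch below shows the intended pairing; the my_copro_* wrappers are hypothetical and only mm_context_add_copro()/mm_context_remove_copro() come from the patch.

/* Hypothetical driver-side pairing of the helpers patched above. */
#include <linux/mm_types.h>
#include <asm/mmu_context.h>

static void my_copro_attach_mm(struct mm_struct *mm)
{
	/* First copro user on this mm makes its TLB invalidations global. */
	mm_context_add_copro(mm);
}

static void my_copro_detach_mm(struct mm_struct *mm)
{
	/*
	 * Must balance my_copro_attach_mm(): the last remove flushes the
	 * full mm (on radix) and lets active_cpus drop again; an extra
	 * remove trips the WARN_ON() added above.
	 */
	mm_context_remove_copro(mm);
}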