path: root/arch/powerpc/mm/slb_low.S
author     Michael Ellerman <mpe@ellerman.id.au>  2017-08-01 20:29:24 +1000
committer  Michael Ellerman <mpe@ellerman.id.au>  2017-08-08 19:37:05 +1000
commit     21a0e8c14bf61472723d2acc83f98ab35ff321b4 (patch)
tree       5361d962f4421ae4a3b0c78df161c8362077f793 /arch/powerpc/mm/slb_low.S
parent     b5048de04b32104140e5b251005404c3e0d03ccd (diff)
download   linux-21a0e8c14bf61472723d2acc83f98ab35ff321b4.tar.gz
powerpc/mm/hash64: Make vmalloc 56T on hash
On 64-bit book3s, with the hash MMU, we currently define the kernel virtual space (vmalloc, ioremap etc.) to be 16T in size. This is a leftover from pre v3.7, when our user VM was also 16T.

Of that 16T, we split it 50/50, with half used for PCI IO and ioremap and the other 8T for vmalloc.

We never bothered to make it any bigger because 8T of vmalloc ought to be enough for anybody. But it turns out that's not true: the per cpu allocator wants large amounts of vmalloc space, not to make large allocations, but to allow a large stride between allocations, because we use pcpu_embed_first_chunk().

With a bit of juggling we can increase the entire kernel virtual space to 64T. The only real complication is the check of the address in the SLB miss handler; see the comment in the code.

Although we could continue to split the virtual space 50/50 as we do now, no one seems to be running out of PCI IO or ioremap space. So instead keep that at 8T, and use the remaining 56T for vmalloc.

In future we should be able to increase the kernel virtual space to 512T; the code already supports that, it just needs testing on older hardware.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
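[Editor's note] For reference, the arithmetic behind the new layout can be sketched in a few lines of C. This is an illustration only: the H_* names mirror the kernel's hash-MMU header constants, but the exact definitions below are this sketch's assumptions, not a quote of the header.

#include <stdio.h>

/* Assumed layout per the description above: 64T of kernel virtual
 * space at 0xd000000000000000, of which 8T stays reserved for PCI IO
 * and ioremap, and the remaining 56T goes to vmalloc. */
#define TB                (1ULL << 40)
#define H_KERN_VIRT_START 0xd000000000000000ULL
#define H_KERN_VIRT_SIZE  (64 * TB)                            /* was 16T */
#define H_KERN_IO_SIZE    (8 * TB)                             /* kept at 8T */
#define H_VMALLOC_START   H_KERN_VIRT_START
#define H_VMALLOC_SIZE    (H_KERN_VIRT_SIZE - H_KERN_IO_SIZE)  /* 56T */
#define H_VMALLOC_END     (H_VMALLOC_START + H_VMALLOC_SIZE)
#define H_KERN_IO_START   H_VMALLOC_END

int main(void)
{
    /* H_VMALLOC_END >> 28 is the boundary ESID the SLB miss handler
     * has to materialize; see the comment in the diff below. */
    printf("H_VMALLOC_END       = 0x%llx\n",
           (unsigned long long)H_VMALLOC_END);         /* 0xd000380000000000 */
    printf("H_VMALLOC_END >> 28 = 0x%llx\n",
           (unsigned long long)(H_VMALLOC_END >> 28)); /* 0xd00038000 */
    return 0;
}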
Diffstat (limited to 'arch/powerpc/mm/slb_low.S')
-rw-r--r--  arch/powerpc/mm/slb_low.S | 18 +++++++++++++++---
1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 2eb1b92a68ff43..906a86fe457bb3 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -121,9 +121,21 @@ slb_miss_kernel_load_vmemmap:
1:
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
-	clrldi	r11,r10,48
-	cmpldi	r11,(H_VMALLOC_SIZE >> 28) - 1
-	bgt	5f
+	/*
+	 * r10 contains the ESID, which is the original faulting EA shifted
+	 * right by 28 bits. We need to compare that with (H_VMALLOC_END >> 28)
+	 * which is 0xd00038000. That can't be used as an immediate, even if we
+	 * ignored the 0xd, so we have to load it into a register, and we only
+	 * have one register free. So we must load all of (H_VMALLOC_END >> 28)
+	 * into a register and compare ESID against that.
+	 */
+	lis	r11,(H_VMALLOC_END >> 32)@h	// r11 = 0xffffffffd0000000
+	ori	r11,r11,(H_VMALLOC_END >> 32)@l	// r11 = 0xffffffffd0003800
+	// Rotate left 4, then mask with 0xffffffff0
+	rldic	r11,r11,4,28			// r11 = 0xd00038000
+	cmpld	r10,r11				// if r10 >= r11
+	bge	5f				//   goto io_mapping
+
/*
* vmalloc mapping gets the encoding from the PACA as the mapping
* can be demoted from 64K -> 4K dynamically on some machines.
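[Editor's note] To see why the lis/ori/rldic sequence above lands on 0xd00038000, here is a small user-space C sketch mimicking the three instructions on the 32-bit value H_VMALLOC_END >> 32. It is an illustration only, not kernel code: the rotl64() helper and the hard-coded H_VMALLOC_END are assumptions of the sketch, and the sign-extending cast relies on the usual two's complement behaviour.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* 0xd000000000000000 + 56T, matching the value in the diff comment. */
#define H_VMALLOC_END 0xd000380000000000ULL

/* 64-bit rotate left, as rldic does before masking. */
static uint64_t rotl64(uint64_t x, unsigned n)
{
    return (x << n) | (x >> (64 - n));
}

int main(void)
{
    uint32_t val = (uint32_t)(H_VMALLOC_END >> 32);  /* 0xd0003800 */

    /* lis: load the high 16 bits, sign-extending into the full register. */
    int64_t r11 = (int32_t)(val & 0xffff0000u);      /* 0xffffffffd0000000 */

    /* ori: OR in the low 16 bits. */
    r11 |= val & 0xffffu;                            /* 0xffffffffd0003800 */

    /* rldic r11,r11,4,28: rotate left 4, then keep bits 28..59,
     * i.e. mask with 0xffffffff0, discarding the sign extension. */
    uint64_t esid_limit = rotl64((uint64_t)r11, 4) & 0xffffffff0ULL;

    assert(esid_limit == (H_VMALLOC_END >> 28));     /* 0xd00038000 */
    printf("0x%llx\n", (unsigned long long)esid_limit);
    return 0;
}

With r10 holding the ESID (the faulting EA shifted right by 28 bits), the cmpld/bge pair in the diff then sends any address at or above H_VMALLOC_END down the IO-mapping path at label 5.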