author    Christoffer Dall <christoffer.dall@arm.com>  2019-01-28 00:06:21 -0800
committer Christoffer Dall <christoffer.dall@arm.com>  2019-11-21 09:15:24 +0100
commit    7fc38b69db0a61f07b77b102744622f0f565db9a
tree      3984cee130525ba3339b16604754c21fa07e7982
parent    0f95a316ec20a821f70f1ef60aadf53e31557c14
KVM: mips: Move to common kvm_mmu_memcache infrastructure
Now that we have a common infrastructure for doing MMU cache
allocations, use this for mips as well.

This will change the GFP flags used for mips from plain GFP_KERNEL to
GFP_PGTABLE_USER. This means that mips KVM page table allocations now
gain __GFP_ACCOUNT and __GFP_ZERO. There should be no harm in the
former, and while the latter might result in slight overhead for
zeroing the page, it seems this is what a hypervisor should do on page
table allocations.

Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
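[ Note: a minimal sketch of the shared topup path under the new flags,
  for illustration only; the real helper is the common
  kvm_mmu_topup_memcache_page() introduced earlier in this series, and
  the struct layout mirrors the mips one removed below:

	/* GFP_PGTABLE_USER = GFP_KERNEL | __GFP_ACCOUNT | __GFP_ZERO */
	static int topup_sketch(struct kvm_mmu_memcache *mc, int min)
	{
		void *page;

		if (mc->nobjs >= min)
			return 0;
		while (mc->nobjs < KVM_MMU_NR_MEMCACHE_OBJS) {
			page = (void *)__get_free_page(GFP_PGTABLE_USER);
			if (!page)
				return -ENOMEM;
			mc->objects[mc->nobjs++] = page;
		}
		return 0;
	}
]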
 arch/mips/include/asm/kvm_host.h  | 15
 arch/mips/include/asm/kvm_types.h |  4
 arch/mips/kvm/mips.c              |  2
 arch/mips/kvm/mmu.c               | 54
 4 files changed, 17 insertions(+), 58 deletions(-)
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 41204a49cf95e..418c941f1382f 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -293,17 +293,6 @@ struct kvm_mips_tlb {
long tlb_lo[2];
};
-#define KVM_NR_MEM_OBJS 4
-
-/*
- * We don't want allocation failures within the mmu code, so we preallocate
- * enough memory for a single page fault in a cache.
- */
-struct kvm_mmu_memory_cache {
- int nobjs;
- void *objects[KVM_NR_MEM_OBJS];
-};
-
#define KVM_MIPS_AUX_FPU 0x1
#define KVM_MIPS_AUX_MSA 0x2
@@ -378,7 +367,7 @@ struct kvm_vcpu_arch {
unsigned int last_user_gasid;
/* Cache some mmu pages needed inside spinlock regions */
- struct kvm_mmu_memory_cache mmu_page_cache;
+ struct kvm_mmu_memcache mmu_page_cache;
#ifdef CONFIG_KVM_MIPS_VZ
/* vcpu's vzguestid is different on each host cpu in an smp system */
@@ -915,7 +904,7 @@ void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags);
bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
pgd_t *kvm_pgd_alloc(void);
-void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
+void kvm_mmu_free_memcaches(struct kvm_vcpu *vcpu);
void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
bool user);
void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu);
diff --git a/arch/mips/include/asm/kvm_types.h b/arch/mips/include/asm/kvm_types.h
index 5efeb32a59269..c7b906568a0e5 100644
--- a/arch/mips/include/asm/kvm_types.h
+++ b/arch/mips/include/asm/kvm_types.h
@@ -2,4 +2,8 @@
#ifndef _ASM_MIPS_KVM_TYPES_H
#define _ASM_MIPS_KVM_TYPES_H
+#define KVM_ARCH_WANT_MMU_MEMCACHE
+
+#define KVM_MMU_NR_MEMCACHE_OBJS 4
+
#endif /* _ASM_MIPS_KVM_TYPES_H */
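[ Note: KVM_ARCH_WANT_MMU_MEMCACHE opts mips into the common memcache
  code; the common side can be pictured roughly as follows
  (illustrative only, shape taken from the mips struct removed above):

	#ifdef KVM_ARCH_WANT_MMU_MEMCACHE
	struct kvm_mmu_memcache {
		int nobjs;
		void *objects[KVM_MMU_NR_MEMCACHE_OBJS];
	};
	#endif
]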
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 1109924560d8c..8bf12ed539b55 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -415,7 +415,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
kvm_mips_dump_stats(vcpu);
- kvm_mmu_free_memory_caches(vcpu);
+ kvm_mmu_free_memcaches(vcpu);
kfree(vcpu->arch.guest_ebase);
kfree(vcpu->arch.kseg0_commpage);
kfree(vcpu);
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 97e538a8c1be2..aed5284d642e2 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -25,41 +25,9 @@
#define KVM_MMU_CACHE_MIN_PAGES 2
#endif
-static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
- int min, int max)
+void kvm_mmu_free_memcaches(struct kvm_vcpu *vcpu)
{
- void *page;
-
- BUG_ON(max > KVM_NR_MEM_OBJS);
- if (cache->nobjs >= min)
- return 0;
- while (cache->nobjs < max) {
- page = (void *)__get_free_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
- cache->objects[cache->nobjs++] = page;
- }
- return 0;
-}
-
-static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
-{
- while (mc->nobjs)
- free_page((unsigned long)mc->objects[--mc->nobjs]);
-}
-
-static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
-{
- void *p;
-
- BUG_ON(!mc || !mc->nobjs);
- p = mc->objects[--mc->nobjs];
- return p;
-}
-
-void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
-{
- mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+ kvm_mmu_free_memcache_page(&vcpu->arch.mmu_page_cache);
}
/**
@@ -133,7 +101,7 @@ pgd_t *kvm_pgd_alloc(void)
* NULL if a page table doesn't exist for @addr and !@cache.
* NULL if a page table allocation failed.
*/
-static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
+static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memcache *cache,
unsigned long addr)
{
pud_t *pud;
@@ -151,7 +119,7 @@ static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
if (!cache)
return NULL;
- new_pmd = mmu_memory_cache_alloc(cache);
+ new_pmd = kvm_mmu_memcache_alloc(cache);
pmd_init((unsigned long)new_pmd,
(unsigned long)invalid_pte_table);
pud_populate(NULL, pud, new_pmd);
@@ -162,7 +130,7 @@ static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
if (!cache)
return NULL;
- new_pte = mmu_memory_cache_alloc(cache);
+ new_pte = kvm_mmu_memcache_alloc(cache);
clear_page(new_pte);
pmd_populate_kernel(NULL, pmd, new_pte);
}
@@ -171,7 +139,7 @@ static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
/* Caller must hold kvm->mm_lock */
static pte_t *kvm_mips_pte_for_gpa(struct kvm *kvm,
- struct kvm_mmu_memory_cache *cache,
+ struct kvm_mmu_memcache *cache,
unsigned long addr)
{
return kvm_mips_walk_pgd(kvm->arch.gpa_mm.pgd, cache, addr);
@@ -688,7 +656,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
pte_t *out_entry, pte_t *out_buddy)
{
struct kvm *kvm = vcpu->kvm;
- struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
+ struct kvm_mmu_memcache *memcache = &vcpu->arch.mmu_page_cache;
gfn_t gfn = gpa >> PAGE_SHIFT;
int srcu_idx, err;
kvm_pfn_t pfn;
@@ -705,8 +673,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
goto out;
/* We need a minimum of cached pages ready for page table creation */
- err = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
- KVM_NR_MEM_OBJS);
+ err = kvm_mmu_topup_memcache_page(memcache, KVM_MMU_CACHE_MIN_PAGES);
if (err)
goto out;
@@ -785,13 +752,12 @@ out:
static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu,
unsigned long addr)
{
- struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
+ struct kvm_mmu_memcache *memcache = &vcpu->arch.mmu_page_cache;
pgd_t *pgdp;
int ret;
/* We need a minimum of cached pages ready for page table creation */
- ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
- KVM_NR_MEM_OBJS);
+ ret = kvm_mmu_topup_memcache_page(memcache, KVM_MMU_CACHE_MIN_PAGES);
if (ret)
return NULL;
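[ Note: taken together, the converted call sites follow the usual
  topup/alloc/free lifecycle; roughly, using only names from the hunks
  above:

	/* top up outside the spinlock, before walking page tables */
	err = kvm_mmu_topup_memcache_page(memcache, KVM_MMU_CACHE_MIN_PAGES);
	if (err)
		goto out;

	/* inside the walk: will not fail after a successful topup */
	new_pte = kvm_mmu_memcache_alloc(memcache);

	/* on vcpu teardown */
	kvm_mmu_free_memcaches(vcpu);
]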