diff options
author | Paul Mackerras <paulus@au1.ibm.com> | 2014-05-02 08:45:35 -0500 |
---|---|---|
committer | Eli Qiao <taget@linux.vnet.ibm.com> | 2014-05-04 10:55:22 +0800 |
commit | 1beb8571b373cf5c6019029071a52028807287f1 (patch) | |
tree | acc472c8133135a15fa2a94b40bf4d06cdf380d5 | |
parent | 2b71a05e56ad48cd52ec9b169df6dcbf961d9ed3 (diff) | |
download | powerkvm-1beb8571b373cf5c6019029071a52028807287f1.tar.gz |
PPC: KVM: fix dirty map for hugepages
The dirty map is system page (4K/64K) per bit, and when we populate dirty
map, we reset the Change bit in the HPT, which is expected to contain pages
less or equal to the system page size. This works until we start using
huge pages (16MB). In this case, we mark dirty just a single system page
and miss the rest of 16MB page which may be dirty as well.
This changes kvm_test_clear_dirty to return the actual number of pages,
which is calculated from the HPT entry.
This changes kvmppc_hv_get_dirty_log() to make pages dirty starting from
the rounded guest physical page number.
[paulus@samba.org - don't advance i in the loop to set dirty bits, so
that we make sure to clear C in all HPTEs.]
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Paul Mackerras <paulus@samba.org>
-rw-r--r-- | arch/powerpc/kvm/book3s_64_mmu_hv.c | 11 |
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index f7f8f5d93d81e..bccc8c39ce506 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -1083,7 +1083,8 @@ static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
 				rev[i].guest_rpte |= HPTE_R_C;
 				note_hpte_modification(kvm, &rev[i]);
 			}
-			ret = 1;
+			ret = max(1UL, hpte_page_size(hptep[0], hptep[1]) >>
+				  PAGE_SHIFT);
 		}
 		hptep[0] &= ~HPTE_V_HVLOCK;
 	} while ((i = j) != head);
@@ -1113,15 +1114,17 @@ static void harvest_vpa_dirty(struct kvmppc_vpa *vpa,
 long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot,
 			     unsigned long *map)
 {
-	unsigned long i;
+	unsigned long i, j;
 	unsigned long *rmapp;
 	struct kvm_vcpu *vcpu;
 
 	preempt_disable();
 	rmapp = memslot->arch.rmap;
 	for (i = 0; i < memslot->npages; ++i) {
-		if (kvm_test_clear_dirty(kvm, rmapp) && map)
-			__set_bit_le(i, map);
+		int ret = kvm_test_clear_dirty(kvm, rmapp);
+		if (ret && map)
+			for (j = i - (i % ret); ret; ++j, --ret)
+				__set_bit_le(j, map);
 		++rmapp;
 	}