aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAlexey Kardashevskiy <aik@ozlabs.ru>2014-01-06 18:07:58 +1100
committerEli Qiao <taget@linux.vnet.ibm.com>2014-01-06 15:58:16 +0800
commit23fa06c76f06d0a570d6858b206368211b0d8f60 (patch)
treee919c089b186fc6eaa3a9f5491f8e384ab6307ea
parentaf4c301bd5b700f62597bcdf8e6f66bd2fd65db9 (diff)
downloadpowerkvm-23fa06c76f06d0a570d6858b206368211b0d8f60.tar.gz
vfio: use VFIO KVM device for real/virtual mode TCE hypercalls
The upstream kernel got a VFIO KVM device, which we support on PPC64 to associate LIOBNs with IOMMU groups. This moves the existing real/virtual mode handlers to the newer codebase. Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
-rw-r--r--arch/powerpc/include/asm/kvm_host.h10
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h2
-rw-r--r--arch/powerpc/kvm/book3s.c2
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c127
-rw-r--r--arch/powerpc/kvm/book3s_64_vio_hv.c74
5 files changed, 160 insertions(+), 55 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index f1fa962fcb8e7b..160faad31e8cf6 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -232,6 +232,14 @@ struct kvmppc_spapr_iommu_hugepage {
unsigned long size; /* Huge page size (always 16MB at the moment) */
};
+#define KVMPPC_SPAPR_IOMMU_GRP_HASH(liobn) hash_32(liobn, 32)
+
+struct kvmppc_spapr_iommu_grp {
+ struct hlist_node hash_node;
+ unsigned long liobn;
+ struct iommu_group *grp;
+};
+
/* XICS components, defined in book3s_xics.c */
struct kvmppc_xics;
struct kvmppc_icp;
@@ -310,6 +318,8 @@ struct kvm_arch {
struct list_head rtas_tokens;
DECLARE_HASHTABLE(hugepages_hash_tab, ilog2(64));
spinlock_t hugepages_write_lock;
+ DECLARE_HASHTABLE(iommu_grp_hash_tab, ilog2(4));
+ spinlock_t iommu_grp_write_lock;
#endif
#ifdef CONFIG_KVM_MPIC
struct openpic *mpic;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index dd4a00d1c44fc6..661f12eedf8996 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -126,6 +126,8 @@ extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_iommu_hugepages_init(struct kvm_arch *ka);
extern void kvmppc_iommu_hugepages_cleanup(struct kvm_arch *ka);
+extern void kvmppc_iommu_iommu_grp_init(struct kvm_arch *ka);
+extern void kvmppc_iommu_iommu_grp_cleanup(struct kvm_arch *ka);
extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvm_create_spapr_tce *args);
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 39779096b298d2..97d224ca03b035 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -862,6 +862,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
kvmppc_iommu_hugepages_init(&kvm->arch);
+ kvmppc_iommu_iommu_grp_init(&kvm->arch);
#endif
return kvm->arch.kvm_ops->init_vm(kvm);
@@ -872,6 +873,7 @@ void kvmppc_core_destroy_vm(struct kvm *kvm)
kvm->arch.kvm_ops->destroy_vm(kvm);
#ifdef CONFIG_PPC64
+ kvmppc_iommu_iommu_grp_cleanup(&kvm->arch);
kvmppc_rtas_tokens_free(kvm);
WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
kvmppc_iommu_hugepages_cleanup(&kvm->arch);
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 91c038980bcca0..d8cd27e40fe55c 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -45,6 +45,79 @@
#define ERROR_ADDR ((void *)~(unsigned long)0x0)
+void kvmppc_iommu_iommu_grp_init(struct kvm_arch *ka)
+{
+ spin_lock_init(&ka->iommu_grp_write_lock);
+ hash_init(ka->iommu_grp_hash_tab);
+}
+EXPORT_SYMBOL_GPL(kvmppc_iommu_iommu_grp_init);
+
+static void free_kvm_group(struct kvmppc_spapr_iommu_grp *kgrp)
+{
+ hlist_del_rcu(&kgrp->hash_node);
+ iommu_group_put(kgrp->grp);
+ kfree(kgrp);
+}
+
+void kvmppc_iommu_iommu_grp_cleanup(struct kvm_arch *ka)
+{
+ int bkt;
+ struct kvmppc_spapr_iommu_grp *kgrp;
+ struct hlist_node *tmp;
+
+ spin_lock(&ka->iommu_grp_write_lock);
+ hash_for_each_safe(ka->iommu_grp_hash_tab, bkt, tmp, kgrp, hash_node) {
+ free_kvm_group(kgrp);
+ }
+ spin_unlock(&ka->iommu_grp_write_lock);
+}
+EXPORT_SYMBOL_GPL(kvmppc_iommu_iommu_grp_cleanup);
+
+static void kvmdev_release_group_callback(struct kvm *kvm, unsigned long liobn)
+{
+ struct kvm_arch *ka = &kvm->arch;
+ int bkt;
+ struct kvmppc_spapr_iommu_grp *kgrp;
+ struct hlist_node *tmp;
+
+ spin_lock(&ka->iommu_grp_write_lock);
+ hash_for_each_safe(ka->iommu_grp_hash_tab, bkt, tmp, kgrp, hash_node) {
+ if (kgrp->liobn == liobn) {
+ free_kvm_group(kgrp);
+ break;
+ }
+ }
+ spin_unlock(&ka->iommu_grp_write_lock);
+}
+
+struct iommu_group *find_group_by_liobn(struct kvm *kvm, unsigned long liobn)
+{
+ struct iommu_group *grp;
+ struct kvmppc_spapr_iommu_grp *kgrp;
+ const unsigned key = KVMPPC_SPAPR_IOMMU_GRP_HASH(liobn);
+
+ hash_for_each_possible_rcu_notrace(kvm->arch.iommu_grp_hash_tab, kgrp,
+ hash_node, key) {
+ if (kgrp->liobn == liobn)
+ return kgrp->grp;
+ }
+
+ grp = kvm_vfio_find_group_by_liobn(kvm, liobn,
+ kvmdev_release_group_callback);
+ if (IS_ERR(grp))
+ return NULL;
+
+ kgrp = kzalloc(sizeof(*kgrp), GFP_KERNEL);
+ if (!kgrp)
+ return NULL;
+
+ kgrp->liobn = liobn;
+ kgrp->grp = grp;
+ hash_add_rcu(kvm->arch.iommu_grp_hash_tab, &kgrp->hash_node, key);
+
+ return grp;
+}
+
/*
* API to support huge pages in real mode
*/
@@ -212,7 +285,9 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
int i;
/* Check this LIOBN hasn't been previously allocated */
- if (kvmppc_find_tce_table(kvm, args->liobn))
+ struct iommu_group *grp = NULL;
+ grp = find_group_by_liobn(kvm, args->liobn);
+ if (grp)
return -EBUSY;
npages = kvmppc_stt_npages(args->window_size);
@@ -294,14 +369,14 @@ static void __user *kvmppc_gpa_to_hva_and_get(struct kvm_vcpu *vcpu,
}
long kvmppc_h_put_tce_iommu(struct kvm_vcpu *vcpu,
- struct kvmppc_spapr_tce_table *tt,
+ struct iommu_group *grp,
unsigned long liobn, unsigned long ioba,
unsigned long tce)
{
struct page *pg = NULL;
unsigned long hpa;
void __user *hva;
- struct iommu_table *tbl = iommu_group_get_iommudata(tt->grp);
+ struct iommu_table *tbl = iommu_group_get_iommudata(grp);
if (!tbl)
return H_RESCINDED;
@@ -344,11 +419,11 @@ long kvmppc_h_put_tce_iommu(struct kvm_vcpu *vcpu,
}
static long kvmppc_h_put_tce_indirect_iommu(struct kvm_vcpu *vcpu,
- struct kvmppc_spapr_tce_table *tt, unsigned long ioba,
+ struct iommu_group *grp, unsigned long ioba,
unsigned long __user *tces, unsigned long npages)
{
long i;
- struct iommu_table *tbl = iommu_group_get_iommudata(tt->grp);
+ struct iommu_table *tbl = iommu_group_get_iommudata(grp);
if (!tbl)
return H_RESCINDED;
@@ -392,11 +467,11 @@ putpages_flush_exit:
}
long kvmppc_h_stuff_tce_iommu(struct kvm_vcpu *vcpu,
- struct kvmppc_spapr_tce_table *tt,
+ struct iommu_group *grp,
unsigned long liobn, unsigned long ioba,
unsigned long tce_value, unsigned long npages)
{
- struct iommu_table *tbl = iommu_group_get_iommudata(tt->grp);
+ struct iommu_table *tbl = iommu_group_get_iommudata(grp);
unsigned long entry = ioba >> IOMMU_PAGE_SHIFT;
if (!tbl)
@@ -417,13 +492,17 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu,
{
long ret;
struct kvmppc_spapr_tce_table *tt;
+ struct iommu_group *grp = NULL;
tt = kvmppc_find_tce_table(vcpu->kvm, liobn);
- if (!tt)
- return H_TOO_HARD;
+ if (!tt) {
+ grp = find_group_by_liobn(vcpu->kvm, liobn);
+ if (!grp)
+ return H_TOO_HARD;
+ }
- if (tt->type == KVMPPC_TCET_IOMMU)
- return kvmppc_h_put_tce_iommu(vcpu, tt, liobn, ioba, tce);
+ if (grp)
+ return kvmppc_h_put_tce_iommu(vcpu, grp, liobn, ioba, tce);
/* Emulated IO */
if (ioba >= tt->window_size)
@@ -447,10 +526,14 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
long i, ret = H_SUCCESS;
unsigned long __user *tces;
struct page *pg = NULL;
+ struct iommu_group *grp = NULL;
tt = kvmppc_find_tce_table(vcpu->kvm, liobn);
- if (!tt)
- return H_TOO_HARD;
+ if (!tt) {
+ grp = find_group_by_liobn(vcpu->kvm, liobn);
+ if (!grp)
+ return H_TOO_HARD;
+ }
/*
* The spec says that the maximum size of the list is 512 TCEs
@@ -462,7 +545,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
if (tce_list & ~IOMMU_PAGE_MASK)
return H_PARAMETER;
- if ((ioba + (npages << IOMMU_PAGE_SHIFT)) > tt->window_size)
+ if (tt && ((ioba + (npages << IOMMU_PAGE_SHIFT)) > tt->window_size))
return H_PARAMETER;
if (vcpu->arch.tce_rm_fail != TCERM_NONE)
@@ -475,9 +558,9 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
if (tces == ERROR_ADDR)
return H_TOO_HARD;
- if (tt->type == KVMPPC_TCET_IOMMU) {
+ if (grp) {
ret = kvmppc_h_put_tce_indirect_iommu(vcpu,
- tt, ioba, tces, npages);
+ grp, ioba, tces, npages);
goto put_list_page_exit;
}
@@ -510,13 +593,17 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
{
struct kvmppc_spapr_tce_table *tt;
long i, ret;
+ struct iommu_group *grp = NULL;
tt = kvmppc_find_tce_table(vcpu->kvm, liobn);
- if (!tt)
- return H_TOO_HARD;
+ if (!tt) {
+ grp = find_group_by_liobn(vcpu->kvm, liobn);
+ if (!grp)
+ return H_TOO_HARD;
+ }
- if (tt->type == KVMPPC_TCET_IOMMU)
- return kvmppc_h_stuff_tce_iommu(vcpu, tt, liobn, ioba,
+ if (grp)
+ return kvmppc_h_stuff_tce_iommu(vcpu, grp, liobn, ioba,
tce_value, npages);
/* Emulated IO */
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index a17c91a64bc689..a59aa5f88f5032 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -48,33 +48,25 @@
* WARNING: This will be called in real or virtual mode on HV KVM and virtual
* mode on PR KVM
*/
-struct kvmppc_spapr_tce_table *kvmppc_find_iommu_tce_table(
- struct kvm *kvm, unsigned long liobn)
+static struct iommu_group *find_group_by_liobn_rm(struct kvm *kvm, unsigned long liobn)
{
- struct kvmppc_spapr_tce_table *tt;
- struct kvmppc_spapr_tce_iommu_device *tcedev = kvm->arch.tcedev;
-
- if (!tcedev)
- return NULL;
+ struct kvmppc_spapr_iommu_grp *kvmgrp;
+ const unsigned key = KVMPPC_SPAPR_IOMMU_GRP_HASH(liobn);
- list_for_each_entry(tt, &tcedev->tables, list) {
- if (tt->liobn == liobn)
- return tt;
+ hash_for_each_possible_rcu_notrace(kvm->arch.iommu_grp_hash_tab, kvmgrp,
+ hash_node, key) {
+ if (kvmgrp->liobn == liobn)
+ return kvmgrp->grp;
}
return NULL;
}
-EXPORT_SYMBOL_GPL(kvmppc_find_iommu_tce_table);
struct kvmppc_spapr_tce_table *kvmppc_find_tce_table(struct kvm *kvm,
unsigned long liobn)
{
struct kvmppc_spapr_tce_table *tt;
- tt = kvmppc_find_iommu_tce_table(kvm, liobn);
- if (tt)
- return tt;
-
list_for_each_entry(tt, &kvm->arch.spapr_tce_tables, list) {
if (tt->liobn == liobn)
return tt;
@@ -241,11 +233,11 @@ static unsigned long kvmppc_rm_gpa_to_hpa_and_get(struct kvm_vcpu *vcpu,
}
static long kvmppc_rm_h_put_tce_iommu(struct kvm_vcpu *vcpu,
- struct kvmppc_spapr_tce_table *tt, unsigned long liobn,
+ struct iommu_group *grp, unsigned long liobn,
unsigned long ioba, unsigned long tce)
{
int ret = 0;
- struct iommu_table *tbl = iommu_group_get_iommudata(tt->grp);
+ struct iommu_table *tbl = iommu_group_get_iommudata(grp);
unsigned long hpa;
struct page *pg = NULL;
@@ -288,12 +280,12 @@ static long kvmppc_rm_h_put_tce_iommu(struct kvm_vcpu *vcpu,
}
static long kvmppc_rm_h_put_tce_indirect_iommu(struct kvm_vcpu *vcpu,
- struct kvmppc_spapr_tce_table *tt, unsigned long ioba,
+ struct iommu_group *grp, unsigned long ioba,
unsigned long *tces, unsigned long npages)
{
int i, ret;
unsigned long hpa;
- struct iommu_table *tbl = iommu_group_get_iommudata(tt->grp);
+ struct iommu_table *tbl = iommu_group_get_iommudata(grp);
struct page *pg = NULL;
if (!tbl)
@@ -332,11 +324,11 @@ static long kvmppc_rm_h_put_tce_indirect_iommu(struct kvm_vcpu *vcpu,
}
static long kvmppc_rm_h_stuff_tce_iommu(struct kvm_vcpu *vcpu,
- struct kvmppc_spapr_tce_table *tt,
+ struct iommu_group *grp,
unsigned long liobn, unsigned long ioba,
unsigned long tce_value, unsigned long npages)
{
- struct iommu_table *tbl = iommu_group_get_iommudata(tt->grp);
+ struct iommu_table *tbl = iommu_group_get_iommudata(grp);
if (!tbl)
return H_RESCINDED;
@@ -355,16 +347,20 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
{
long ret;
struct kvmppc_spapr_tce_table *tt;
+ struct iommu_group *grp = NULL;
tt = kvmppc_find_tce_table(vcpu->kvm, liobn);
- if (!tt)
- return H_TOO_HARD;
+ if (!tt) {
+ grp = find_group_by_liobn_rm(vcpu->kvm, liobn);
+ if (!grp)
+ return H_TOO_HARD;
+ }
vcpu->arch.tce_rm_fail = TCERM_NONE;
vcpu->arch.tce_tmp_num = 0;
- if (tt->type == KVMPPC_TCET_IOMMU)
- return kvmppc_rm_h_put_tce_iommu(vcpu, tt, liobn, ioba, tce);
+ if (grp)
+ return kvmppc_rm_h_put_tce_iommu(vcpu, grp, liobn, ioba, tce);
/* Emulated IO */
if (ioba >= tt->window_size)
@@ -385,10 +381,14 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
long i, ret = H_SUCCESS;
unsigned long tces;
struct page *pg = NULL;
+ struct iommu_group *grp = NULL;
tt = kvmppc_find_tce_table(vcpu->kvm, liobn);
- if (!tt)
- return H_TOO_HARD;
+ if (!tt) {
+ grp = find_group_by_liobn_rm(vcpu->kvm, liobn);
+ if (!grp)
+ return H_TOO_HARD;
+ }
/*
* The spec says that the maximum size of the list is 512 TCEs
@@ -400,7 +400,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
if (tce_list & ~IOMMU_PAGE_MASK)
return H_PARAMETER;
- if ((ioba + (npages << IOMMU_PAGE_SHIFT)) > tt->window_size)
+ if (tt && ((ioba + (npages << IOMMU_PAGE_SHIFT)) > tt->window_size))
return H_PARAMETER;
vcpu->arch.tce_rm_fail = TCERM_NONE;
@@ -412,9 +412,9 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
return H_TOO_HARD;
}
- if (tt->type == KVMPPC_TCET_IOMMU) {
+ if (grp) {
ret = kvmppc_rm_h_put_tce_indirect_iommu(vcpu,
- tt, ioba, (unsigned long *)tces, npages);
+ grp, ioba, (unsigned long *)tces, npages);
if (ret == H_TOO_HARD)
return ret;
@@ -447,13 +447,17 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
{
struct kvmppc_spapr_tce_table *tt;
long i, ret;
+ struct iommu_group *grp = NULL;
tt = kvmppc_find_tce_table(vcpu->kvm, liobn);
- if (!tt)
- return H_TOO_HARD;
+ if (!tt) {
+ grp = find_group_by_liobn_rm(vcpu->kvm, liobn);
+ if (!grp)
+ return H_TOO_HARD;
+ }
- if (tt->type == KVMPPC_TCET_IOMMU)
- return kvmppc_rm_h_stuff_tce_iommu(vcpu, tt, liobn, ioba,
+ if (grp)
+ return kvmppc_rm_h_stuff_tce_iommu(vcpu, grp, liobn, ioba,
tce_value, npages);
/* Emulated IO */
@@ -469,4 +473,4 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
return H_SUCCESS;
}
-#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+#endif /* KVM_BOOK3S_HV_POSSIBLE */