author    Christoffer Dall <christoffer.dall@arm.com>  2019-01-25 17:10:47 +0100
committer Christoffer Dall <christoffer.dall@arm.com>  2019-01-25 17:10:47 +0100
commit    437cb6b5c3f6c30acfc6a85467715c71e0d906f5
tree      9b8ae530ed62538e03cf3223cf550a85709f34b0
parent    313ca24a44d2bcc84cc8969a972bc4bcd759c616
download  kvmarm-kvm-arm64/edf-wip.tar.gz

KVM: arm64: nv: Rework nested GICv3 support (kvm-arm64/edf-wip)
This gets rid of the nested and shadow gicv3 cpuif structures and
performs the necessary conversion and save/restore for nested
virtualization when strictly necessary.

Even though we copy some code from vgic-v3-sr.c, this is still much
simpler to maintain and understand, and we don't impact the normal
GICv3 path with the nested virt logic.

Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
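For readers following the conversion: the ICH_* EL2 state now lives in the
per-vCPU sysreg array and is reached through __vcpu_sys_reg(), with the
sixteen list registers occupying contiguous slots starting at ICH_LR0_EL2.
A minimal sketch of that access pattern, mirroring access_gic_lr() in the
diff below (the helper name itself is hypothetical and not part of this
patch):

	/*
	 * Hypothetical helper illustrating the new storage scheme:
	 * ICH_LR0_EL2..ICH_LR15_EL2 are contiguous entries in the vcpu
	 * sysreg array, so LR n is simply an offset from ICH_LR0_EL2.
	 */
	static u64 nested_read_ich_lr(struct kvm_vcpu *vcpu, unsigned int n)
	{
		u64 *lrs = &__vcpu_sys_reg(vcpu, ICH_LR0_EL2);

		return lrs[n];
	}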
-rw-r--r--  arch/arm64/include/asm/kvm_host.h      |  14
-rw-r--r--  arch/arm64/include/asm/kvm_hyp.h       |   1
-rw-r--r--  arch/arm64/kvm/sys_regs.c              |  27
-rw-r--r--  include/kvm/arm_vgic.h                 |   9
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h     |   4
-rw-r--r--  virt/kvm/arm/hyp/vgic-v3-sr.c          |   6
-rw-r--r--  virt/kvm/arm/vgic/vgic-nested-trace.h  | 115
-rw-r--r--  virt/kvm/arm/vgic/vgic-v3-nested.c     | 231
-rw-r--r--  virt/kvm/arm/vgic/vgic-v3.c            |  28
9 files changed, 216 insertions, 219 deletions
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 22bb993e88acab..2ac69a9a86f283 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -251,6 +251,20 @@ enum vcpu_sysreg {
CNTHP_CVAL_EL2,
CNTVOFF_EL2, /* ARMv8.4-NV=0x60 */
+ /* GIC Registers */
+ ICH_HCR_EL2, /* ARMv8.4-NV=0x4c0 */
+ ICH_VMCR_EL2, /* ARMv8.4-NV=0x4c8 */
+ ICH_AP0R0_EL2, /* ARMv8.4-NV=0x480 */
+ ICH_AP0R1_EL2, /* ARMv8.4-NV=0x488 */
+ ICH_AP0R2_EL2, /* ARMv8.4-NV=0x490 */
+ ICH_AP0R3_EL2, /* ARMv8.4-NV=0x498 */
+ ICH_AP1R0_EL2, /* ARMv8.4-NV=0x4a0 */
+ ICH_AP1R1_EL2, /* ARMv8.4-NV=0x4a8 */
+ ICH_AP1R2_EL2, /* ARMv8.4-NV=0x4b0 */
+ ICH_AP1R3_EL2, /* ARMv8.4-NV=0x4b8 */
+ ICH_LR0_EL2, /* ARMv8.4-NV=0x400 */
+ ICH_LR15_EL2 = ICH_LR0_EL2 + 15, /* ARMv8.4-NV=0x478 */
+
NR_SYS_REGS /* Nothing after this line! */
};
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index bc9057adc5aaf3..e457f8f99b467f 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -100,6 +100,7 @@ typeof(orig) * __hyp_text fname(void) \
int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
u64 __hyp_text __gic_v3_get_lr(unsigned int lr);
+void __hyp_text __gic_v3_set_lr(u64 val, int lr);
void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 113f2656f2f88f..0e0aa66e1fa50f 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1709,14 +1709,14 @@ static bool access_gic_apr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.nested_vgic_v3;
- u32 index, *base;
+ u32 index;
+ u64 *base;
index = r->Op2;
if (r->CRm == 8)
- base = cpu_if->vgic_ap0r;
+ base = &__vcpu_sys_reg(vcpu, ICH_AP0R0_EL2);
else
- base = cpu_if->vgic_ap1r;
+ base = &__vcpu_sys_reg(vcpu, ICH_AP1R0_EL2);
if (p->is_write)
base[index] = p->regval;
@@ -1730,12 +1730,10 @@ static bool access_gic_hcr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.nested_vgic_v3;
-
if (p->is_write)
- cpu_if->vgic_hcr = p->regval;
+ __vcpu_sys_reg(vcpu, ICH_HCR_EL2) = p->regval;
else
- p->regval = cpu_if->vgic_hcr;
+ p->regval = __vcpu_sys_reg(vcpu, ICH_HCR_EL2);
return true;
}
@@ -1792,12 +1790,10 @@ static bool access_gic_vmcr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.nested_vgic_v3;
-
if (p->is_write)
- cpu_if->vgic_vmcr = p->regval;
+ __vcpu_sys_reg(vcpu, ICH_VMCR_EL2) = p->regval;
else
- p->regval = cpu_if->vgic_vmcr;
+ p->regval = __vcpu_sys_reg(vcpu, ICH_VMCR_EL2);
return true;
}
@@ -1806,17 +1802,18 @@ static bool access_gic_lr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.nested_vgic_v3;
u32 index;
+ u64 *lrs;
index = p->Op2;
if (p->CRm == 13)
index += 8;
+ lrs = &__vcpu_sys_reg(vcpu, ICH_LR0_EL2);
if (p->is_write)
- cpu_if->vgic_lr[index] = p->regval;
+ lrs[index] = p->regval;
else
- p->regval = cpu_if->vgic_lr[index];
+ p->regval = lrs[index];
return true;
}
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index ff031f53e7a19b..ddb2842322ccb9 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -313,15 +313,6 @@ struct vgic_cpu {
struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];
- /* CPU vif control registers for the virtual GICH interface */
- struct vgic_v3_cpu_if nested_vgic_v3;
-
- /*
- * The shadow vif control register loaded to the hardware when
- * running a nested L2 guest with the virtual IMO/FMO bit set.
- */
- struct vgic_v3_cpu_if shadow_vgic_v3;
-
spinlock_t ap_list_lock; /* Protects the ap_list */
/*
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 071b4cbdf01047..302901da11b4ec 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -575,6 +575,10 @@
#define ICC_SGI1R_AFFINITY_3_SHIFT 48
#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT)
+#define vtr_to_max_lr_idx(v) ((v) & 0xf)
+#define vtr_to_nr_pre_bits(v) ((((u32)(v) >> 26) & 7) + 1)
+#define vtr_to_nr_apr_regs(v) (1 << (vtr_to_nr_pre_bits(v) - 5))
+
#include <asm/arch_gicv3.h>
#ifndef __ASSEMBLY__
diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c
index 5dbce07245c3f1..ed9964c1ca713d 100644
--- a/virt/kvm/arm/hyp/vgic-v3-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v3-sr.c
@@ -23,10 +23,6 @@
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
-#define vtr_to_max_lr_idx(v) ((v) & 0xf)
-#define vtr_to_nr_pre_bits(v) ((((u32)(v) >> 26) & 7) + 1)
-#define vtr_to_nr_apr_regs(v) (1 << (vtr_to_nr_pre_bits(v) - 5))
-
u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
{
switch (lr & 0xf) {
@@ -67,7 +63,7 @@ u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
unreachable();
}
-static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
+void __hyp_text __gic_v3_set_lr(u64 val, int lr)
{
switch (lr & 0xf) {
case 0:
diff --git a/virt/kvm/arm/vgic/vgic-nested-trace.h b/virt/kvm/arm/vgic/vgic-nested-trace.h
index 69f4ec031e7c53..9eab4a5e75dd6b 100644
--- a/virt/kvm/arm/vgic/vgic-nested-trace.h
+++ b/virt/kvm/arm/vgic/vgic-nested-trace.h
@@ -7,103 +7,56 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
-#define SLR_ENTRY_VALS(x) \
- " ", \
- !!(__entry->lrs[x] & ICH_LR_HW), \
- !!(__entry->lrs[x] & ICH_LR_PENDING_BIT), \
- !!(__entry->lrs[x] & ICH_LR_ACTIVE_BIT), \
- __entry->lrs[x] & ICH_LR_VIRTUAL_ID_MASK, \
- (__entry->lrs[x] & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT, \
- (__entry->orig_lrs[x] & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT
-
-TRACE_EVENT(vgic_create_shadow_lrs,
- TP_PROTO(struct kvm_vcpu *vcpu, int nr_lr, u64 *lrs, u64 *orig_lrs),
- TP_ARGS(vcpu, nr_lr, lrs, orig_lrs),
+TRACE_EVENT(vgic_restore_shadow_lr,
+ TP_PROTO(struct kvm_vcpu *vcpu, int lr_idx, u64 lr, u64 orig_lr),
+ TP_ARGS(vcpu, lr_idx, lr, orig_lr),
TP_STRUCT__entry(
- __field( int, nr_lr )
- __array( u64, lrs, 16 )
- __array( u64, orig_lrs, 16 )
+ __field( int, lr_idx )
+ __field( u64, lr )
+ __field( u64, orig_lr )
),
TP_fast_assign(
- __entry->nr_lr = nr_lr;
- memcpy(__entry->lrs, lrs, 16 * sizeof(u64));
- memcpy(__entry->orig_lrs, orig_lrs, 16 * sizeof(u64));
+ __entry->lr_idx = lr_idx;
+ __entry->lr = lr;
+ __entry->orig_lr = orig_lr;
),
- TP_printk("nr_lr: %d\n"
- "%50sLR[ 0]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu (%5llu)\n"
- "%50sLR[ 1]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu (%5llu)\n"
- "%50sLR[ 2]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu (%5llu)\n"
- "%50sLR[ 3]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu (%5llu)\n"
- "%50sLR[ 4]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu (%5llu)\n"
- "%50sLR[ 5]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu (%5llu)\n"
- "%50sLR[ 6]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu (%5llu)\n"
- "%50sLR[ 7]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu (%5llu)\n"
- "%50sLR[ 8]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu (%5llu)\n"
- "%50sLR[ 9]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu (%5llu)\n"
- "%50sLR[10]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu (%5llu)\n"
- "%50sLR[11]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu (%5llu)\n"
- "%50sLR[12]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu (%5llu)\n"
- "%50sLR[13]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu (%5llu)\n"
- "%50sLR[14]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu (%5llu)\n"
- "%50sLR[15]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu (%5llu)",
- __entry->nr_lr,
- SLR_ENTRY_VALS(0), SLR_ENTRY_VALS(1), SLR_ENTRY_VALS(2),
- SLR_ENTRY_VALS(3), SLR_ENTRY_VALS(4), SLR_ENTRY_VALS(5),
- SLR_ENTRY_VALS(6), SLR_ENTRY_VALS(7), SLR_ENTRY_VALS(8),
- SLR_ENTRY_VALS(9), SLR_ENTRY_VALS(10), SLR_ENTRY_VALS(11),
- SLR_ENTRY_VALS(12), SLR_ENTRY_VALS(13), SLR_ENTRY_VALS(14),
- SLR_ENTRY_VALS(15))
+ TP_printk("LR[%2d]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu (%5llu)\n",
+ __entry->lr_idx,
+ !!(__entry->lr & ICH_LR_HW),
+ !!(__entry->lr & ICH_LR_PENDING_BIT),
+ !!(__entry->lr & ICH_LR_ACTIVE_BIT),
+ __entry->lr & ICH_LR_VIRTUAL_ID_MASK,
+ (__entry->lr & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT,
+ (__entry->orig_lr & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT)
);
-#define LR_ENTRY_VALS(x) \
- " ", \
- !!(__entry->lrs[x] & ICH_LR_HW), \
- !!(__entry->lrs[x] & ICH_LR_PENDING_BIT), \
- !!(__entry->lrs[x] & ICH_LR_ACTIVE_BIT), \
- __entry->lrs[x] & ICH_LR_VIRTUAL_ID_MASK, \
- (__entry->lrs[x] & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT
-
-TRACE_EVENT(vgic_put_nested,
- TP_PROTO(struct kvm_vcpu *vcpu, int nr_lr, u64 *lrs),
- TP_ARGS(vcpu, nr_lr, lrs),
+TRACE_EVENT(vgic_save_shadow_lr,
+ TP_PROTO(struct kvm_vcpu *vcpu, int lr_idx, u64 lr, u64 orig_lr),
+ TP_ARGS(vcpu, lr_idx, lr, orig_lr),
TP_STRUCT__entry(
- __field( int, nr_lr )
- __array( u64, lrs, 16 )
+ __field( int, lr_idx )
+ __field( u64, lr )
+ __field( u64, orig_lr )
),
TP_fast_assign(
- __entry->nr_lr = nr_lr;
- memcpy(__entry->lrs, lrs, 16 * sizeof(u64));
+ __entry->lr_idx = lr_idx;
+ __entry->lr = lr;
+ __entry->orig_lr = orig_lr;
),
- TP_printk("nr_lr: %d\n"
- "%50sLR[ 0]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu\n"
- "%50sLR[ 1]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu\n"
- "%50sLR[ 2]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu\n"
- "%50sLR[ 3]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu\n"
- "%50sLR[ 4]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu\n"
- "%50sLR[ 5]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu\n"
- "%50sLR[ 6]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu\n"
- "%50sLR[ 7]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu\n"
- "%50sLR[ 8]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu\n"
- "%50sLR[ 9]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu\n"
- "%50sLR[10]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu\n"
- "%50sLR[11]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu\n"
- "%50sLR[12]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu\n"
- "%50sLR[13]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu\n"
- "%50sLR[14]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu\n"
- "%50sLR[15]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu",
- __entry->nr_lr,
- LR_ENTRY_VALS(0), LR_ENTRY_VALS(1), LR_ENTRY_VALS(2),
- LR_ENTRY_VALS(3), LR_ENTRY_VALS(4), LR_ENTRY_VALS(5),
- LR_ENTRY_VALS(6), LR_ENTRY_VALS(7), LR_ENTRY_VALS(8),
- LR_ENTRY_VALS(9), LR_ENTRY_VALS(10), LR_ENTRY_VALS(11),
- LR_ENTRY_VALS(12), LR_ENTRY_VALS(13), LR_ENTRY_VALS(14),
- LR_ENTRY_VALS(15))
+ TP_printk("LR[%2d]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu (%5llu)\n",
+ __entry->lr_idx,
+ !!(__entry->lr & ICH_LR_HW),
+ !!(__entry->lr & ICH_LR_PENDING_BIT),
+ !!(__entry->lr & ICH_LR_ACTIVE_BIT),
+ __entry->lr & ICH_LR_VIRTUAL_ID_MASK,
+ (__entry->lr & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT,
+ (__entry->orig_lr & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT)
);
TRACE_EVENT(vgic_nested_hw_emulate,
diff --git a/virt/kvm/arm/vgic/vgic-v3-nested.c b/virt/kvm/arm/vgic/vgic-v3-nested.c
index f3c4811ccc9cd2..98e9861c58f537 100644
--- a/virt/kvm/arm/vgic/vgic-v3-nested.c
+++ b/virt/kvm/arm/vgic/vgic-v3-nested.c
@@ -16,16 +16,6 @@
#define CREATE_TRACE_POINTS
#include "vgic-nested-trace.h"
-static inline struct vgic_v3_cpu_if *vcpu_nested_if(struct kvm_vcpu *vcpu)
-{
- return &vcpu->arch.vgic_cpu.nested_vgic_v3;
-}
-
-static inline struct vgic_v3_cpu_if *vcpu_shadow_if(struct kvm_vcpu *vcpu)
-{
- return &vcpu->arch.vgic_cpu.shadow_vgic_v3;
-}
-
static inline bool lr_triggers_eoi(u64 lr)
{
return !(lr & (ICH_LR_STATE | ICH_LR_HW)) && (lr & ICH_LR_EOI);
@@ -33,12 +23,13 @@ static inline bool lr_triggers_eoi(u64 lr)
u16 vgic_v3_get_eisr(struct kvm_vcpu *vcpu)
{
- struct vgic_v3_cpu_if *cpu_if = vcpu_nested_if(vcpu);
u16 reg = 0;
int i;
for (i = 0; i < kvm_vgic_global_state.nr_lr; i++) {
- if (lr_triggers_eoi(cpu_if->vgic_lr[i]))
+ u64 lr = __vcpu_sys_reg(vcpu, ICH_LR0_EL2 + i);
+
+ if (lr_triggers_eoi(lr))
reg |= BIT(i);
}
@@ -47,12 +38,13 @@ u16 vgic_v3_get_eisr(struct kvm_vcpu *vcpu)
u16 vgic_v3_get_elrsr(struct kvm_vcpu *vcpu)
{
- struct vgic_v3_cpu_if *cpu_if = vcpu_nested_if(vcpu);
u16 reg = 0;
int i;
for (i = 0; i < kvm_vgic_global_state.nr_lr; i++) {
- if (!(cpu_if->vgic_lr[i] & ICH_LR_STATE))
+ u64 lr = __vcpu_sys_reg(vcpu, ICH_LR0_EL2 + i);
+
+ if (!(lr & ICH_LR_STATE))
reg |= BIT(i);
}
@@ -61,14 +53,13 @@ u16 vgic_v3_get_elrsr(struct kvm_vcpu *vcpu)
u64 vgic_v3_get_misr(struct kvm_vcpu *vcpu)
{
- struct vgic_v3_cpu_if *cpu_if = vcpu_nested_if(vcpu);
int nr_lr = kvm_vgic_global_state.nr_lr;
u64 reg = 0;
if (vgic_v3_get_eisr(vcpu))
reg |= ICH_MISR_EOI;
- if (cpu_if->vgic_hcr & ICH_HCR_UIE) {
+ if (__vcpu_sys_reg(vcpu, ICH_HCR_EL2) & ICH_HCR_UIE) {
int used_lrs;
used_lrs = nr_lr - hweight16(vgic_v3_get_elrsr(vcpu));
@@ -80,24 +71,57 @@ u64 vgic_v3_get_misr(struct kvm_vcpu *vcpu)
return reg;
}
+void vgic_v3_sync_nested(struct kvm_vcpu *vcpu)
+{
+ struct vgic_irq *irq;
+ int i;
+ int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
+
+ for (i = 0; i < used_lrs; i++) {
+ u64 lr = __vcpu_sys_reg(vcpu, ICH_LR0_EL2 + i);
+ u64 shadow_lr;
+ int l1_irq;
+
+ if (!(lr & ICH_LR_HW) || !(lr & ICH_LR_STATE))
+ continue;
+
+ /*
+ * If we had a HW lr programmed by the guest hypervisor, we
+ * need to emulate the HW effect between the guest hypervisor
+ * and the nested guest.
+ */
+ l1_irq = (lr & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
+ irq = vgic_get_irq(vcpu->kvm, vcpu, l1_irq);
+ if (!irq)
+ continue; /* oh well, the guest hyp is broken */
+
+ shadow_lr = __gic_v3_get_lr(i);
+ if (!(shadow_lr & ICH_LR_STATE)) {
+ trace_vgic_nested_hw_emulate(i, shadow_lr, l1_irq);
+ irq->active = false;
+ }
+
+ vgic_put_irq(vcpu->kvm, irq);
+ }
+}
+
/*
* For LRs which have HW bit set such as timer interrupts, we modify them to
* have the host hardware interrupt number instead of the virtual one programmed
* by the guest hypervisor.
*/
-static void vgic_v3_create_shadow_lr(struct kvm_vcpu *vcpu)
+static void vgic_v3_restore_shadow_lrs(struct kvm_vcpu *vcpu)
{
- struct vgic_v3_cpu_if *cpu_if = vcpu_nested_if(vcpu);
- struct vgic_v3_cpu_if *s_cpu_if = vcpu_shadow_if(vcpu);
struct vgic_irq *irq;
- int i;
+ int i, used_lrs = 0;
for (i = 0; i < kvm_vgic_global_state.nr_lr; i++) {
- u64 lr = cpu_if->vgic_lr[i];
+ u64 lr = __vcpu_sys_reg(vcpu, ICH_LR0_EL2 + i);
+ u64 orig_lr = lr;
int l1_irq;
if (!(lr & ICH_LR_HW))
- goto next;
+ goto write;
/* We have the HW bit set */
l1_irq = (lr & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
@@ -108,7 +132,7 @@ static void vgic_v3_create_shadow_lr(struct kvm_vcpu *vcpu)
lr &= ~ICH_LR_HW;
if (irq)
vgic_put_irq(vcpu->kvm, irq);
- goto next;
+ goto write;
}
/* Translate the virtual mapping to the real one */
@@ -117,74 +141,58 @@ static void vgic_v3_create_shadow_lr(struct kvm_vcpu *vcpu)
lr |= (u64)irq->hwintid << ICH_LR_PHYS_ID_SHIFT;
vgic_put_irq(vcpu->kvm, irq);
-next:
- s_cpu_if->vgic_lr[i] = lr;
+write:
+ if (lr & ICH_LR_STATE) {
+ used_lrs = i + 1;
+ __gic_v3_set_lr(lr, i);
+ trace_vgic_restore_shadow_lr(vcpu, i, lr, orig_lr);
+ }
}
- trace_vgic_create_shadow_lrs(vcpu, kvm_vgic_global_state.nr_lr,
- s_cpu_if->vgic_lr, cpu_if->vgic_lr);
- s_cpu_if->used_lrs = kvm_vgic_global_state.nr_lr;
+ /* We reuse the existing used_lrs field here. Horrible, but hey. */
+ vcpu->arch.vgic_cpu.vgic_v3.used_lrs = used_lrs;
}
-/*
- * Change the shadow HWIRQ field back to the virtual value before copying over
- * the entire shadow struct to the nested state.
- */
-static void vgic_v3_fixup_shadow_lr_state(struct kvm_vcpu *vcpu)
+static void vgic_v3_restore_state_nested(struct kvm_vcpu *vcpu)
{
- struct vgic_v3_cpu_if *cpu_if = vcpu_nested_if(vcpu);
- struct vgic_v3_cpu_if *s_cpu_if = vcpu_shadow_if(vcpu);
- int lr;
-
- for (lr = 0; lr < kvm_vgic_global_state.nr_lr; lr++) {
- s_cpu_if->vgic_lr[lr] &= ~ICH_LR_PHYS_ID_MASK;
- s_cpu_if->vgic_lr[lr] |= cpu_if->vgic_lr[lr] & ICH_LR_PHYS_ID_MASK;
+ u64 val;
+ u32 nr_pre_bits;
+
+ write_gicreg(__vcpu_sys_reg(vcpu, ICH_VMCR_EL2), ICH_VMCR_EL2);
+ write_gicreg(__vcpu_sys_reg(vcpu, ICH_HCR_EL2), ICH_HCR_EL2);
+
+ val = read_gicreg(ICH_VTR_EL2);
+ nr_pre_bits = vtr_to_nr_pre_bits(val);
+
+ switch (nr_pre_bits) {
+ case 7:
+ write_gicreg(__vcpu_sys_reg(vcpu, ICH_AP0R3_EL2), ICH_AP0R3_EL2);
+ write_gicreg(__vcpu_sys_reg(vcpu, ICH_AP0R2_EL2), ICH_AP0R2_EL2);
+ case 6:
+ write_gicreg(__vcpu_sys_reg(vcpu, ICH_AP0R1_EL2), ICH_AP0R1_EL2);
+ default:
+ write_gicreg(__vcpu_sys_reg(vcpu, ICH_AP0R0_EL2), ICH_AP0R0_EL2);
}
-}
-
-void vgic_v3_sync_nested(struct kvm_vcpu *vcpu)
-{
- struct vgic_v3_cpu_if *cpu_if = vcpu_nested_if(vcpu);
- struct vgic_v3_cpu_if *s_cpu_if = vcpu_shadow_if(vcpu);
- struct vgic_irq *irq;
- int i;
-
- for (i = 0; i < s_cpu_if->used_lrs; i++) {
- u64 lr = cpu_if->vgic_lr[i];
- int l1_irq;
-
- if (!(lr & ICH_LR_HW) || !(lr & ICH_LR_STATE))
- continue;
-
- /*
- * If we had a HW lr programmed by the guest hypervisor, we
- * need to emulate the HW effect between the guest hypervisor
- * and the nested guest.
- */
- l1_irq = (lr & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
- irq = vgic_get_irq(vcpu->kvm, vcpu, l1_irq);
- if (!irq)
- continue; /* oh well, the guest hyp is broken */
- lr = __gic_v3_get_lr(i);
- if (!(lr & ICH_LR_STATE)) {
- trace_vgic_nested_hw_emulate(i, lr, l1_irq);
- irq->active = false;
- }
-
- vgic_put_irq(vcpu->kvm, irq);
+ switch (nr_pre_bits) {
+ case 7:
+ write_gicreg(__vcpu_sys_reg(vcpu, ICH_AP1R3_EL2), ICH_AP1R3_EL2);
+ write_gicreg(__vcpu_sys_reg(vcpu, ICH_AP1R2_EL2), ICH_AP1R2_EL2);
+ case 6:
+ write_gicreg(__vcpu_sys_reg(vcpu, ICH_AP1R1_EL2), ICH_AP1R1_EL2);
+ default:
+ write_gicreg(__vcpu_sys_reg(vcpu, ICH_AP1R0_EL2), ICH_AP1R0_EL2);
}
+
+ vgic_v3_restore_shadow_lrs(vcpu);
}
void vgic_v3_load_nested(struct kvm_vcpu *vcpu)
{
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_irq *irq;
unsigned long flags;
- vgic_cpu->shadow_vgic_v3 = vgic_cpu->nested_vgic_v3;
- vgic_v3_create_shadow_lr(vcpu);
- __vgic_v3_restore_state(vcpu_shadow_if(vcpu));
+ vgic_v3_restore_state_nested(vcpu);
irq = vgic_get_irq(vcpu->kvm, vcpu, vcpu->kvm->arch.vgic.maint_irq);
spin_lock_irqsave(&irq->irq_lock, flags);
@@ -195,28 +203,73 @@ void vgic_v3_load_nested(struct kvm_vcpu *vcpu)
vgic_put_irq(vcpu->kvm, irq);
}
-void vgic_v3_put_nested(struct kvm_vcpu *vcpu)
+/*
+ * Update the LR state field from the shadow LR in hardware.
+ */
+static void vgic_v3_save_shadow_lrs(struct kvm_vcpu *vcpu)
{
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ int i;
- __vgic_v3_save_state(vcpu_shadow_if(vcpu));
+ for (i = 0; i < vcpu->arch.vgic_cpu.vgic_v3.used_lrs; i++) {
+ u64 lr = __vcpu_sys_reg(vcpu, ICH_LR0_EL2 + i);
- trace_vgic_put_nested(vcpu, kvm_vgic_global_state.nr_lr,
- vcpu_shadow_if(vcpu)->vgic_lr);
+ if ((lr & ICH_LR_STATE)) {
+ u64 shadow_lr = __gic_v3_get_lr(i);
+
+ lr = (lr & ~ICH_LR_STATE) | (shadow_lr & ICH_LR_STATE);
+ __vcpu_sys_reg(vcpu, ICH_LR0_EL2 + i) = lr;
+
+ __gic_v3_set_lr(0, i);
+
+ trace_vgic_save_shadow_lr(vcpu, i, shadow_lr, lr);
+ }
+ }
+}
+
+static void vgic_v3_save_state_nested(struct kvm_vcpu *vcpu)
+{
+ u64 val;
+ u32 nr_pre_bits;
+
+ __vcpu_sys_reg(vcpu, ICH_VMCR_EL2) = read_gicreg(ICH_VMCR_EL2);
+ __vcpu_sys_reg(vcpu, ICH_HCR_EL2) = read_gicreg(ICH_HCR_EL2);
+
+ val = read_gicreg(ICH_VTR_EL2);
+ nr_pre_bits = vtr_to_nr_pre_bits(val);
+
+ switch (nr_pre_bits) {
+ case 7:
+ __vcpu_sys_reg(vcpu, ICH_AP0R3_EL2) = read_gicreg(ICH_AP0R3_EL2);
+ __vcpu_sys_reg(vcpu, ICH_AP0R2_EL2) = read_gicreg(ICH_AP0R2_EL2);
+ case 6:
+ __vcpu_sys_reg(vcpu, ICH_AP0R1_EL2) = read_gicreg(ICH_AP0R1_EL2);
+ default:
+ __vcpu_sys_reg(vcpu, ICH_AP0R0_EL2) = read_gicreg(ICH_AP0R0_EL2);
+ }
+
+ switch (nr_pre_bits) {
+ case 7:
+ __vcpu_sys_reg(vcpu, ICH_AP1R3_EL2) = read_gicreg(ICH_AP1R3_EL2);
+ __vcpu_sys_reg(vcpu, ICH_AP1R2_EL2) = read_gicreg(ICH_AP1R2_EL2);
+ case 6:
+ __vcpu_sys_reg(vcpu, ICH_AP1R1_EL2) = read_gicreg(ICH_AP1R1_EL2);
+ default:
+ __vcpu_sys_reg(vcpu, ICH_AP1R0_EL2) = read_gicreg(ICH_AP1R0_EL2);
+ }
+
+ vgic_v3_save_shadow_lrs(vcpu);
+}
+
+void vgic_v3_put_nested(struct kvm_vcpu *vcpu)
+{
+ vgic_v3_save_state_nested(vcpu);
- /*
- * Translate the shadow state HW fields back to the virtual ones
- * before copying the shadow struct back to the nested one.
- */
- vgic_v3_fixup_shadow_lr_state(vcpu);
- vgic_cpu->nested_vgic_v3 = vgic_cpu->shadow_vgic_v3;
irq_set_irqchip_state(kvm_vgic_global_state.maint_irq,
IRQCHIP_STATE_ACTIVE, false);
}
void vgic_v3_handle_nested_maint_irq(struct kvm_vcpu *vcpu)
{
- struct vgic_v3_cpu_if *cpu_if = vcpu_nested_if(vcpu);
bool state;
/*
@@ -228,7 +281,7 @@ void vgic_v3_handle_nested_maint_irq(struct kvm_vcpu *vcpu)
if (!vgic_state_is_nested(vcpu))
return;
- state = cpu_if->vgic_hcr & ICH_HCR_EN;
+ state = __vcpu_sys_reg(vcpu, ICH_HCR_EL2) & ICH_HCR_EN;
state &= vgic_v3_get_misr(vcpu);
kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index edcc9b0a2648c1..c548384f7d1c8c 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -299,12 +299,6 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu)
vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
ICC_SRE_EL1_DFB |
ICC_SRE_EL1_SRE);
- /*
- * If nesting is allowed, force GICv3 onto the nested
- * guests as well.
- */
- if (nested_virt_in_use(vcpu))
- vcpu->arch.vgic_cpu.nested_vgic_v3.vgic_sre = vgic_v3->vgic_sre;
vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
} else {
vgic_v3->vgic_sre = 0;
@@ -673,12 +667,10 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
- /*
- * vgic_v3_load_nested only affects the LRs in the shadow
- * state, so it is fine to pass the nested state around.
- */
- if (vgic_state_is_nested(vcpu))
- cpu_if = &vcpu->arch.vgic_cpu.nested_vgic_v3;
+ if (vgic_state_is_nested(vcpu)) {
+ vgic_v3_load_nested(vcpu);
+ return;
+ }
/*
* If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
@@ -692,17 +684,16 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
if (has_vhe())
__vgic_v3_activate_traps(cpu_if);
-
- if (vgic_state_is_nested(vcpu))
- vgic_v3_load_nested(vcpu);
}
void vgic_v3_put(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
- if (vgic_state_is_nested(vcpu))
- cpu_if = &vcpu->arch.vgic_cpu.shadow_vgic_v3;
+ if (vgic_state_is_nested(vcpu)) {
+ vgic_v3_put_nested(vcpu);
+ return;
+ }
if (likely(cpu_if->vgic_sre))
cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
@@ -711,9 +702,6 @@ void vgic_v3_put(struct kvm_vcpu *vcpu)
if (has_vhe())
__vgic_v3_deactivate_traps(cpu_if);
-
- if (vgic_state_is_nested(vcpu))
- vgic_v3_put_nested(vcpu);
}
__weak void vgic_v3_sync_nested(struct kvm_vcpu *vcpu) {}