diff options
author | Jiri Slaby <jslaby@suse.cz> | 2019-12-09 09:13:13 +0100 |
---|---|---|
committer | Jiri Slaby <jslaby@suse.cz> | 2019-12-11 13:56:44 +0100 |
commit | cfadfb028d81371cded5ede154cc41da3b5e1989 (patch) | |
tree | 2c0fb680005f6c55dd08e1410c8723a9245af7ea | |
parent | 7aa917809b5e48271cac3b6091b1e1a2f5d51a81 (diff) | |
download | linux-sev-rebase.tar.gz |
svm: add support for encrypted get_*regs (branch: sev-rebase)
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
-rw-r--r-- | arch/x86/include/asm/kvm_host.h | 4 | ||||
-rw-r--r-- | arch/x86/include/asm/svm.h | 15 | ||||
-rw-r--r-- | arch/x86/kvm/svm.c | 222 | ||||
-rw-r--r-- | arch/x86/kvm/x86.c | 34 |
4 files changed, 254 insertions, 21 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index dc4cf84750db5d..ccf9a8a88bcca8 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1212,6 +1212,10 @@ struct kvm_x86_ops { unsigned long val); void (*reg_write)(struct kvm_vcpu *vcpu, enum kvm_reg reg, unsigned long val); + bool (*get_regs)(struct kvm_vcpu *vcpu, struct kvm_regs *regs); + bool (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); + bool (*get_fpu)(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); + bool (*get_xsave)(struct kvm_vcpu *vcpu, struct kvm_xsave *guest_xsave); bool (*allow_debug)(struct kvm *kvm); }; diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 2d88c6244a15c7..a4efabe40a07f0 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -272,8 +272,19 @@ struct __attribute__ ((__packed__)) vmcb_save_area { u8 reserved_11[56]; u64 xcr0; u8 valid_bitmap[16]; - u64 x87_state_gpa; - u8 reserved_12[1016]; + u64 x87_datapa; + u32 x87_mxcsr; + u16 x87_ftw; + u16 x87_fsw; + u16 x87_fcw; + u16 x87_fop; + u16 x87_ds; + u16 x87_cs; + u64 x87_rip; + u8 x87_regs[8][10]; + u8 x87_xmm[16][16]; + u8 x87_ymm_hi[16][16]; + u8 reserved_12[400]; }; struct __attribute__ ((__packed__)) vmcb { diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index e87228f81dc6de..44db60b1b78f58 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -2445,11 +2445,10 @@ static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg) return s->base; } -static void svm_get_segment(struct kvm_vcpu *vcpu, - struct kvm_segment *var, int seg) +static void __svm_get_segment(struct kvm_vcpu *vcpu, + struct kvm_segment *var, int seg, + const struct vmcb_seg *s) { - struct vmcb_seg *s = svm_seg(vcpu, seg); - var->base = s->base; var->limit = s->limit; var->selector = s->selector; @@ -2514,6 +2513,12 @@ static void svm_get_segment(struct kvm_vcpu *vcpu, } } +static void svm_get_segment(struct kvm_vcpu *vcpu, + struct 
kvm_segment *var, int seg) +{ + __svm_get_segment(vcpu, var, seg, svm_seg(vcpu, seg)); +} + static int svm_get_cpl(struct kvm_vcpu *vcpu) { return vmsa_cpl_read(to_svm(vcpu)); @@ -5358,6 +5363,211 @@ static void dump_ghcb(struct kvm_vcpu *vcpu) dump_ghcb_valid_bitmap(vcpu); } +static struct vmcb_save_area *svm_decrypt_regs(struct kvm_vcpu *vcpu) +{ + struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info; + struct vcpu_svm *svm = to_svm(vcpu); + struct vmcb_save_area *save; + struct sev_data_dbg *dbg; + struct page *save_page; + int ret, error; + + dbg = kzalloc(sizeof(*dbg), GFP_KERNEL); + if (!dbg) + return ERR_PTR(-ENOMEM); + + save_page = alloc_page(GFP_KERNEL); + if (!save_page) { + kfree(dbg); + return ERR_PTR(-ENOMEM); + } + + save = page_address(save_page); + + wbinvd_on_all_cpus(); + + dbg->handle = sev->handle; + dbg->dst_addr = __psp_pa(save); + dbg->src_addr = svm->vmcb->control.vmsa_pa; + dbg->len = PAGE_SIZE; + + ret = sev_do_cmd(SEV_CMD_DBG_DECRYPT, dbg, &error); + kfree(dbg); + if (ret) { + pr_err("%s: SEV_CMD_DBG_DECRYPT error, ret=%d, error=%d\n", __func__, ret, error); + __free_page(save_page); + return ERR_PTR(ret); + } + + return save; +} + +static bool svm_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + struct vmcb_save_area *save; + + if (!vcpu->arch.vmsa_encrypted) + return false; + + pr_info("%s(%d)\n", __func__, __LINE__); + + save = svm_decrypt_regs(vcpu); + if (IS_ERR(save)) + return true; + + regs->rax = save->rax; + regs->rbx = save->rbx; + regs->rcx = save->rcx; + regs->rdx = save->rdx; + regs->rsi = save->rsi; + regs->rdi = save->rdi; + regs->rsp = save->rsp; + regs->rbp = save->rbp; +#ifdef CONFIG_X86_64 + regs->r8 = save->r8; + regs->r9 = save->r9; + regs->r10 = save->r10; + regs->r11 = save->r11; + regs->r12 = save->r12; + regs->r13 = save->r13; + regs->r14 = save->r14; + regs->r15 = save->r15; +#endif + + regs->rip = save->rip; + regs->rflags = save->rflags; + + __free_page(virt_to_page(save)); + + return true; +} + +static void 
__svm_get_segment(struct kvm_vcpu *vcpu, + struct kvm_segment *var, int seg, + const struct vmcb_seg *s); + +static bool svm_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) +{ + struct vmcb_save_area *save; + + if (!vcpu->arch.vmsa_encrypted) + return false; + + pr_info("%s(%d)\n", __func__, __LINE__); + + save = svm_decrypt_regs(vcpu); + if (IS_ERR(save)) + return true; + + __svm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS, &save->cs); + __svm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS, &save->ds); + __svm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES, &save->es); + __svm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS, &save->fs); + __svm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS, &save->gs); + __svm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS, &save->ss); + + __svm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR, &save->tr); + __svm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR, &save->ldtr); + + sregs->idt.limit = save->idtr.limit; + sregs->idt.base = save->idtr.base; + sregs->gdt.limit = save->gdtr.limit; + sregs->gdt.base = save->gdtr.base; + + sregs->cr0 = save->cr0; + sregs->cr2 = save->cr2; + sregs->cr3 = save->cr3; + sregs->cr4 = save->cr4; + sregs->cr8 = kvm_get_cr8(vcpu); // TODO + sregs->efer = save->efer; + sregs->apic_base = kvm_get_apic_base(vcpu); // TODO + + __free_page(virt_to_page(save)); + + memset(sregs->interrupt_bitmap, 0, sizeof(sregs->interrupt_bitmap)); + + if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft) + set_bit(vcpu->arch.interrupt.nr, + (unsigned long *)sregs->interrupt_bitmap); + + return true; +} + +static bool svm_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) +{ + struct vmcb_save_area *save; + unsigned int a; + + if (!vcpu->arch.vmsa_encrypted) + return false; + + pr_info("%s(%d)\n", __func__, __LINE__); + + save = svm_decrypt_regs(vcpu); + if (IS_ERR(save)) + return true; + + //memcpy(fpu->fpr, save->x87_regs, sizeof(save->x87_regs)); + for (a = 0; a < 8; a++) { + memset(&fpu->fpr[a][10], 0, 6); + 
memcpy(fpu->fpr[a], &save->x87_regs[a], sizeof(save->x87_regs[a])); + } + fpu->fcw = save->x87_fcw; + fpu->fsw = save->x87_fsw; + fpu->ftwx = save->x87_ftw; + fpu->mxcsr = save->x87_mxcsr; + fpu->last_opcode = save->x87_fop; + fpu->last_ip = save->x87_rip; + fpu->last_dp = save->x87_datapa; + + memcpy(fpu->xmm, save->x87_xmm, sizeof(save->x87_xmm)); + + print_hex_dump(KERN_DEBUG, "FPU ", DUMP_PREFIX_OFFSET, 16, 1, + &save->valid_bitmap, + offsetof(struct vmcb_save_area, reserved_12) - + offsetof(struct vmcb_save_area, valid_bitmap), true); + + __free_page(virt_to_page(save)); + + return true; +} + +bool svm_get_xsave(struct kvm_vcpu *vcpu, struct kvm_xsave *guest_xsave) +{ + struct vmcb_save_area *save; + struct fxregs_state *fxregs = (void *)guest_xsave; + unsigned int a; + + if (!vcpu->arch.vmsa_encrypted) + return false; + + pr_info("%s(%d)\n", __func__, __LINE__); + + save = svm_decrypt_regs(vcpu); + if (IS_ERR(save)) + return true; + + for (a = 0; a < 8; a++) { + memset((u8 *)fxregs->st_space + a * 16 + 10, 0, 6); + memcpy((u8 *)fxregs->st_space + a * 16, &save->x87_regs[a], sizeof(save->x87_regs[a])); + } + fxregs->cwd = save->x87_fcw; + fxregs->swd = save->x87_fsw; + fxregs->twd = save->x87_ftw; + fxregs->mxcsr = save->x87_mxcsr; // TODO mxcsr_mask + fxregs->fop = save->x87_fop; + fxregs->rip = save->x87_rip; + fxregs->rdp = save->x87_datapa; + + memcpy(fxregs->xmm_space, save->x87_xmm, sizeof(save->x87_xmm)); + + print_hex_dump(KERN_DEBUG, "FPU ", DUMP_PREFIX_OFFSET, 16, 1, + &save->valid_bitmap, + offsetof(struct vmcb_save_area, reserved_12) - + offsetof(struct vmcb_save_area, valid_bitmap), true); + + __free_page(virt_to_page(save)); + + return true; +} + static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2) { struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control; @@ -8147,6 +8357,10 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .reg_read = svm_reg_read, .reg_write_override = svm_reg_write_override, .reg_write = svm_reg_write, + .get_regs = svm_get_regs, + 
.get_sregs = svm_get_sregs, + .get_fpu = svm_get_fpu, + .get_xsave = svm_get_xsave, .allow_debug = svm_allow_debug, }; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 851f8f6e5f659c..38969f6b7d8b92 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3799,6 +3799,9 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, struct kvm_xsave *guest_xsave) { if (boot_cpu_has(X86_FEATURE_XSAVE)) { + if (kvm_x86_ops->get_xsave && kvm_x86_ops->get_xsave(vcpu, guest_xsave)) + return; + memset(guest_xsave, 0, sizeof(struct kvm_xsave)); fill_xsave((u8 *) guest_xsave->region, vcpu); } else { @@ -8346,7 +8349,8 @@ static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { vcpu_load(vcpu); - __get_regs(vcpu, regs); + if (!kvm_x86_ops->get_regs || !kvm_x86_ops->get_regs(vcpu, regs)) + __get_regs(vcpu, regs); vcpu_put(vcpu); return 0; } @@ -8443,7 +8447,8 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { vcpu_load(vcpu); - __get_sregs(vcpu, sregs); + if (!kvm_x86_ops->get_sregs || !kvm_x86_ops->get_sregs(vcpu, sregs)) + __get_sregs(vcpu, sregs); vcpu_put(vcpu); return 0; } @@ -8731,20 +8736,19 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { - struct fxregs_state *fxsave; - vcpu_load(vcpu); - - fxsave = &vcpu->arch.guest_fpu->state.fxsave; - memcpy(fpu->fpr, fxsave->st_space, 128); - fpu->fcw = fxsave->cwd; - fpu->fsw = fxsave->swd; - fpu->ftwx = fxsave->twd; - fpu->last_opcode = fxsave->fop; - fpu->last_ip = fxsave->rip; - fpu->last_dp = fxsave->rdp; - memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space)); - + if (!kvm_x86_ops->get_fpu || !kvm_x86_ops->get_fpu(vcpu, fpu)) { + struct fxregs_state *fxsave = &vcpu->arch.guest_fpu->state.fxsave; + + memcpy(fpu->fpr, fxsave->st_space, 128); + fpu->fcw = fxsave->cwd; + fpu->fsw = 
fxsave->swd; + fpu->ftwx = fxsave->twd; + fpu->last_opcode = fxsave->fop; + fpu->last_ip = fxsave->rip; + fpu->last_dp = fxsave->rdp; + memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space)); + } vcpu_put(vcpu); return 0; } |