Skip to content

Commit 05cade7

Browse files
ladipro authored and bonzini committed
(Ladi Prosek authored; Paolo Bonzini committed)
KVM: nSVM: fix SMI injection in guest mode
Entering SMM while running in guest mode wasn't working very well because several pieces of the vcpu state were left set up for nested operation. Some of the issues observed: * L1 was getting unexpected VM exits (using L1 interception controls but running in SMM execution environment) * MMU was confused (walk_mmu was still set to nested_mmu) * INTERCEPT_SMI was not emulated for L1 (KVM never injected SVM_EXIT_SMI) Intel SDM actually prescribes the logical processor to "leave VMX operation" upon entering SMM in 34.14.1 Default Treatment of SMI Delivery. AMD doesn't seem to document this but they provide fields in the SMM state-save area to stash the current state of SVM. What we need to do is basically get out of guest mode for the duration of SMM. All this completely transparent to L1, i.e. L1 is not given control and no L1 observable state changes. To avoid code duplication this commit takes advantage of the existing nested vmexit and run functionality, perhaps at the cost of efficiency. To get out of guest mode, nested_svm_vmexit is called, unchanged. Re-entering is performed using enter_svm_guest_mode. This commit fixes running Windows Server 2016 with Hyper-V enabled in a VM with OVMF firmware (OVMF_CODE-need-smm.fd). Signed-off-by: Ladi Prosek <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
1 parent c263406 commit 05cade7

File tree

3 files changed

+58
-6
lines changed

3 files changed

+58
-6
lines changed

arch/x86/include/asm/kvm_host.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1430,4 +1430,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
14301430
#endif
14311431
}
14321432

1433+
/*
 * Store a value into the SMM state-save area image.
 *
 * @buf holds the state-save region whose architectural offsets start at
 * 0x7e00 (e.g. the callers below use 0x7ed8/0x7ee0), so the offset is
 * rebased before indexing: buf[offset - 0x7e00].
 *
 * Fix vs. original: parenthesize (val) in the expansion so a complex
 * expression argument cannot reassociate with the assignment.
 */
#define put_smstate(type, buf, offset, val) \
	*(type *)((buf) + (offset) - 0x7e00) = (val)
1435+
14331436
#endif /* _ASM_X86_KVM_HOST_H */

arch/x86/kvm/svm.c

Lines changed: 55 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -5409,19 +5409,71 @@ static void svm_setup_mce(struct kvm_vcpu *vcpu)
54095409

54105410
static int svm_smi_allowed(struct kvm_vcpu *vcpu)
54115411
{
5412+
struct vcpu_svm *svm = to_svm(vcpu);
5413+
5414+
/* Per APM Vol.2 15.22.2 "Response to SMI" */
5415+
if (!gif_set(svm))
5416+
return 0;
5417+
5418+
if (is_guest_mode(&svm->vcpu) &&
5419+
svm->nested.intercept & (1ULL << INTERCEPT_SMI)) {
5420+
/* TODO: Might need to set exit_info_1 and exit_info_2 here */
5421+
svm->vmcb->control.exit_code = SVM_EXIT_SMI;
5422+
svm->nested.exit_required = true;
5423+
return 0;
5424+
}
5425+
54125426
return 1;
54135427
}
54145428

54155429
static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
54165430
{
5417-
/* TODO: Implement */
5431+
struct vcpu_svm *svm = to_svm(vcpu);
5432+
int ret;
5433+
5434+
if (is_guest_mode(vcpu)) {
5435+
/* FED8h - SVM Guest */
5436+
put_smstate(u64, smstate, 0x7ed8, 1);
5437+
/* FEE0h - SVM Guest VMCB Physical Address */
5438+
put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb);
5439+
5440+
svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
5441+
svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
5442+
svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
5443+
5444+
ret = nested_svm_vmexit(svm);
5445+
if (ret)
5446+
return ret;
5447+
}
54185448
return 0;
54195449
}
54205450

54215451
/*
 * Undo svm_pre_enter_smm() when leaving SMM (RSM).
 *
 * Reads the SVM fields stashed in the SMM state-save area at
 * smbase + 0xfed8 and, if an L2 guest was interrupted by the SMI,
 * re-enters guest mode from the saved VMCB.
 * Returns 0 on success, nonzero on read or VMCB map failure.
 */
static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *nested_vmcb;
	struct page *page;
	struct {
		u64 guest;	/* FED8h: nonzero if SMI hit in guest mode */
		u64 vmcb;	/* FEE0h: guest VMCB physical address */
	} svm_state_save;
	int ret;

	ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfed8, &svm_state_save,
				  sizeof(svm_state_save));
	if (ret)
		return ret;

	if (!svm_state_save.guest)
		return 0;

	/* Clear HF_SMM_MASK around the re-entry; it is restored below. */
	vcpu->arch.hflags &= ~HF_SMM_MASK;
	nested_vmcb = nested_svm_map(svm, svm_state_save.vmcb, &page);
	if (nested_vmcb)
		enter_svm_guest_mode(svm, svm_state_save.vmcb, nested_vmcb, page);
	else
		ret = 1;
	vcpu->arch.hflags |= HF_SMM_MASK;

	return ret;
}
54265478

54275479
static struct kvm_x86_ops svm_x86_ops __ro_after_init = {

arch/x86/kvm/x86.c

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6485,9 +6485,6 @@ static void process_nmi(struct kvm_vcpu *vcpu)
64856485
kvm_make_request(KVM_REQ_EVENT, vcpu);
64866486
}
64876487

6488-
#define put_smstate(type, buf, offset, val) \
6489-
*(type *)((buf) + (offset) - 0x7e00) = val
6490-
64916488
static u32 enter_smm_get_segment_flags(struct kvm_segment *seg)
64926489
{
64936490
u32 flags = 0;

0 commit comments

Comments
 (0)