Skip to content

Commit d62caab

Browse files
Andrey Smetanin authored and bonzini (Paolo Bonzini) committed
kvm/x86: per-vcpu apicv deactivation support
The decision on whether to use hardware APIC virtualization used to be taken globally, based on the availability of the feature in the CPU and the value of a module parameter. However, under certain circumstances we want to control it on per-vcpu basis. In particular, when the userspace activates HyperV synthetic interrupt controller (SynIC), APICv has to be disabled as it's incompatible with SynIC auto-EOI behavior. To achieve that, introduce 'apicv_active' flag on struct kvm_vcpu_arch, and kvm_vcpu_deactivate_apicv() function to turn APICv off. The flag is initialized based on the module parameter and CPU capability, and consulted whenever an APICv-specific action is performed. Signed-off-by: Andrey Smetanin <[email protected]> Reviewed-by: Roman Kagan <[email protected]> Signed-off-by: Denis V. Lunev <[email protected]> CC: Gleb Natapov <[email protected]> CC: Paolo Bonzini <[email protected]> CC: Roman Kagan <[email protected]> CC: Denis V. Lunev <[email protected]> CC: [email protected] Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 6308630 commit d62caab

File tree

7 files changed

+63
-47
lines changed

7 files changed

+63
-47
lines changed

arch/x86/include/asm/kvm_host.h

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -400,6 +400,7 @@ struct kvm_vcpu_arch {
400400
u64 efer;
401401
u64 apic_base;
402402
struct kvm_lapic *apic; /* kernel irqchip context */
403+
bool apicv_active;
403404
DECLARE_BITMAP(ioapic_handled_vectors, 256);
404405
unsigned long apic_attention;
405406
int32_t apic_arb_prio;
@@ -831,7 +832,8 @@ struct kvm_x86_ops {
831832
void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
832833
void (*enable_irq_window)(struct kvm_vcpu *vcpu);
833834
void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
834-
int (*cpu_uses_apicv)(struct kvm_vcpu *vcpu);
835+
bool (*get_enable_apicv)(void);
836+
void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
835837
void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
836838
void (*hwapic_isr_update)(struct kvm *kvm, int isr);
837839
void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
@@ -1086,6 +1088,8 @@ gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
10861088
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
10871089
struct x86_exception *exception);
10881090

1091+
void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);
1092+
10891093
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
10901094

10911095
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,

arch/x86/kvm/irq.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -76,7 +76,7 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
7676
if (kvm_cpu_has_extint(v))
7777
return 1;
7878

79-
if (kvm_vcpu_apic_vid_enabled(v))
79+
if (kvm_vcpu_apicv_active(v))
8080
return 0;
8181

8282
return kvm_apic_has_interrupt(v) != -1; /* LAPIC */

arch/x86/kvm/lapic.c

Lines changed: 12 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -379,7 +379,8 @@ static inline int apic_find_highest_irr(struct kvm_lapic *apic)
379379
if (!apic->irr_pending)
380380
return -1;
381381

382-
kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
382+
if (apic->vcpu->arch.apicv_active)
383+
kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
383384
result = apic_search_irr(apic);
384385
ASSERT(result == -1 || result >= 16);
385386

@@ -392,7 +393,7 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
392393

393394
vcpu = apic->vcpu;
394395

395-
if (unlikely(kvm_vcpu_apic_vid_enabled(vcpu))) {
396+
if (unlikely(vcpu->arch.apicv_active)) {
396397
/* try to update RVI */
397398
apic_clear_vector(vec, apic->regs + APIC_IRR);
398399
kvm_make_request(KVM_REQ_EVENT, vcpu);
@@ -418,7 +419,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
418419
* because the processor can modify ISR under the hood. Instead
419420
* just set SVI.
420421
*/
421-
if (unlikely(kvm_x86_ops->hwapic_isr_update))
422+
if (unlikely(vcpu->arch.apicv_active))
422423
kvm_x86_ops->hwapic_isr_update(vcpu->kvm, vec);
423424
else {
424425
++apic->isr_count;
@@ -466,7 +467,7 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
466467
* on the other hand isr_count and highest_isr_cache are unused
467468
* and must be left alone.
468469
*/
469-
if (unlikely(kvm_x86_ops->hwapic_isr_update))
470+
if (unlikely(vcpu->arch.apicv_active))
470471
kvm_x86_ops->hwapic_isr_update(vcpu->kvm,
471472
apic_find_highest_isr(apic));
472473
else {
@@ -852,7 +853,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
852853
apic_clear_vector(vector, apic->regs + APIC_TMR);
853854
}
854855

855-
if (kvm_x86_ops->deliver_posted_interrupt)
856+
if (vcpu->arch.apicv_active)
856857
kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
857858
else {
858859
apic_set_irr(vector, apic);
@@ -1225,7 +1226,7 @@ static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
12251226
int vec = reg & APIC_VECTOR_MASK;
12261227
void *bitmap = apic->regs + APIC_ISR;
12271228

1228-
if (kvm_x86_ops->deliver_posted_interrupt)
1229+
if (vcpu->arch.apicv_active)
12291230
bitmap = apic->regs + APIC_IRR;
12301231

12311232
if (apic_test_vector(vec, bitmap))
@@ -1693,8 +1694,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
16931694
apic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
16941695
apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
16951696
}
1696-
apic->irr_pending = kvm_vcpu_apic_vid_enabled(vcpu);
1697-
apic->isr_count = kvm_x86_ops->hwapic_isr_update ? 1 : 0;
1697+
apic->irr_pending = vcpu->arch.apicv_active;
1698+
apic->isr_count = vcpu->arch.apicv_active ? 1 : 0;
16981699
apic->highest_isr_cache = -1;
16991700
update_divide_count(apic);
17001701
atomic_set(&apic->lapic_timer.pending, 0);
@@ -1906,15 +1907,15 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
19061907
update_divide_count(apic);
19071908
start_apic_timer(apic);
19081909
apic->irr_pending = true;
1909-
apic->isr_count = kvm_x86_ops->hwapic_isr_update ?
1910+
apic->isr_count = vcpu->arch.apicv_active ?
19101911
1 : count_vectors(apic->regs + APIC_ISR);
19111912
apic->highest_isr_cache = -1;
1912-
if (kvm_x86_ops->hwapic_irr_update)
1913+
if (vcpu->arch.apicv_active) {
19131914
kvm_x86_ops->hwapic_irr_update(vcpu,
19141915
apic_find_highest_irr(apic));
1915-
if (unlikely(kvm_x86_ops->hwapic_isr_update))
19161916
kvm_x86_ops->hwapic_isr_update(vcpu->kvm,
19171917
apic_find_highest_isr(apic));
1918+
}
19181919
kvm_make_request(KVM_REQ_EVENT, vcpu);
19191920
if (ioapic_in_kernel(vcpu->kvm))
19201921
kvm_rtc_eoi_tracking_restore_one(vcpu);

arch/x86/kvm/lapic.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -143,9 +143,9 @@ static inline int apic_x2apic_mode(struct kvm_lapic *apic)
143143
return apic->vcpu->arch.apic_base & X2APIC_ENABLE;
144144
}
145145

146-
static inline bool kvm_vcpu_apic_vid_enabled(struct kvm_vcpu *vcpu)
146+
static inline bool kvm_vcpu_apicv_active(struct kvm_vcpu *vcpu)
147147
{
148-
return kvm_x86_ops->cpu_uses_apicv(vcpu);
148+
return vcpu->arch.apic && vcpu->arch.apicv_active;
149149
}
150150

151151
static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)

arch/x86/kvm/svm.c

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3559,9 +3559,13 @@ static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
35593559
return;
35603560
}
35613561

3562-
static int svm_cpu_uses_apicv(struct kvm_vcpu *vcpu)
3562+
static bool svm_get_enable_apicv(void)
3563+
{
3564+
return false;
3565+
}
3566+
3567+
static void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
35633568
{
3564-
return 0;
35653569
}
35663570

35673571
static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
@@ -4328,7 +4332,8 @@ static struct kvm_x86_ops svm_x86_ops = {
43284332
.enable_irq_window = enable_irq_window,
43294333
.update_cr8_intercept = update_cr8_intercept,
43304334
.set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
4331-
.cpu_uses_apicv = svm_cpu_uses_apicv,
4335+
.get_enable_apicv = svm_get_enable_apicv,
4336+
.refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
43324337
.load_eoi_exitmap = svm_load_eoi_exitmap,
43334338
.sync_pir_to_irr = svm_sync_pir_to_irr,
43344339

arch/x86/kvm/vmx.c

Lines changed: 19 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@
1919
#include "irq.h"
2020
#include "mmu.h"
2121
#include "cpuid.h"
22+
#include "lapic.h"
2223

2324
#include <linux/kvm_host.h>
2425
#include <linux/module.h>
@@ -862,15 +863,13 @@ static void kvm_cpu_vmxon(u64 addr);
862863
static void kvm_cpu_vmxoff(void);
863864
static bool vmx_mpx_supported(void);
864865
static bool vmx_xsaves_supported(void);
865-
static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu);
866866
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
867867
static void vmx_set_segment(struct kvm_vcpu *vcpu,
868868
struct kvm_segment *var, int seg);
869869
static void vmx_get_segment(struct kvm_vcpu *vcpu,
870870
struct kvm_segment *var, int seg);
871871
static bool guest_state_valid(struct kvm_vcpu *vcpu);
872872
static u32 vmx_segment_access_rights(struct kvm_segment *var);
873-
static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu);
874873
static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
875874
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
876875
static int alloc_identity_pagetable(struct kvm *kvm);
@@ -2498,7 +2497,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
24982497
vmx->nested.nested_vmx_pinbased_ctls_high |=
24992498
PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
25002499
PIN_BASED_VMX_PREEMPTION_TIMER;
2501-
if (vmx_cpu_uses_apicv(&vmx->vcpu))
2500+
if (kvm_vcpu_apicv_active(&vmx->vcpu))
25022501
vmx->nested.nested_vmx_pinbased_ctls_high |=
25032502
PIN_BASED_POSTED_INTR;
25042503

@@ -4462,9 +4461,9 @@ static void vmx_disable_intercept_msr_write_x2apic(u32 msr)
44624461
msr, MSR_TYPE_W);
44634462
}
44644463

4465-
static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu)
4464+
static bool vmx_get_enable_apicv(void)
44664465
{
4467-
return enable_apicv && lapic_in_kernel(vcpu);
4466+
return enable_apicv;
44684467
}
44694468

44704469
static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
@@ -4586,11 +4585,6 @@ static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
45864585
kvm_apic_update_irr(vcpu, vmx->pi_desc.pir);
45874586
}
45884587

4589-
static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu)
4590-
{
4591-
return;
4592-
}
4593-
45944588
/*
45954589
* Set up the vmcs's constant host-state fields, i.e., host-state fields that
45964590
* will not change in the lifetime of the guest.
@@ -4660,11 +4654,18 @@ static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
46604654
{
46614655
u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl;
46624656

4663-
if (!vmx_cpu_uses_apicv(&vmx->vcpu))
4657+
if (!kvm_vcpu_apicv_active(&vmx->vcpu))
46644658
pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
46654659
return pin_based_exec_ctrl;
46664660
}
46674661

4662+
static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
4663+
{
4664+
struct vcpu_vmx *vmx = to_vmx(vcpu);
4665+
4666+
vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));
4667+
}
4668+
46684669
static u32 vmx_exec_control(struct vcpu_vmx *vmx)
46694670
{
46704671
u32 exec_control = vmcs_config.cpu_based_exec_ctrl;
@@ -4703,7 +4704,7 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
47034704
exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
47044705
if (!ple_gap)
47054706
exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
4706-
if (!vmx_cpu_uses_apicv(&vmx->vcpu))
4707+
if (!kvm_vcpu_apicv_active(&vmx->vcpu))
47074708
exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
47084709
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
47094710
exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
@@ -4767,7 +4768,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
47674768
vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
47684769
vmx_secondary_exec_control(vmx));
47694770

4770-
if (vmx_cpu_uses_apicv(&vmx->vcpu)) {
4771+
if (kvm_vcpu_apicv_active(&vmx->vcpu)) {
47714772
vmcs_write64(EOI_EXIT_BITMAP0, 0);
47724773
vmcs_write64(EOI_EXIT_BITMAP1, 0);
47734774
vmcs_write64(EOI_EXIT_BITMAP2, 0);
@@ -4919,7 +4920,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
49194920

49204921
kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
49214922

4922-
if (vmx_cpu_uses_apicv(vcpu))
4923+
if (kvm_vcpu_apicv_active(vcpu))
49234924
memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));
49244925

49254926
if (vmx->vpid != 0)
@@ -6203,15 +6204,6 @@ static __init int hardware_setup(void)
62036204
kvm_tsc_scaling_ratio_frac_bits = 48;
62046205
}
62056206

6206-
if (enable_apicv)
6207-
kvm_x86_ops->update_cr8_intercept = NULL;
6208-
else {
6209-
kvm_x86_ops->hwapic_irr_update = NULL;
6210-
kvm_x86_ops->hwapic_isr_update = NULL;
6211-
kvm_x86_ops->deliver_posted_interrupt = NULL;
6212-
kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
6213-
}
6214-
62156207
vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
62166208
vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
62176209
vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
@@ -8152,7 +8144,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
81528144
* apicv
81538145
*/
81548146
if (!cpu_has_vmx_virtualize_x2apic_mode() ||
8155-
!vmx_cpu_uses_apicv(vcpu))
8147+
!kvm_vcpu_apicv_active(vcpu))
81568148
return;
81578149

81588150
if (!cpu_need_tpr_shadow(vcpu))
@@ -8259,7 +8251,7 @@ static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
82598251

82608252
static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
82618253
{
8262-
if (!vmx_cpu_uses_apicv(vcpu))
8254+
if (!kvm_vcpu_apicv_active(vcpu))
82638255
return;
82648256

82658257
vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
@@ -10803,7 +10795,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
1080310795
.update_cr8_intercept = update_cr8_intercept,
1080410796
.set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
1080510797
.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
10806-
.cpu_uses_apicv = vmx_cpu_uses_apicv,
10798+
.get_enable_apicv = vmx_get_enable_apicv,
10799+
.refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
1080710800
.load_eoi_exitmap = vmx_load_eoi_exitmap,
1080810801
.hwapic_irr_update = vmx_hwapic_irr_update,
1080910802
.hwapic_isr_update = vmx_hwapic_isr_update,

arch/x86/kvm/x86.c

Lines changed: 16 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2748,7 +2748,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
27482748
static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
27492749
struct kvm_lapic_state *s)
27502750
{
2751-
kvm_x86_ops->sync_pir_to_irr(vcpu);
2751+
if (vcpu->arch.apicv_active)
2752+
kvm_x86_ops->sync_pir_to_irr(vcpu);
2753+
27522754
memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
27532755

27542756
return 0;
@@ -5867,6 +5869,12 @@ static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
58675869
kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL);
58685870
}
58695871

5872+
void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
5873+
{
5874+
vcpu->arch.apicv_active = false;
5875+
kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu);
5876+
}
5877+
58705878
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
58715879
{
58725880
unsigned long nr, a0, a1, a2, a3, ret;
@@ -5960,6 +5968,9 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
59605968
if (!vcpu->arch.apic)
59615969
return;
59625970

5971+
if (vcpu->arch.apicv_active)
5972+
return;
5973+
59635974
if (!vcpu->arch.apic->vapic_addr)
59645975
max_irr = kvm_lapic_find_highest_irr(vcpu);
59655976
else
@@ -6306,7 +6317,8 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
63066317
if (irqchip_split(vcpu->kvm))
63076318
kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
63086319
else {
6309-
kvm_x86_ops->sync_pir_to_irr(vcpu);
6320+
if (vcpu->arch.apicv_active)
6321+
kvm_x86_ops->sync_pir_to_irr(vcpu);
63106322
kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
63116323
}
63126324
kvm_x86_ops->load_eoi_exitmap(vcpu,
@@ -6453,7 +6465,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
64536465
* Update architecture specific hints for APIC
64546466
* virtual interrupt delivery.
64556467
*/
6456-
if (kvm_x86_ops->hwapic_irr_update)
6468+
if (vcpu->arch.apicv_active)
64576469
kvm_x86_ops->hwapic_irr_update(vcpu,
64586470
kvm_lapic_find_highest_irr(vcpu));
64596471
}
@@ -7524,6 +7536,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
75247536
BUG_ON(vcpu->kvm == NULL);
75257537
kvm = vcpu->kvm;
75267538

7539+
vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv();
75277540
vcpu->arch.pv.pv_unhalted = false;
75287541
vcpu->arch.emulate_ctxt.ops = &emulate_ops;
75297542
if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_reset_bsp(vcpu))

0 commit comments

Comments
 (0)