@@ -3555,6 +3555,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 							   &invalid_list);
 			mmu->root_hpa = INVALID_PAGE;
 		}
+		mmu->root_cr3 = 0;
 	}
 
 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
@@ -3610,6 +3611,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
 	} else
 		BUG();
+	vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
 
 	return 0;
 }
@@ -3618,10 +3620,11 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu_page *sp;
 	u64 pdptr, pm_mask;
-	gfn_t root_gfn;
+	gfn_t root_gfn, root_cr3;
 	int i;
 
-	root_gfn = vcpu->arch.mmu->get_cr3(vcpu) >> PAGE_SHIFT;
+	root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
+	root_gfn = root_cr3 >> PAGE_SHIFT;
 
 	if (mmu_check_root(vcpu, root_gfn))
 		return 1;
@@ -3646,7 +3649,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 		++sp->root_count;
 		spin_unlock(&vcpu->kvm->mmu_lock);
 		vcpu->arch.mmu->root_hpa = root;
-		return 0;
+		goto set_root_cr3;
 	}
 
 	/*
@@ -3712,6 +3715,9 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
 	}
 
+set_root_cr3:
+	vcpu->arch.mmu->root_cr3 = root_cr3;
+
 	return 0;
 }
 
@@ -4163,7 +4169,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 	struct kvm_mmu_root_info root;
 	struct kvm_mmu *mmu = vcpu->arch.mmu;
 
-	root.cr3 = mmu->get_cr3(vcpu);
+	root.cr3 = mmu->root_cr3;
 	root.hpa = mmu->root_hpa;
 
 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
@@ -4176,6 +4182,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 	}
 
 	mmu->root_hpa = root.hpa;
+	mmu->root_cr3 = root.cr3;
 
 	return i < KVM_MMU_NUM_PREV_ROOTS;
 }
@@ -4770,6 +4777,7 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
 	ext.cr4_pse = !!is_pse(vcpu);
 	ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
 	ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
+	ext.maxphyaddr = cpuid_maxphyaddr(vcpu);
 
 	ext.valid = 1;
 
@@ -5516,11 +5524,13 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
 	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
 
 	vcpu->arch.root_mmu.root_hpa = INVALID_PAGE;
+	vcpu->arch.root_mmu.root_cr3 = 0;
 	vcpu->arch.root_mmu.translate_gpa = translate_gpa;
 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
 		vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
 
 	vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE;
+	vcpu->arch.guest_mmu.root_cr3 = 0;
 	vcpu->arch.guest_mmu.translate_gpa = translate_gpa;
 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
 		vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
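
Note on the cached_root_available() change above: the function rotates the current root through prev_roots[] and compares each entry's cr3 against new_cr3. The old code compared against mmu->get_cr3(vcpu), i.e. the guest's *current* CR3, which is not necessarily the CR3 that root_hpa was built from (for example after switching between root_mmu and guest_mmu, both of which the patch now initializes); caching root_cr3 alongside root_hpa keeps the pair consistent. Below is a minimal user-space sketch of that swap loop under stated assumptions: struct mmu, struct root_info, NUM_PREV_ROOTS and all the addresses are illustrative stand-ins, not the kernel's definitions, and the kernel's additional page-role check is dropped for brevity.

/*
 * Minimal sketch of the prev_roots rotation done by
 * cached_root_available() in the diff above. Names, sizes and values
 * are illustrative; the point is that the lookup compares against the
 * CR3 the current root was built for (root_cr3), which the patch now
 * caches next to root_hpa instead of re-reading the guest's CR3.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_PREV_ROOTS 3
#define INVALID_PAGE   ((uint64_t)-1)

struct root_info {
	uint64_t cr3;	/* guest CR3 this root was built for */
	uint64_t hpa;	/* host physical address of the root table */
};

struct mmu {
	uint64_t root_hpa;
	uint64_t root_cr3;	/* cached, always mirrors root_hpa */
	struct root_info prev_roots[NUM_PREV_ROOTS];
};

/* Returns true if a previously used root matches new_cr3, rotating the
 * current root through prev_roots[] as the kernel loop does. */
static bool cached_root_available(struct mmu *mmu, uint64_t new_cr3)
{
	struct root_info root = { .cr3 = mmu->root_cr3, .hpa = mmu->root_hpa };
	unsigned int i;

	for (i = 0; i < NUM_PREV_ROOTS; i++) {
		/* swap(root, mmu->prev_roots[i]) */
		struct root_info tmp = root;
		root = mmu->prev_roots[i];
		mmu->prev_roots[i] = tmp;

		if (root.cr3 == new_cr3 && root.hpa != INVALID_PAGE)
			break;
	}

	mmu->root_hpa = root.hpa;
	mmu->root_cr3 = root.cr3;	/* keep the pair in sync, as the patch does */
	return i < NUM_PREV_ROOTS;
}

int main(void)
{
	struct mmu mmu = {
		.root_hpa = 0x1000, .root_cr3 = 0xaaaa000,
		.prev_roots = { { 0xbbbb000, 0x2000 },
				{ 0xcccc000, 0x3000 },
				{ 0, INVALID_PAGE } },
	};

	/* Hit on the second cached root: prints "hit: 1 (root_hpa=0x3000)". */
	printf("hit: %d (root_hpa=%#lx)\n",
	       cached_root_available(&mmu, 0xcccc000),
	       (unsigned long)mmu.root_hpa);
	return 0;
}

On a miss the loop still pushes the current root into prev_roots[] and leaves the evicted (possibly invalid) entry in root_hpa/root_cr3; in the kernel the caller then detects the miss and loads a fresh root, which the sketch does not model.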