@@ -2542,15 +2542,15 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
 	kvm_vcpu_write_tsc_offset(vcpu, offset);
 	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
-	spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
+	raw_spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
 	if (!matched) {
 		kvm->arch.nr_vcpus_matched_tsc = 0;
 	} else if (!already_matched) {
 		kvm->arch.nr_vcpus_matched_tsc++;
 	}
 
 	kvm_track_tsc_matching(vcpu);
-	spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
+	raw_spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
 }
 
 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
@@ -2780,9 +2780,9 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
 	kvm_make_mclock_inprogress_request(kvm);
 
 	/* no guest entries from this point */
-	spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+	raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
 	pvclock_update_vm_gtod_copy(kvm);
-	spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+	raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
 	kvm_for_each_vcpu(i, vcpu, kvm)
 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -2800,15 +2800,15 @@ u64 get_kvmclock_ns(struct kvm *kvm)
 	unsigned long flags;
 	u64 ret;
 
-	spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+	raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
 	if (!ka->use_master_clock) {
-		spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+		raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 		return get_kvmclock_base_ns() + ka->kvmclock_offset;
 	}
 
 	hv_clock.tsc_timestamp = ka->master_cycle_now;
 	hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
-	spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+	raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
 	/* both __this_cpu_read() and rdtsc() should be on the same cpu */
 	get_cpu();
@@ -2902,13 +2902,13 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	 * If the host uses TSC clock, then passthrough TSC as stable
 	 * to the guest.
 	 */
-	spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+	raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
 	use_master_clock = ka->use_master_clock;
 	if (use_master_clock) {
 		host_tsc = ka->master_cycle_now;
 		kernel_ns = ka->master_kernel_ns;
 	}
-	spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+	raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
 	/* Keep irq disabled to prevent changes to the clock */
 	local_irq_save(flags);
@@ -6100,13 +6100,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		 * is slightly ahead) here we risk going negative on unsigned
 		 * 'system_time' when 'user_ns.clock' is very small.
 		 */
-		spin_lock_irq(&ka->pvclock_gtod_sync_lock);
+		raw_spin_lock_irq(&ka->pvclock_gtod_sync_lock);
 		if (kvm->arch.use_master_clock)
 			now_ns = ka->master_kernel_ns;
 		else
 			now_ns = get_kvmclock_base_ns();
 		ka->kvmclock_offset = user_ns.clock - now_ns;
-		spin_unlock_irq(&ka->pvclock_gtod_sync_lock);
+		raw_spin_unlock_irq(&ka->pvclock_gtod_sync_lock);
 
 		kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE);
 		break;
@@ -8139,9 +8139,9 @@ static void kvm_hyperv_tsc_notifier(void)
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		struct kvm_arch *ka = &kvm->arch;
 
-		spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+		raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
 		pvclock_update_vm_gtod_copy(kvm);
-		spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+		raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
 		kvm_for_each_vcpu(cpu, vcpu, kvm)
 			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -11182,7 +11182,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
 	raw_spin_lock_init(&kvm->arch.tsc_write_lock);
 	mutex_init(&kvm->arch.apic_map_lock);
-	spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
+	raw_spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
 
 	kvm->arch.kvmclock_offset = -get_kvmclock_base_ns();
 	pvclock_update_vm_gtod_copy(kvm);