@@ -2537,15 +2537,15 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
 	kvm_vcpu_write_tsc_offset(vcpu, offset);
 	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
-	spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
+	raw_spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
 	if (!matched) {
 		kvm->arch.nr_vcpus_matched_tsc = 0;
 	} else if (!already_matched) {
 		kvm->arch.nr_vcpus_matched_tsc++;
 	}
 
 	kvm_track_tsc_matching(vcpu);
-	spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
+	raw_spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
 }
 
 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
@@ -2775,9 +2775,9 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
 	kvm_make_mclock_inprogress_request(kvm);
 
 	/* no guest entries from this point */
-	spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+	raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
 	pvclock_update_vm_gtod_copy(kvm);
-	spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+	raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
 	kvm_for_each_vcpu(i, vcpu, kvm)
 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -2795,15 +2795,15 @@ u64 get_kvmclock_ns(struct kvm *kvm)
 	unsigned long flags;
 	u64 ret;
 
-	spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+	raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
 	if (!ka->use_master_clock) {
-		spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+		raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 		return get_kvmclock_base_ns() + ka->kvmclock_offset;
 	}
 
 	hv_clock.tsc_timestamp = ka->master_cycle_now;
 	hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
-	spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+	raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
 	/* both __this_cpu_read() and rdtsc() should be on the same cpu */
 	get_cpu();
@@ -2897,13 +2897,13 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	 * If the host uses TSC clock, then passthrough TSC as stable
 	 * to the guest.
 	 */
-	spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+	raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
 	use_master_clock = ka->use_master_clock;
 	if (use_master_clock) {
 		host_tsc = ka->master_cycle_now;
 		kernel_ns = ka->master_kernel_ns;
 	}
-	spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+	raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
 	/* Keep irq disabled to prevent changes to the clock */
 	local_irq_save(flags);
@@ -6101,13 +6101,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		 * is slightly ahead) here we risk going negative on unsigned
 		 * 'system_time' when 'user_ns.clock' is very small.
 		 */
-		spin_lock_irq(&ka->pvclock_gtod_sync_lock);
+		raw_spin_lock_irq(&ka->pvclock_gtod_sync_lock);
 		if (kvm->arch.use_master_clock)
 			now_ns = ka->master_kernel_ns;
 		else
 			now_ns = get_kvmclock_base_ns();
 		ka->kvmclock_offset = user_ns.clock - now_ns;
-		spin_unlock_irq(&ka->pvclock_gtod_sync_lock);
+		raw_spin_unlock_irq(&ka->pvclock_gtod_sync_lock);
 
 		kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE);
 		break;
@@ -8157,9 +8157,9 @@ static void kvm_hyperv_tsc_notifier(void)
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		struct kvm_arch *ka = &kvm->arch;
 
-		spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+		raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
 		pvclock_update_vm_gtod_copy(kvm);
-		spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+		raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
 		kvm_for_each_vcpu(cpu, vcpu, kvm)
 			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -11148,7 +11148,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
 	raw_spin_lock_init(&kvm->arch.tsc_write_lock);
 	mutex_init(&kvm->arch.apic_map_lock);
-	spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
+	raw_spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
 
 	kvm->arch.kvmclock_offset = -get_kvmclock_base_ns();
 	pvclock_update_vm_gtod_copy(kvm);
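
Background note on the conversion above: on PREEMPT_RT kernels a spinlock_t is substituted with a sleeping lock, while a raw_spinlock_t remains a true spinning lock that may be taken with interrupts hard-disabled. pvclock_gtod_sync_lock is taken in exactly such contexts, so every lock/unlock/init call in the hunks above moves to the raw_* variants. For the raw_spin_lock_init() call in kvm_arch_init_vm() to compile, the field itself must be declared as raw_spinlock_t; that declaration lives in arch/x86/include/asm/kvm_host.h and is not part of this excerpt. A hypothetical sketch of the assumed companion header change, for illustration only:

	/* arch/x86/include/asm/kvm_host.h -- assumed companion hunk, not shown above */
-	spinlock_t pvclock_gtod_sync_lock;
+	raw_spinlock_t pvclock_gtod_sync_lock;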