Loading arch/x86/kvm/lapic.c +1 −6 Original line number Diff line number Diff line Loading @@ -1076,16 +1076,11 @@ static void apic_timer_expired(struct kvm_lapic *apic) wait_queue_head_t *q = &vcpu->wq; struct kvm_timer *ktimer = &apic->lapic_timer; /* * Note: KVM_REQ_PENDING_TIMER is implicitly checked in * vcpu_enter_guest. */ if (atomic_read(&apic->lapic_timer.pending)) return; atomic_inc(&apic->lapic_timer.pending); /* FIXME: this code should not know anything about vcpus */ kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu); kvm_set_pending_timer(vcpu); if (waitqueue_active(q)) wake_up_interruptible(q); Loading arch/x86/kvm/x86.c +9 −0 Original line number Diff line number Diff line Loading @@ -1087,6 +1087,15 @@ static void update_pvclock_gtod(struct timekeeper *tk) } #endif void kvm_set_pending_timer(struct kvm_vcpu *vcpu) { /* * Note: KVM_REQ_PENDING_TIMER is implicitly checked in * vcpu_enter_guest. This function is only called from * the physical CPU that is running vcpu. */ kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu); } static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) { Loading arch/x86/kvm/x86.h +1 −0 Original line number Diff line number Diff line Loading @@ -147,6 +147,7 @@ static inline void kvm_register_writel(struct kvm_vcpu *vcpu, void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); void kvm_set_pending_timer(struct kvm_vcpu *vcpu); int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip); void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr); Loading Loading
arch/x86/kvm/lapic.c +1 −6 Original line number Diff line number Diff line Loading @@ -1076,16 +1076,11 @@ static void apic_timer_expired(struct kvm_lapic *apic) wait_queue_head_t *q = &vcpu->wq; struct kvm_timer *ktimer = &apic->lapic_timer; /* * Note: KVM_REQ_PENDING_TIMER is implicitly checked in * vcpu_enter_guest. */ if (atomic_read(&apic->lapic_timer.pending)) return; atomic_inc(&apic->lapic_timer.pending); /* FIXME: this code should not know anything about vcpus */ kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu); kvm_set_pending_timer(vcpu); if (waitqueue_active(q)) wake_up_interruptible(q); Loading
/*
 * Mark a timer interrupt as pending for @vcpu.
 *
 * Note: KVM_REQ_PENDING_TIMER is implicitly checked in
 * vcpu_enter_guest.  This must only be called from the
 * physical CPU that is currently running @vcpu.
 */
void kvm_set_pending_timer(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
}
arch/x86/kvm/x86.h +1 −0 Original line number Diff line number Diff line Loading @@ -147,6 +147,7 @@ static inline void kvm_register_writel(struct kvm_vcpu *vcpu, void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); void kvm_set_pending_timer(struct kvm_vcpu *vcpu); int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip); void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr); Loading