arch/powerpc/kvm/book3s.h  +3 −0

@@ -32,4 +32,7 @@ extern void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val);
 static inline void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val) {}
 #endif
 
+extern void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr);
+extern void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
+
 #endif
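Note: these declarations expose the two helpers to the rest of the Book3S HV code. The definitions move into book3s_hv_builtin.c (always built in) and are exported below, so module-built callers can reach them as well. A minimal sketch of a hypothetical call site follows; BOOK3S_INTERRUPT_PROGRAM and SRR1_PROGILL are existing kernel constants, but this call site is an illustration, not something added by this patch:

/*
 * Hypothetical illustration only: deliver a 0x700 (program check)
 * interrupt to a vcpu via the newly exported helper. The helper
 * saves PC/MSR into SRR0/SRR1, ORs srr1_flags into SRR1, redirects
 * the vcpu to the vector, and ends any pending cede.
 */
#include "book3s.h"

static void example_deliver_program_check(struct kvm_vcpu *vcpu)
{
	kvmppc_inject_interrupt_hv(vcpu, BOOK3S_INTERRUPT_PROGRAM,
				   SRR1_PROGILL);
}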
arch/powerpc/kvm/book3s_hv.c  +0 −43

@@ -133,7 +133,6 @@ static inline bool nesting_enabled(struct kvm *kvm)
 /* If set, the threads on each CPU core have to be in the same MMU mode */
 static bool no_mixing_hpt_and_radix;
 
-static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
 /*
@@ -338,39 +337,6 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
 	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }
 
-static void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
-{
-	unsigned long msr, pc, new_msr, new_pc;
-
-	msr = kvmppc_get_msr(vcpu);
-	pc = kvmppc_get_pc(vcpu);
-	new_msr = vcpu->arch.intr_msr;
-	new_pc = vec;
-
-	/* If transactional, change to suspend mode on IRQ delivery */
-	if (MSR_TM_TRANSACTIONAL(msr))
-		new_msr |= MSR_TS_S;
-	else
-		new_msr |= msr & MSR_TS_MASK;
-
-	kvmppc_set_srr0(vcpu, pc);
-	kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
-	kvmppc_set_pc(vcpu, new_pc);
-	kvmppc_set_msr(vcpu, new_msr);
-}
-
-static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
-{
-	/*
-	 * Check for illegal transactional state bit combination
-	 * and if we find it, force the TS field to a safe state.
-	 */
-	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
-		msr &= ~MSR_TS_MASK;
-	vcpu->arch.shregs.msr = msr;
-	kvmppc_end_cede(vcpu);
-}
-
 static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
 {
 	vcpu->arch.pvr = pvr;
@@ -2475,15 +2441,6 @@ static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
 	vcpu->arch.timer_running = 1;
 }
 
-static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
-{
-	vcpu->arch.ceded = 0;
-	if (vcpu->arch.timer_running) {
-		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
-		vcpu->arch.timer_running = 0;
-	}
-}
-
 extern int __kvmppc_vcore_entry(void);
 
 static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
arch/powerpc/kvm/book3s_hv_builtin.c  +53 −14

@@ -755,6 +755,56 @@ void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip)
 	local_paca->kvm_hstate.kvm_split_mode = NULL;
 }
 
+static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.ceded = 0;
+	if (vcpu->arch.timer_running) {
+		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
+		vcpu->arch.timer_running = 0;
+	}
+}
+
+void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
+{
+	/*
+	 * Check for illegal transactional state bit combination
+	 * and if we find it, force the TS field to a safe state.
+	 */
+	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
+		msr &= ~MSR_TS_MASK;
+	vcpu->arch.shregs.msr = msr;
+	kvmppc_end_cede(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvmppc_set_msr_hv);
+
+static void inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
+{
+	unsigned long msr, pc, new_msr, new_pc;
+
+	msr = kvmppc_get_msr(vcpu);
+	pc = kvmppc_get_pc(vcpu);
+	new_msr = vcpu->arch.intr_msr;
+	new_pc = vec;
+
+	/* If transactional, change to suspend mode on IRQ delivery */
+	if (MSR_TM_TRANSACTIONAL(msr))
+		new_msr |= MSR_TS_S;
+	else
+		new_msr |= msr & MSR_TS_MASK;
+
+	kvmppc_set_srr0(vcpu, pc);
+	kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
+	kvmppc_set_pc(vcpu, new_pc);
+	vcpu->arch.shregs.msr = new_msr;
+}
+
+void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
+{
+	inject_interrupt(vcpu, vec, srr1_flags);
+	kvmppc_end_cede(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvmppc_inject_interrupt_hv);
+
 /*
  * Is there a PRIV_DOORBELL pending for the guest (on POWER9)?
  * Can we inject a Decrementer or a External interrupt?
@@ -762,7 +812,6 @@ void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip)
 void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
 {
 	int ext;
-	unsigned long vec = 0;
 	unsigned long lpcr;
 
 	/* Insert EXTERNAL bit into LPCR at the MER bit position */
@@ -774,25 +823,15 @@ void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.shregs.msr & MSR_EE) {
 		if (ext) {
-			vec = BOOK3S_INTERRUPT_EXTERNAL;
+			inject_interrupt(vcpu, BOOK3S_INTERRUPT_EXTERNAL, 0);
 		} else {
 			long int dec = mfspr(SPRN_DEC);
 			if (!(lpcr & LPCR_LD))
 				dec = (int) dec;
 			if (dec < 0)
-				vec = BOOK3S_INTERRUPT_DECREMENTER;
+				inject_interrupt(vcpu, BOOK3S_INTERRUPT_DECREMENTER, 0);
 		}
 	}
-	if (vec) {
-		unsigned long msr, old_msr = vcpu->arch.shregs.msr;
-
-		kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
-		kvmppc_set_srr1(vcpu, old_msr);
-		kvmppc_set_pc(vcpu, vec);
-		msr = vcpu->arch.intr_msr;
-		if (MSR_TM_ACTIVE(old_msr))
-			msr |= MSR_TS_S;
-		vcpu->arch.shregs.msr = msr;
-	}
 
 	if (vcpu->arch.doorbell_request) {
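Two details in the new code are worth calling out. First, the guest-entry path calls the static inject_interrupt() directly and writes vcpu->arch.shregs.msr itself, while external callers get the exported kvmppc_inject_interrupt_hv() wrapper, which additionally ends a pending cede. Second, the decrementer expiry test sign-extends DEC from 32 bits when the Large Decrementer (LPCR_LD) is off, since only the low 32 bits of the register are meaningful in that mode. A standalone sketch of that sign-extension logic (plain userspace C, assuming 64-bit long; the sample value is made up):

#include <stdio.h>

/* Mirrors the expiry test in kvmppc_guest_entry_inject_int(). */
static int dec_expired(long dec, int large_decrementer)
{
	if (!large_decrementer)
		dec = (int) dec;	/* sign-extend from bit 31 */
	return dec < 0;
}

int main(void)
{
	long raw = 0xffffffffL;	/* a 32-bit DEC reading of -1 */

	/* Without LPCR_LD this is -1, i.e. expired; with it, it is a
	 * large positive count, i.e. not yet expired. */
	printf("LD off: expired=%d\n", dec_expired(raw, 0));	/* prints 1 */
	printf("LD on:  expired=%d\n", dec_expired(raw, 1));	/* prints 0 */
	return 0;
}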