arch/powerpc/kvm/book3s_hv.c (+3 −2)

@@ -1952,7 +1952,7 @@ static void post_guest_process(struct kvmppc_vcore *vc)
  */
 static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 {
-        struct kvm_vcpu *vcpu;
+        struct kvm_vcpu *vcpu, *vnext;
         int i;
         int srcu_idx;

@@ -1982,7 +1982,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
         */
        if ((threads_per_core > 1) &&
            ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
-               list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
+               list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
+                                        arch.run_list) {
                        vcpu->arch.ret = -EBUSY;
                        kvmppc_remove_runnable(vc, vcpu);
                        wake_up(&vcpu->arch.cpu_run);
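The switch to the _safe iterator is needed because kvmppc_remove_runnable() unlinks the current vcpu from vc->runnable_threads inside the loop body; plain list_for_each_entry() would then follow the next pointer of an entry that has just been removed. list_for_each_entry_safe() remembers the successor in a spare cursor (vnext) before the body runs. Below is a minimal, self-contained user-space sketch of the same idea; the list type and both macros are simplified stand-ins written for illustration, not the kernel's <linux/list.h> API:

/*
 * Simplified analogue of the pattern above: deleting the current node inside
 * the loop body invalidates its forward pointer, so the "safe" form must
 * cache the next node first.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        int id;
        struct node *next;
};

/* Unsafe: after free(pos), pos->next is a use-after-free. (Shown for contrast.) */
#define for_each_node(pos, head) \
        for (pos = (head); pos; pos = pos->next)

/* Safe: cache the successor before the body may free or unlink pos. */
#define for_each_node_safe(pos, n, head) \
        for (pos = (head), n = pos ? pos->next : NULL; pos; \
             pos = n, n = pos ? pos->next : NULL)

int main(void)
{
        struct node *head = NULL, *pos, *n;

        for (int i = 3; i >= 1; i--) {          /* build the list 1 -> 2 -> 3 */
                pos = malloc(sizeof(*pos));
                pos->id = i;
                pos->next = head;
                head = pos;
        }

        /* Remove every node while walking the list, as kvmppc_run_core() does
         * via kvmppc_remove_runnable(): only the _safe form may free pos here. */
        for_each_node_safe(pos, n, head) {
                printf("removing node %d\n", pos->id);
                free(pos);
        }
        head = NULL;
        return 0;
}

Note the safe form only protects against removal performed by the loop body itself; it is not a defense against concurrent modification by other threads.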
arch/x86/include/asm/kvm_host.h (+2 −0)

@@ -401,6 +401,7 @@ struct kvm_vcpu_arch {
        struct kvm_mmu_memory_cache mmu_page_header_cache;

        struct fpu guest_fpu;
+       bool eager_fpu;
        u64 xcr0;
        u64 guest_supported_xcr0;
        u32 guest_xstate_size;

@@ -747,6 +748,7 @@ struct kvm_x86_ops {
        void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
        unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
        void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
+       void (*fpu_activate)(struct kvm_vcpu *vcpu);
        void (*fpu_deactivate)(struct kvm_vcpu *vcpu);
        void (*tlb_flush)(struct kvm_vcpu *vcpu);
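Two additions here: a per-vcpu eager_fpu flag in kvm_vcpu_arch, and a new fpu_activate callback in kvm_x86_ops next to the existing fpu_deactivate, so common x86 code can ask the vendor backend to load the guest FPU unconditionally (the svm.c hunk at the end of this diff supplies .fpu_activate = svm_fpu_activate for the AMD backend). As a rough model of how such a flag can gate the lazy-FPU policy through a backend ops table, here is a hedged, stand-alone sketch; all names and code paths are invented for illustration and are not KVM's actual implementation:

#include <stdbool.h>
#include <stdio.h>

struct fake_vcpu {
        bool eager_fpu;         /* mirrors the new vcpu->arch.eager_fpu flag */
        bool fpu_loaded;        /* whether guest FPU state is on the CPU     */
};

/* Backend hooks, analogous in spirit to fpu_activate/fpu_deactivate. */
struct vendor_ops {
        void (*fpu_activate)(struct fake_vcpu *v);
        void (*fpu_deactivate)(struct fake_vcpu *v);
};

static void fake_backend_fpu_activate(struct fake_vcpu *v)   { v->fpu_loaded = true;  }
static void fake_backend_fpu_deactivate(struct fake_vcpu *v) { v->fpu_loaded = false; }

static const struct vendor_ops fake_ops = {
        .fpu_activate   = fake_backend_fpu_activate,
        .fpu_deactivate = fake_backend_fpu_deactivate,
};

/* Common code: an eager-FPU vcpu never drops its FPU state; a lazy one does,
 * and reloads it on the next FPU use. */
static void on_guest_exit(struct fake_vcpu *v)
{
        if (v->eager_fpu)
                fake_ops.fpu_activate(v);       /* keep state resident */
        else
                fake_ops.fpu_deactivate(v);     /* drop, restore lazily later */
}

int main(void)
{
        struct fake_vcpu lazy  = { .eager_fpu = false, .fpu_loaded = true };
        struct fake_vcpu eager = { .eager_fpu = true,  .fpu_loaded = true };

        on_guest_exit(&lazy);
        on_guest_exit(&eager);
        printf("lazy: %d, eager: %d\n", lazy.fpu_loaded, eager.fpu_loaded);
        return 0;
}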
arch/x86/kvm/cpuid.c (+4 −0)

@@ -16,6 +16,8 @@
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/uaccess.h>
+#include <asm/i387.h> /* For use_eager_fpu.  Ugh! */
+#include <asm/fpu-internal.h> /* For use_eager_fpu.  Ugh! */
 #include <asm/user.h>
 #include <asm/xsave.h>
 #include "cpuid.h"

@@ -95,6 +97,8 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
        if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
                best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

+       vcpu->arch.eager_fpu = use_eager_fpu() || guest_cpuid_has_mpx(vcpu);
+
        /*
         * The existing code assumes virtual address is 48-bit in the canonical
         * address checks; exit if it is ever changed.
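The new line in kvm_update_cpuid() derives the per-vcpu policy from two inputs: whether the host itself already switches FPU state eagerly (use_eager_fpu()), and whether MPX is exposed to the guest, since MPX state is saved and restored through XSAVE and is not handled correctly by the lazy restore scheme. A tiny self-contained model of that derivation, with an invented helper standing in for the real predicates:

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in: the real code calls use_eager_fpu() and
 * guest_cpuid_has_mpx(vcpu); here they are plain parameters. */
static bool compute_eager_fpu(bool host_eager_fpu, bool guest_has_mpx)
{
        /* Eager restore if the host already switches FPU state eagerly,
         * or if the guest sees MPX, whose XSAVE-managed state needs it. */
        return host_eager_fpu || guest_has_mpx;
}

int main(void)
{
        printf("%d %d %d %d\n",
               compute_eager_fpu(false, false),     /* 0: lazy restore is fine */
               compute_eager_fpu(true,  false),     /* 1 */
               compute_eager_fpu(false, true),      /* 1 */
               compute_eager_fpu(true,  true));     /* 1 */
        return 0;
}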
arch/x86/kvm/cpuid.h (+8 −0)

@@ -117,4 +117,12 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
        best = kvm_find_cpuid_entry(vcpu, 7, 0);
        return best && (best->ebx & bit(X86_FEATURE_RTM));
 }
+
+static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid_entry2 *best;
+
+       best = kvm_find_cpuid_entry(vcpu, 7, 0);
+       return best && (best->ebx & bit(X86_FEATURE_MPX));
+}
 #endif
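guest_cpuid_has_mpx() mirrors the existing guest_cpuid_has_rtm() helper: look up the guest's CPUID leaf 7, subleaf 0, and test the relevant feature bit in EBX. The sketch below models that lookup in user space with a small table of CPUID entries; the entry layout and the bit position (MPX is commonly documented as CPUID.(EAX=7,ECX=0):EBX bit 14, but treat that as an assumption here) are illustrative rather than KVM's definitions:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified model of a guest's cached CPUID entries. */
struct cpuid_entry {
        uint32_t function;      /* CPUID leaf (EAX input)  */
        uint32_t index;         /* subleaf    (ECX input)  */
        uint32_t eax, ebx, ecx, edx;
};

#define FEATURE_MPX_EBX_BIT 14  /* assumed bit position in leaf 7 EBX */

static const struct cpuid_entry *
find_cpuid_entry(const struct cpuid_entry *e, size_t n,
                 uint32_t function, uint32_t index)
{
        for (size_t i = 0; i < n; i++)
                if (e[i].function == function && e[i].index == index)
                        return &e[i];
        return NULL;            /* leaf not exposed to the guest */
}

static bool guest_has_mpx(const struct cpuid_entry *e, size_t n)
{
        const struct cpuid_entry *best = find_cpuid_entry(e, n, 7, 0);

        return best && (best->ebx & (1u << FEATURE_MPX_EBX_BIT));
}

int main(void)
{
        const struct cpuid_entry guest[] = {
                { .function = 1, .index = 0, .ecx = 0x80000000u },
                { .function = 7, .index = 0, .ebx = 1u << FEATURE_MPX_EBX_BIT },
        };

        printf("MPX exposed: %d\n",
               guest_has_mpx(guest, sizeof(guest) / sizeof(guest[0])));
        return 0;
}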
arch/x86/kvm/svm.c (+1 −0)

@@ -4383,6 +4383,7 @@ static struct kvm_x86_ops svm_x86_ops = {
        .cache_reg = svm_cache_reg,
        .get_rflags = svm_get_rflags,
        .set_rflags = svm_set_rflags,
+       .fpu_activate = svm_fpu_activate,
        .fpu_deactivate = svm_fpu_deactivate,
        .tlb_flush = svm_flush_tlb,
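This last hunk wires svm_fpu_activate into the SVM ops table so the new kvm_x86_ops->fpu_activate hook has an AMD-side implementation. Because the table uses designated initializers, members are matched by name: adding .fpu_activate in the middle of the struct does not disturb the other entries, and any member a backend leaves out is implicitly NULL. A stand-alone illustration of that C pattern (invented names, not KVM code):

#include <stdio.h>

struct ops {
        void (*activate)(void);
        void (*deactivate)(void);
        void (*flush)(void);
};

static void my_activate(void) { puts("activate"); }
static void my_flush(void)    { puts("flush"); }

/* .deactivate is intentionally omitted and is therefore NULL. */
static const struct ops my_ops = {
        .activate = my_activate,
        .flush    = my_flush,
};

int main(void)
{
        my_ops.activate();
        if (my_ops.deactivate)          /* callers must check optional hooks */
                my_ops.deactivate();
        my_ops.flush();
        return 0;
}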