arch/x86/include/asm/kvm_host.h +1 −0

@@ -1285,6 +1285,7 @@ struct kvm_x86_ops {
 	void (*migrate_timers)(struct kvm_vcpu *vcpu);

 	void (*msr_filter_changed)(struct kvm_vcpu *vcpu);
+	int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err);
 };

 struct kvm_x86_nested_ops {
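The new callback decides how an emulated RDMSR/WRMSR is completed. Both vendor modules initially point it at the existing kvm_complete_insn_gp helper (see the svm.c and vmx.c hunks below), so there is no functional change yet. For reference, the default completion roughly does the following; this is a sketch of the existing helper in arch/x86/kvm/x86.c, not part of this diff:

	/*
	 * Sketch of the existing default, kvm_complete_insn_gp():
	 * inject #GP into the guest on error, otherwise advance RIP
	 * past the emulated instruction.
	 */
	int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
	{
		if (err)
			kvm_inject_gp(vcpu, 0);
		else
			return kvm_skip_emulated_instruction(vcpu);

		return 1;
	}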
arch/x86/kvm/svm/svm.c +1 −0

@@ -4306,6 +4306,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.apic_init_signal_blocked = svm_apic_init_signal_blocked,

 	.msr_filter_changed = svm_msr_filter_changed,
+	.complete_emulated_msr = kvm_complete_insn_gp,
 };

 static struct kvm_x86_init_ops svm_init_ops __initdata = {
arch/x86/kvm/vmx/vmx.c +1 −0

@@ -7701,6 +7701,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.migrate_timers = vmx_migrate_timers,

 	.msr_filter_changed = vmx_msr_filter_changed,
+	.complete_emulated_msr = kvm_complete_insn_gp,
 	.cpu_dirty_log_size = vmx_cpu_dirty_log_size,
 };
arch/x86/kvm/x86.c +4 −4

@@ -1642,12 +1642,12 @@ static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu)
 		kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32);
 	}

-	return kvm_complete_insn_gp(vcpu, err);
+	return kvm_x86_ops.complete_emulated_msr(vcpu, err);
 }

 static int complete_emulated_wrmsr(struct kvm_vcpu *vcpu)
 {
-	return kvm_complete_insn_gp(vcpu, vcpu->run->msr.error);
+	return kvm_x86_ops.complete_emulated_msr(vcpu, vcpu->run->msr.error);
 }

 static u64 kvm_msr_reason(int r)

@@ -1719,7 +1719,7 @@ int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
 		trace_kvm_msr_read_ex(ecx);
 	}

-	return kvm_complete_insn_gp(vcpu, r);
+	return kvm_x86_ops.complete_emulated_msr(vcpu, r);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_rdmsr);

@@ -1745,7 +1745,7 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
 	else
 		trace_kvm_msr_write_ex(ecx, data);

-	return kvm_complete_insn_gp(vcpu, r);
+	return kvm_x86_ops.complete_emulated_msr(vcpu, r);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
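With the common code dispatching through kvm_x86_ops.complete_emulated_msr instead of calling kvm_complete_insn_gp directly, a vendor module can later complete an emulated MSR access in its own way, for example when it cannot simply inject #GP or adjust guest registers itself. A purely illustrative override is sketched below; vendor_needs_special_completion() and vendor_report_msr_error() are hypothetical names, not part of this diff or of KVM:

	/*
	 * Illustrative only: fall back to the default completion in the
	 * common case, but report a failed access through a vendor-specific
	 * channel (hypothetical helper) instead of injecting #GP.
	 */
	static int vendor_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
	{
		if (!err || !vendor_needs_special_completion(vcpu))
			return kvm_complete_insn_gp(vcpu, err);

		vendor_report_msr_error(vcpu);	/* hypothetical helper */
		return 1;	/* continue in KVM rather than exiting to userspace */
	}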