Documentation/virt/kvm/api.rst  +2 −2

@@ -4803,7 +4803,7 @@ KVM_PV_VM_VERIFY
 4.126 KVM_X86_SET_MSR_FILTER
 ----------------------------
 
-:Capability: KVM_X86_SET_MSR_FILTER
+:Capability: KVM_CAP_X86_MSR_FILTER
 :Architectures: x86
 :Type: vm ioctl
 :Parameters: struct kvm_msr_filter
@@ -6715,7 +6715,7 @@ accesses that would usually trigger a #GP by KVM into the guest will instead
 get bounced to user space through the KVM_EXIT_X86_RDMSR and KVM_EXIT_X86_WRMSR
 exit notifications.
 
-8.27 KVM_X86_SET_MSR_FILTER
+8.27 KVM_CAP_X86_MSR_FILTER
 ---------------------------
 
 :Architectures: x86
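The doc fix points userspace at the actual capability name. A minimal sketch of the probe the corrected text implies (standard KVM UAPI only; nothing here is specific to this patch): test KVM_CAP_X86_MSR_FILTER with KVM_CHECK_EXTENSION before issuing the KVM_X86_SET_MSR_FILTER vm ioctl.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);

	if (kvm < 0)
		return 1;
	/* Returns > 0 when the MSR filter ioctl is available. */
	if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_X86_MSR_FILTER) > 0)
		printf("KVM_X86_SET_MSR_FILTER (vm ioctl) is available\n");
	return 0;
}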
arch/x86/include/asm/kvm_host.h  +12 −3

@@ -113,6 +113,7 @@
 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
 
 #define UNMAPPED_GVA (~(gpa_t)0)
+#define INVALID_GPA (~(gpa_t)0)
 
 /* KVM Hugepage definitions for x86 */
 #define KVM_MAX_HUGEPAGE_LEVEL PG_LEVEL_1G
@@ -199,6 +200,7 @@ enum x86_intercept_stage;
 
 #define KVM_NR_DB_REGS	4
 
+#define DR6_BUS_LOCK	(1 << 11)
 #define DR6_BD		(1 << 13)
 #define DR6_BS		(1 << 14)
 #define DR6_BT		(1 << 15)
@@ -212,7 +214,7 @@ enum x86_intercept_stage;
  * DR6_ACTIVE_LOW is also used as the init/reset value for DR6.
  */
 #define DR6_ACTIVE_LOW	0xffff0ff0
-#define DR6_VOLATILE	0x0001e00f
+#define DR6_VOLATILE	0x0001e80f
 #define DR6_FIXED_1	(DR6_ACTIVE_LOW & ~DR6_VOLATILE)
 
 #define DR7_BP_EN_MASK	0x000000ff
@@ -407,7 +409,7 @@ struct kvm_mmu {
 	u32 pkru_mask;
 
 	u64 *pae_root;
-	u64 *lm_root;
+	u64 *pml4_root;
 
 	/*
	 * check zero bits on shadow page table entries, these
@@ -1417,6 +1419,7 @@ struct kvm_arch_async_pf {
 	bool direct_map;
 };
 
+extern u32 __read_mostly kvm_nr_uret_msrs;
 extern u64 __read_mostly host_efer;
 extern bool __read_mostly allow_smaller_maxphyaddr;
 extern struct kvm_x86_ops kvm_x86_ops;
@@ -1775,9 +1778,15 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
 		    unsigned long ipi_bitmap_high, u32 min,
 		    unsigned long icr, int op_64_bit);
 
-void kvm_define_user_return_msr(unsigned index, u32 msr);
+int kvm_add_user_return_msr(u32 msr);
+int kvm_find_user_return_msr(u32 msr);
 int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);
 
+static inline bool kvm_is_supported_user_return_msr(u32 msr)
+{
+	return kvm_find_user_return_msr(msr) >= 0;
+}
+
 u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
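The header change replaces the define-by-index API with dynamic registration: kvm_add_user_return_msr() hands out the slot, kvm_find_user_return_msr() recovers it later, and the new kvm_is_supported_user_return_msr() helper builds on the latter. A sketch of how a caller migrates, assuming hypothetical setup/load functions and a cached slot variable (MSR_IA32_TSX_CTRL is only an example MSR, not taken from this diff):

/* Before, the caller owned the index:
 *	kvm_define_user_return_msr(SOME_FIXED_SLOT, MSR_IA32_TSX_CTRL);
 * After, common code assigns the slot; remember what it returns.
 */
static int tsx_ctrl_slot;			/* hypothetical cached slot */

static int example_hardware_setup(void)	/* hypothetical */
{
	tsx_ctrl_slot = kvm_add_user_return_msr(MSR_IA32_TSX_CTRL);
	if (tsx_ctrl_slot < 0)
		return -ENOSPC;		/* registration list is full */
	return 0;
}

static void example_prepare_guest(void)	/* hypothetical */
{
	/* Load the guest value; the host value is restored lazily on
	 * return to userspace, which is the point of this machinery. */
	kvm_set_user_return_msr(tsx_ctrl_slot, 0, -1ull);
}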
arch/x86/include/asm/kvm_para.h  +2 −8

@@ -7,8 +7,6 @@
 #include <linux/interrupt.h>
 #include <uapi/asm/kvm_para.h>
 
-extern void kvmclock_init(void);
-
 #ifdef CONFIG_KVM_GUEST
 bool kvm_check_and_clear_guest_paused(void);
 #else
@@ -86,13 +84,14 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
 }
 
 #ifdef CONFIG_KVM_GUEST
+void kvmclock_init(void);
+void kvmclock_disable(void);
 bool kvm_para_available(void);
 unsigned int kvm_arch_para_features(void);
 unsigned int kvm_arch_para_hints(void);
 void kvm_async_pf_task_wait_schedule(u32 token);
 void kvm_async_pf_task_wake(u32 token);
 u32 kvm_read_and_reset_apf_flags(void);
-void kvm_disable_steal_time(void);
 bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token);
 
 DECLARE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
@@ -137,11 +136,6 @@ static inline u32 kvm_read_and_reset_apf_flags(void)
 	return 0;
 }
 
-static inline void kvm_disable_steal_time(void)
-{
-	return;
-}
-
 static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
 {
 	return false;
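kvmclock_init()/kvmclock_disable() now follow the header's usual pattern of prototypes under CONFIG_KVM_GUEST, and kvm_disable_steal_time() loses both its prototype and its stub because it becomes static inside arch/x86/kernel/kvm.c (see the kvm.c diff below). For reference, the pattern the header uses, sketched with a hypothetical helper:

#ifdef CONFIG_KVM_GUEST
void example_pv_helper(void);		/* real implementation in kvm.c */
#else
static inline void example_pv_helper(void)
{
	/* no-op when not built as a KVM guest */
}
#endif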
arch/x86/include/uapi/asm/kvm.h  +2 −0

@@ -437,6 +437,8 @@ struct kvm_vmx_nested_state_hdr {
 		__u16 flags;
 	} smm;
 
 	__u16 pad;
+	__u32 flags;
+	__u64 preemption_timer_deadline;
 };
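Userspace consuming the enlarged header should gate on the flags word rather than on struct size. A sketch, assuming the KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE flag bit that accompanies the new field, and a vcpu_fd obtained elsewhere:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* vcpu_fd: an existing vCPU file descriptor (assumption). */
static void print_preemption_deadline(int vcpu_fd)
{
	struct {
		struct kvm_nested_state state;
		char data[8192];	/* room for vmcs12 pages; size is a guess */
	} buf = { .state.size = sizeof(buf) };

	/* Only trust the new field when the kernel sets the flag, so old
	 * and new kernels/userspace stay migration-compatible. */
	if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, &buf.state) == 0 &&
	    buf.state.format == KVM_STATE_NESTED_FORMAT_VMX &&
	    (buf.state.hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE))
		printf("deadline: %llu\n", (unsigned long long)
		       buf.state.hdr.vmx.preemption_timer_deadline);
}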
arch/x86/kernel/kvm.c  +83 −46

@@ -26,6 +26,7 @@
 #include <linux/kprobes.h>
 #include <linux/nmi.h>
 #include <linux/swait.h>
+#include <linux/syscore_ops.h>
 #include <asm/timer.h>
 #include <asm/cpu.h>
 #include <asm/traps.h>
@@ -37,6 +38,7 @@
 #include <asm/tlb.h>
 #include <asm/cpuidle_haltpoll.h>
 #include <asm/ptrace.h>
+#include <asm/reboot.h>
 #include <asm/svm.h>
 
 DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
@@ -345,7 +347,7 @@ static void kvm_guest_cpu_init(void)
 		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
 		__this_cpu_write(apf_reason.enabled, 1);
-		pr_info("KVM setup async PF for cpu %d\n", smp_processor_id());
+		pr_info("setup async PF for cpu %d\n", smp_processor_id());
 	}
 
 	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
@@ -371,34 +373,17 @@ static void kvm_pv_disable_apf(void)
 	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
 	__this_cpu_write(apf_reason.enabled, 0);
 
-	pr_info("Unregister pv shared memory for cpu %d\n", smp_processor_id());
+	pr_info("disable async PF for cpu %d\n", smp_processor_id());
 }
 
-static void kvm_pv_guest_cpu_reboot(void *unused)
+static void kvm_disable_steal_time(void)
 {
-	/*
-	 * We disable PV EOI before we load a new kernel by kexec,
-	 * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
-	 * New kernel can re-enable when it boots.
-	 */
-	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
-		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
-	kvm_pv_disable_apf();
-	kvm_disable_steal_time();
-}
+	if (!has_steal_clock)
+		return;
 
-static int kvm_pv_reboot_notify(struct notifier_block *nb,
-				unsigned long code, void *unused)
-{
-	if (code == SYS_RESTART)
-		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
-	return NOTIFY_DONE;
+	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
 }
 
-static struct notifier_block kvm_pv_reboot_nb = {
-	.notifier_call = kvm_pv_reboot_notify,
-};
-
 static u64 kvm_steal_clock(int cpu)
 {
 	u64 steal;
@@ -416,14 +401,6 @@ static u64 kvm_steal_clock(int cpu)
 	return steal;
 }
 
-void kvm_disable_steal_time(void)
-{
-	if (!has_steal_clock)
-		return;
-
-	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
-}
-
 static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
 {
 	early_set_memory_decrypted((unsigned long) ptr, size);
@@ -451,6 +428,27 @@ static void __init sev_map_percpu_data(void)
 	}
 }
 
+static void kvm_guest_cpu_offline(bool shutdown)
+{
+	kvm_disable_steal_time();
+	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
+		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
+	kvm_pv_disable_apf();
+	if (!shutdown)
+		apf_task_wake_all();
+	kvmclock_disable();
+}
+
+static int kvm_cpu_online(unsigned int cpu)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	kvm_guest_cpu_init();
+	local_irq_restore(flags);
+	return 0;
+}
+
 #ifdef CONFIG_SMP
 
 static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
@@ -635,31 +633,64 @@ static void __init kvm_smp_prepare_boot_cpu(void)
 	kvm_spinlock_init();
 }
 
-static void kvm_guest_cpu_offline(void)
+static int kvm_cpu_down_prepare(unsigned int cpu)
 {
-	kvm_disable_steal_time();
-	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
-		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
-	kvm_pv_disable_apf();
-	apf_task_wake_all();
-}
+	unsigned long flags;
+
+	local_irq_save(flags);
+	kvm_guest_cpu_offline(false);
+	local_irq_restore(flags);
+	return 0;
+}
 
-static int kvm_cpu_online(unsigned int cpu)
+#endif
+
+static int kvm_suspend(void)
 {
-	local_irq_disable();
-	kvm_guest_cpu_init();
-	local_irq_enable();
+	kvm_guest_cpu_offline(false);
+
 	return 0;
 }
 
-static int kvm_cpu_down_prepare(unsigned int cpu)
+static void kvm_resume(void)
 {
-	local_irq_disable();
-	kvm_guest_cpu_offline();
-	local_irq_enable();
-	return 0;
+	kvm_cpu_online(raw_smp_processor_id());
 }
 
+static struct syscore_ops kvm_syscore_ops = {
+	.suspend	= kvm_suspend,
+	.resume		= kvm_resume,
+};
+
+static void kvm_pv_guest_cpu_reboot(void *unused)
+{
+	kvm_guest_cpu_offline(true);
+}
+
+static int kvm_pv_reboot_notify(struct notifier_block *nb,
+				unsigned long code, void *unused)
+{
+	if (code == SYS_RESTART)
+		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block kvm_pv_reboot_nb = {
+	.notifier_call = kvm_pv_reboot_notify,
+};
+
+/*
+ * After a PV feature is registered, the host will keep writing to the
+ * registered memory location. If the guest happens to shutdown, this memory
+ * won't be valid. In cases like kexec, in which you install a new kernel, this
+ * means a random memory location will be kept being written.
+ */
+#ifdef CONFIG_KEXEC_CORE
+static void kvm_crash_shutdown(struct pt_regs *regs)
+{
+	kvm_guest_cpu_offline(true);
+	native_machine_crash_shutdown(regs);
+}
+#endif
+
 static void __init kvm_guest_init(void)
@@ -704,6 +735,12 @@ static void __init kvm_guest_init(void)
 	kvm_guest_cpu_init();
 #endif
 
+#ifdef CONFIG_KEXEC_CORE
+	machine_ops.crash_shutdown = kvm_crash_shutdown;
+#endif
+
+	register_syscore_ops(&kvm_syscore_ops);
+
 	/*
 	 * Hard lockup detection is enabled by default. Disable it, as guests
 	 * can get false positives too easily, for example if the host is
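With this change the reboot notifier, kexec crash path, and suspend/resume all funnel through one kvm_guest_cpu_offline() helper. Suspend/resume uses syscore_ops, whose callbacks run on a single CPU with interrupts already disabled, which is why kvm_suspend() needs no local_irq_save() of its own, unlike the cpuhp callbacks. A minimal, self-contained sketch of the same registration pattern (all names hypothetical, generic kernel API only):

#include <linux/init.h>
#include <linux/printk.h>
#include <linux/syscore_ops.h>

static int demo_suspend(void)
{
	/* Runs last on the way down: one CPU, IRQs off. */
	pr_info("demo: quiescing per-cpu PV-style state\n");
	return 0;		/* a non-zero return aborts the suspend */
}

static void demo_resume(void)
{
	/* Runs first on the way up, before secondary CPUs return. */
	pr_info("demo: re-arming per-cpu PV-style state\n");
}

static struct syscore_ops demo_syscore_ops = {
	.suspend = demo_suspend,
	.resume  = demo_resume,
};

static int __init demo_init(void)
{
	register_syscore_ops(&demo_syscore_ops);
	return 0;
}
early_initcall(demo_init);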