arch/x86/kvm/mmu.h  +1 −1

@@ -51,7 +51,7 @@ static inline u64 rsvd_bits(int s, int e)
 	return ((1ULL << (e - s + 1)) - 1) << s;
 }
 
-void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask);
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask);
 
 void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
 				 struct kvm_mmu *context);
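As an aside, the rsvd_bits() helper shown as context in this hunk builds a contiguous mask covering bits s through e, inclusive. A standalone userspace sketch of the same arithmetic (purely illustrative, not part of the patch):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the rsvd_bits() helper in the hunk above. */
static uint64_t rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}

int main(void)
{
	/* Bits 47..51 set, a typical "reserved physical address bits" range. */
	assert(rsvd_bits(47, 51) == 0x000f800000000000ULL);
	/* A one-bit range degenerates to a single set bit. */
	assert(rsvd_bits(51, 51) == 1ULL << 51);

	printf("rsvd_bits(47, 51) = %#llx\n",
	       (unsigned long long)rsvd_bits(47, 51));
	return 0;
}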
arch/x86/kvm/mmu/mmu.c  +22 −34

@@ -247,7 +247,6 @@ static u64 __read_mostly shadow_x_mask;	/* mutual exclusive with nx_mask */
 static u64 __read_mostly shadow_user_mask;
 static u64 __read_mostly shadow_accessed_mask;
 static u64 __read_mostly shadow_dirty_mask;
-static u64 __read_mostly shadow_mmio_mask;
 static u64 __read_mostly shadow_mmio_value;
 static u64 __read_mostly shadow_mmio_access_mask;
 static u64 __read_mostly shadow_present_mask;

@@ -334,19 +333,19 @@ static void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
 	kvm_flush_remote_tlbs_with_range(kvm, &range);
 }
 
-void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask)
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask)
 {
 	BUG_ON((u64)(unsigned)access_mask != access_mask);
-	BUG_ON((mmio_mask & mmio_value) != mmio_value);
+	WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask << shadow_nonpresent_or_rsvd_mask_len));
+	WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);
 	shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
-	shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK;
 	shadow_mmio_access_mask = access_mask;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
 
 static bool is_mmio_spte(u64 spte)
 {
-	return (spte & shadow_mmio_mask) == shadow_mmio_value;
+	return (spte & SPTE_SPECIAL_MASK) == SPTE_MMIO_MASK;
 }
 
 static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)

@@ -569,7 +568,6 @@ static void kvm_mmu_reset_all_pte_masks(void)
 	shadow_dirty_mask = 0;
 	shadow_nx_mask = 0;
 	shadow_x_mask = 0;
-	shadow_mmio_mask = 0;
 	shadow_present_mask = 0;
 	shadow_acc_track_mask = 0;

@@ -586,16 +584,15 @@ static void kvm_mmu_reset_all_pte_masks(void)
 	 * the most significant bits of legal physical address space.
 	 */
 	shadow_nonpresent_or_rsvd_mask = 0;
-	low_phys_bits = boot_cpu_data.x86_cache_bits;
-	if (boot_cpu_data.x86_cache_bits <
-	    52 - shadow_nonpresent_or_rsvd_mask_len) {
+	low_phys_bits = boot_cpu_data.x86_phys_bits;
+	if (boot_cpu_has_bug(X86_BUG_L1TF) &&
+	    !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
+			  52 - shadow_nonpresent_or_rsvd_mask_len)) {
+		low_phys_bits = boot_cpu_data.x86_cache_bits
+			- shadow_nonpresent_or_rsvd_mask_len;
 		shadow_nonpresent_or_rsvd_mask =
-			rsvd_bits(boot_cpu_data.x86_cache_bits -
-				  shadow_nonpresent_or_rsvd_mask_len,
-				  boot_cpu_data.x86_cache_bits - 1);
-		low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
-	} else
-		WARN_ON_ONCE(boot_cpu_has_bug(X86_BUG_L1TF));
+			rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
+	}
 
 	shadow_nonpresent_or_rsvd_lower_gfn_mask =
 		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);

@@ -6134,27 +6131,18 @@ static void kvm_set_mmio_spte_mask(void)
 	u64 mask;
 
 	/*
-	 * Set the reserved bits and the present bit of an paging-structure
-	 * entry to generate page fault with PFER.RSV = 1.
-	 */
-
-	/*
-	 * Mask the uppermost physical address bit, which would be reserved as
-	 * long as the supported physical address width is less than 52.
+	 * Set a reserved PA bit in MMIO SPTEs to generate page faults with
+	 * PFEC.RSVD=1 on MMIO accesses.  64-bit PTEs (PAE, x86-64, and EPT
+	 * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
+	 * 52-bit physical addresses then there are no reserved PA bits in the
+	 * PTEs and so the reserved PA approach must be disabled.
 	 */
-	mask = 1ull << 51;
-
-	/* Set the present bit. */
-	mask |= 1ull;
-
-	/*
-	 * If reserved bit is not supported, clear the present bit to disable
-	 * mmio page fault.
-	 */
-	if (shadow_phys_bits == 52)
-		mask &= ~1ull;
+	if (shadow_phys_bits < 52)
+		mask = BIT_ULL(51) | PT_PRESENT_MASK;
+	else
+		mask = 0;
 
-	kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
+	kvm_mmu_set_mmio_spte_mask(mask, ACC_WRITE_MASK | ACC_USER_MASK);
 }
 
 static bool get_nx_auto_mode(void)
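For clarity, here is a minimal userspace sketch of the new mask selection in kvm_set_mmio_spte_mask(); PT_PRESENT_MASK is assumed to be bit 0 (as in KVM's paging #defines), and mmio_spte_value() is a made-up name used only for illustration:

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)      (1ULL << (n))
#define PT_PRESENT_MASK BIT_ULL(0)   /* bit 0 of a 64-bit PTE, as in KVM */

/* Mirrors the selection in kvm_set_mmio_spte_mask() above: only use the
 * reserved-PA trick when the CPU has fewer than 52 physical address bits,
 * i.e. when bit 51 really is reserved in PTEs. */
static uint64_t mmio_spte_value(unsigned int shadow_phys_bits)
{
	if (shadow_phys_bits < 52)
		return BIT_ULL(51) | PT_PRESENT_MASK;
	return 0;   /* no reserved PA bits -> reserved-bit #PF unusable */
}

int main(void)
{
	printf("46 phys bits -> %#llx\n",
	       (unsigned long long)mmio_spte_value(46));  /* 0x8000000000001 */
	printf("52 phys bits -> %#llx\n",
	       (unsigned long long)mmio_spte_value(52));  /* 0 */
	return 0;
}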
arch/x86/kvm/svm/svm.c  +1 −1

@@ -778,7 +778,7 @@ static __init void svm_adjust_mmio_mask(void)
 	 */
 	mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;
 
-	kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
+	kvm_mmu_set_mmio_spte_mask(mask, PT_WRITABLE_MASK | PT_USER_MASK);
 }
 
 static void svm_hardware_teardown(void)
arch/x86/kvm/vmx/vmx.c  +4 −2

@@ -4233,8 +4233,7 @@ static void ept_set_mmio_spte_mask(void)
 	 * EPT Misconfigurations can be generated if the value of bits 2:0
 	 * of an EPT paging-structure entry is 110b (write/execute).
 	 */
-	kvm_mmu_set_mmio_spte_mask(VMX_EPT_RWX_MASK,
-				   VMX_EPT_MISCONFIG_WX_VALUE, 0);
+	kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE, 0);
 }
 
 #define VMX_XSS_EXIT_BITMAP 0

@@ -7300,6 +7299,9 @@ static __init void vmx_set_cpu_caps(void)
 	/* CPUID 0x80000001 */
 	if (!cpu_has_vmx_rdtscp())
 		kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
+
+	if (vmx_waitpkg_supported())
+		kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
 }
 
 static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
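The EPT misconfiguration trick relies on bits 2:0 being 110b, i.e. writable and executable but not readable, which is an architecturally invalid combination. A tiny sketch, assuming the standard EPT permission layout (R = bit 0, W = bit 1, X = bit 2); the macro names below are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

/* Standard EPT permission bits: R = bit 0, W = bit 1, X = bit 2. */
#define EPT_READABLE   (1ULL << 0)
#define EPT_WRITABLE   (1ULL << 1)
#define EPT_EXECUTABLE (1ULL << 2)

int main(void)
{
	/* 110b: writable + executable without readable.  Hardware treats
	 * this as invalid and raises an EPT misconfiguration, which is how
	 * KVM traps MMIO accesses cheaply. */
	uint64_t wx_value = EPT_WRITABLE | EPT_EXECUTABLE;

	printf("bits 2:0 of an MMIO EPT entry = %#llx (binary 110)\n",
	       (unsigned long long)wx_value);
	return 0;
}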
arch/x86/kvm/x86.c  +5 −1

@@ -3799,7 +3799,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
 	unsigned bank_num = mcg_cap & 0xff, bank;
 
 	r = -EINVAL;
-	if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
+	if (!bank_num || bank_num > KVM_MAX_MCE_BANKS)
 		goto out;
 	if (mcg_cap & ~(kvm_mce_cap_supported | 0xff | 0xff0000))
 		goto out;

@@ -5282,6 +5282,10 @@ static void kvm_init_msr_list(void)
 			if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
 				continue;
 			break;
+		case MSR_IA32_UMWAIT_CONTROL:
+			if (!kvm_cpu_cap_has(X86_FEATURE_WAITPKG))
+				continue;
+			break;
 		case MSR_IA32_RTIT_CTL:
 		case MSR_IA32_RTIT_STATUS:
 			if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT))
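kvm_init_msr_list() drops MSRs whose backing feature is not supported on the host, which is why MSR_IA32_UMWAIT_CONTROL is now gated on WAITPKG. A self-contained sketch of that filtering pattern; the tables and helpers below are invented for illustration and are not KVM's actual structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative feature IDs, not the kernel's X86_FEATURE_* values. */
enum feature { FEAT_RDTSCP, FEAT_WAITPKG, FEAT_INTEL_PT };

struct msr_entry {
	uint32_t index;
	enum feature required;	/* feature that must be present */
};

/* Illustrative table: each MSR paired with the feature that gates it. */
static const struct msr_entry msrs_to_save[] = {
	{ 0xc0000103 /* MSR_TSC_AUX */,             FEAT_RDTSCP   },
	{ 0x000000e1 /* MSR_IA32_UMWAIT_CONTROL */, FEAT_WAITPKG  },
	{ 0x00000570 /* MSR_IA32_RTIT_CTL */,       FEAT_INTEL_PT },
};

static bool cpu_cap_has(enum feature f)
{
	/* Pretend only RDTSCP is supported on this host. */
	return f == FEAT_RDTSCP;
}

int main(void)
{
	/* Keep an MSR in the exported list only if its feature exists,
	 * mirroring the "if (!kvm_cpu_cap_has(...)) continue;" pattern. */
	for (size_t i = 0; i < sizeof(msrs_to_save) / sizeof(msrs_to_save[0]); i++) {
		if (!cpu_cap_has(msrs_to_save[i].required))
			continue;
		printf("saving MSR %#x\n", (unsigned)msrs_to_save[i].index);
	}
	return 0;
}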