arch/x86/kvm/vmx/nested.c +17 −11

@@ -2438,22 +2438,28 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
 
 	/*
 	 * Whether page-faults are trapped is determined by a combination of
-	 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
-	 * If enable_ept, L0 doesn't care about page faults and we should
-	 * set all of these to L1's desires. However, if !enable_ept, L0 does
-	 * care about (at least some) page faults, and because it is not easy
-	 * (if at all possible?) to merge L0 and L1's desires, we simply ask
-	 * to exit on each and every L2 page fault. This is done by setting
-	 * MASK=MATCH=0 and (see below) EB.PF=1.
+	 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. If L0
+	 * doesn't care about page faults then we should set all of these to
+	 * L1's desires. However, if L0 does care about (some) page faults, it
+	 * is not easy (if at all possible?) to merge L0 and L1's desires, we
+	 * simply ask to exit on each and every L2 page fault. This is done by
+	 * setting MASK=MATCH=0 and (see below) EB.PF=1.
 	 * Note that below we don't need special code to set EB.PF beyond the
 	 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
 	 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
 	 * !enable_ept, EB.PF is 1, so the "or" will always be 1.
 	 */
-	vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
-		enable_ept ? vmcs12->page_fault_error_code_mask : 0);
-	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
-		enable_ept ? vmcs12->page_fault_error_code_match : 0);
+	if (vmx_need_pf_intercept(&vmx->vcpu)) {
+		/*
+		 * TODO: if both L0 and L1 need the same MASK and MATCH,
+		 * go ahead and use it?
+		 */
+		vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
+		vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
+	} else {
+		vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, vmcs12->page_fault_error_code_mask);
+		vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, vmcs12->page_fault_error_code_match);
+	}
 
 	if (cpu_has_vmx_apicv()) {
 		vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
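The filtering rule the comment above relies on can be stated compactly: a guest #PF causes a VM-exit iff ((error_code & PFEC_MASK) == PFEC_MATCH) agrees with EB.PF. A minimal standalone sketch of that rule (plain C, not kernel code; pf_causes_vmexit is a made-up name for illustration):

/*
 * Standalone illustration (not kernel code) of the VMX page-fault
 * filtering rule.  pf_causes_vmexit() is a hypothetical name.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool pf_causes_vmexit(uint32_t error_code, uint32_t pfec_mask,
			     uint32_t pfec_match, bool eb_pf)
{
	/* VM-exit iff ((EC & MASK) == MATCH) agrees with EB.PF. */
	return ((error_code & pfec_mask) == pfec_match) == eb_pf;
}

int main(void)
{
	/* MASK=MATCH=0 makes every error code "match" ... */
	assert(pf_causes_vmexit(0x2, 0, 0, true));	/* write fault */
	assert(pf_causes_vmexit(0x11, 0, 0, true));	/* instruction fetch */
	/* ... so with EB.PF=1 every L2 #PF exits, as the comment says. */
	return 0;
}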
arch/x86/kvm/vmx/vmx.c +1 −1

@@ -780,7 +780,7 @@ void update_exception_bitmap(struct kvm_vcpu *vcpu)
 		eb |= 1u << BP_VECTOR;
 	if (to_vmx(vcpu)->rmode.vm86_active)
 		eb = ~0;
-	if (enable_ept)
+	if (!vmx_need_pf_intercept(vcpu))
 		eb &= ~(1u << PF_VECTOR);
 
 	/* When we are running a nested L2 guest and L1 specified for it a
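This one-line change is what keeps the "or"-ing argument in the nested.c comment true: vmcs01's EB.PF is clear exactly when L0 doesn't need the intercept. A toy check of that invariant (plain C, outside the kernel):

/*
 * Toy check (not kernel code): when L0 doesn't intercept #PF, vmcs01's
 * EB.PF is 0 and the OR takes L1's value; when L0 does intercept,
 * EB.PF is 1 and the OR is always 1.
 */
#include <assert.h>
#include <stdint.h>

#define PF_VECTOR 14

int main(void)
{
	/* L0 doesn't intercept: L1's choice passes through unchanged. */
	uint32_t eb01 = 0;
	uint32_t eb12 = 1u << PF_VECTOR;
	assert((((eb01 | eb12) >> PF_VECTOR) & 1) == 1);
	eb12 = 0;
	assert((((eb01 | eb12) >> PF_VECTOR) & 1) == 0);

	/* L0 intercepts: the merged bitmap always traps #PF. */
	eb01 = 1u << PF_VECTOR;
	assert((((eb01 | eb12) >> PF_VECTOR) & 1) == 1);
	return 0;
}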
arch/x86/kvm/vmx/vmx.h +5 −0

@@ -550,6 +550,11 @@ static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
 		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
 }
 
+static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
+{
+	return !enable_ept;
+}
+
 void dump_vmcs(void);
 
 #endif /* __KVM_X86_VMX_H */
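The helper just centralizes the !enable_ept test so the two call sites above share one "does L0 need to intercept #PF?" predicate. A minimal standalone mock of the intended behavior (not kernel code; enable_ept stubbed as a plain bool and the vcpu argument dropped, since this version of the helper doesn't consult per-vCPU state):

/*
 * Standalone mock (not kernel code): enable_ept stands in for the
 * module parameter.
 */
#include <assert.h>
#include <stdbool.h>

static bool enable_ept;

static inline bool vmx_need_pf_intercept(void)
{
	return !enable_ept;
}

int main(void)
{
	enable_ept = true;	/* EPT: guest #PFs stay in the guest */
	assert(!vmx_need_pf_intercept());
	enable_ept = false;	/* shadow paging: L0 must see every #PF */
	assert(vmx_need_pf_intercept());
	return 0;
}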