Commit c33f6f22 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Split kvm_is_valid_cr4() and export only the non-vendor bits



Split the common x86 parts of kvm_is_valid_cr4(), i.e. the reserved bits
checks, into a separate helper, __kvm_is_valid_cr4(), and export only the
inner helper to vendor code in order to prevent nested VMX from calling
back into vmx_is_valid_cr4() via kvm_is_valid_cr4().

On SVM, this is a nop as SVM doesn't place any additional restrictions on
CR4.

On VMX, this is also currently a nop, but only because nested VMX is
missing checks on reserved CR4 bits for nested VM-Enter.  That bug will
be fixed in a future patch, and could simply use kvm_is_valid_cr4() as-is,
but nVMX has _another_ bug where VMXON emulation doesn't enforce VMX's
restrictions on CR0/CR4.  The cleanest and most intuitive way to fix the
VMXON bug is to use nested_host_cr{0,4}_valid().  If the CR4 variant
routes through kvm_is_valid_cr4(), using nested_host_cr4_valid() won't do
the right thing for the VMXON case as vmx_is_valid_cr4() enforces VMX's
restrictions if and only if the vCPU is post-VMXON.
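
For reference, a minimal self-contained sketch of the resulting split (user-space C; the struct, the reserved-bit mask, and the vendor_is_valid_cr4() stub standing in for the kvm_x86_is_valid_cr4 static call are illustrative assumptions, not the kernel's definitions):

	/*
	 * Minimal user-space sketch of the split; types, the reserved-bit
	 * mask, and the vendor hook are illustrative stand-ins, not the
	 * kernel's definitions.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define X86_CR4_VMXE	(1UL << 13)

	struct kvm_vcpu {
		unsigned long cr4_guest_rsvd_bits;
		bool vmxon;			/* stand-in for post-VMXON state */
	};

	static const unsigned long cr4_reserved_bits = ~((1UL << 26) - 1);	/* illustrative */

	/* Common x86 checks only: architectural and per-vCPU reserved bits. */
	static bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
	{
		if (cr4 & cr4_reserved_bits)
			return false;

		if (cr4 & vcpu->cr4_guest_rsvd_bits)
			return false;

		return true;
	}

	/* Stand-in for the kvm_x86_is_valid_cr4 static call into VMX/SVM. */
	static bool vendor_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
	{
		/* e.g. VMX requires CR4.VMXE to stay set while post-VMXON. */
		if (vcpu->vmxon && !(cr4 & X86_CR4_VMXE))
			return false;

		return true;
	}

	/* Common checks plus vendor policy; what guest CR4 writes go through. */
	static bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
	{
		return __kvm_is_valid_cr4(vcpu, cr4) &&
		       vendor_is_valid_cr4(vcpu, cr4);
	}

	int main(void)
	{
		struct kvm_vcpu vcpu = { .cr4_guest_rsvd_bits = 0, .vmxon = true };
		unsigned long cr4 = 0;	/* no reserved bits set, but VMXE is clear */

		/* Reserved-bit checks alone pass; the vendor hook rejects it. */
		printf("__kvm_is_valid_cr4: %d\n", __kvm_is_valid_cr4(&vcpu, cr4));
		printf("kvm_is_valid_cr4:   %d\n", kvm_is_valid_cr4(&vcpu, cr4));
		return 0;
	}

Guest CR4 writes keep going through kvm_is_valid_cr4() and so still pick up the vendor restrictions, while nested-state consistency checks that must not loop back into vendor code call __kvm_is_valid_cr4() directly, as in the SVM hunk below.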

Cc: stable@vger.kernel.org
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220607213604.3346000-2-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent cfe12e64
arch/x86/kvm/svm/nested.c  +2 −1
@@ -325,7 +325,8 @@ static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu,
 			return false;
 	}
 
-	if (CC(!kvm_is_valid_cr4(vcpu, save->cr4)))
+	/* Note, SVM doesn't have any additional restrictions on CR4. */
+	if (CC(!__kvm_is_valid_cr4(vcpu, save->cr4)))
 		return false;
 
 	if (CC(!kvm_valid_efer(vcpu, save->efer)))
arch/x86/kvm/vmx/vmx.c  +2 −2
@@ -3238,8 +3238,8 @@ static bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	/*
 	 * We operate under the default treatment of SMM, so VMX cannot be
-	 * enabled under SMM.  Note, whether or not VMXE is allowed at all is
-	 * handled by kvm_is_valid_cr4().
+	 * enabled under SMM.  Note, whether or not VMXE is allowed at all,
+	 * i.e. is a reserved bit, is handled by common x86 code.
 	 */
 	if ((cr4 & X86_CR4_VMXE) && is_smm(vcpu))
 		return false;
arch/x86/kvm/x86.c  +9 −3
@@ -1081,7 +1081,7 @@ int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_xsetbv);
 
-bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	if (cr4 & cr4_reserved_bits)
 		return false;
@@ -1089,9 +1089,15 @@ bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	if (cr4 & vcpu->arch.cr4_guest_rsvd_bits)
 		return false;
 
-	return static_call(kvm_x86_is_valid_cr4)(vcpu, cr4);
+	return true;
+}
+EXPORT_SYMBOL_GPL(__kvm_is_valid_cr4);
+
+static bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+{
+	return __kvm_is_valid_cr4(vcpu, cr4) &&
+	       static_call(kvm_x86_is_valid_cr4)(vcpu, cr4);
 }
-EXPORT_SYMBOL_GPL(kvm_is_valid_cr4);
 
 void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4)
 {
arch/x86/kvm/x86.h  +1 −1
@@ -434,7 +434,7 @@ static inline void kvm_machine_check(void)
 void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
 int kvm_spec_ctrl_test_value(u64 value);
-bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
 			      struct x86_exception *e);
 int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);