Commit decddf55 authored by Josh Poimboeuf, committed by Zheng Zengkai

KVM: VMX: Convert launched argument to flags

stable inclusion
from stable-v5.10.133
commit 84061fff2ad98a7809f00e88a54f584f84830388
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I5PTAS
CVE: CVE-2022-29900,CVE-2022-23816,CVE-2022-29901

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=84061fff2ad98a7809f00e88a54f584f84830388



--------------------------------

commit bb066506 upstream.

Convert __vmx_vcpu_run()'s 'launched' argument to 'flags', in
preparation for doing SPEC_CTRL handling immediately after vmexit, which
will need another flag.

This is much easier than adding a fourth argument, because this code
supports both 32-bit and 64-bit, and the fourth argument on 32-bit would
have to be pushed on the stack.
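
A condensed C sketch of the interface change (mirroring the hunks
below, with the structs pared down to the one field the new helper
reads and kernel types like bool assumed; nothing here goes beyond
the diff itself):

    #define VMX_RUN_VMRESUME	(1 << 0)

    struct loaded_vmcs { bool launched; };
    struct vcpu_vmx { struct loaded_vmcs *loaded_vmcs; };

    /* Old: a dedicated bool argument per condition.
     * bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
     *                     bool launched);
     */

    /* New: one flags word; later conditions become extra bits rather
     * than extra (stack-passed, on 32-bit) arguments. */
    unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
    {
            unsigned int flags = 0;

            if (vmx->loaded_vmcs->launched)
                    flags |= VMX_RUN_VMRESUME;

            return flags;
    }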

Note that __vmx_vcpu_run_flags() is called outside of the noinstr
critical section because it will soon start calling potentially
traceable functions.
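
(Concretely, in the vmx.c hunks below: vmx_vcpu_run() now evaluates
__vmx_vcpu_run_flags(vmx) in ordinary, instrumentable code and passes
only the resulting integer into the noinstr vmx_vcpu_enter_exit(), so
the helper itself never executes inside the critical section.)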

Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

conflict:
	arch/x86/kvm/vmx/vmx.h

Signed-off-by: Lin Yujun <linyujun809@huawei.com>
Reviewed-by: Zhang Jianhua <chris.zjh@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
parent 1db122e4
arch/x86/kvm/vmx/nested.c (+1 −1)
@@ -3081,7 +3081,7 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
	}

	vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
-				 vmx->loaded_vmcs->launched);
+				 __vmx_vcpu_run_flags(vmx));

	if (vmx->msr_autoload.host.nr)
		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
arch/x86/kvm/vmx/run_flags.h (new file, +7 −0)
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __KVM_X86_VMX_RUN_FLAGS_H
+#define __KVM_X86_VMX_RUN_FLAGS_H
+
+#define VMX_RUN_VMRESUME	(1 << 0)
+
+#endif /* __KVM_X86_VMX_RUN_FLAGS_H */
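
(For context: VMX_RUN_VMRESUME is the only bit defined so far. The
second bit is added by the follow-up SPEC_CTRL patch this commit
prepares for; in the upstream series that bit is VMX_RUN_SAVE_SPEC_CTRL,
defined as (1 << 1).)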
arch/x86/kvm/vmx/vmenter.S (+5 −4)
@@ -5,6 +5,7 @@
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include <asm/segment.h>
#include "run_flags.h"

#define WORD_SIZE (BITS_PER_LONG / 8)

@@ -34,7 +35,7 @@
 * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
 * @vmx:	struct vcpu_vmx * (forwarded to vmx_update_host_rsp)
 * @regs:	unsigned long * (to guest registers)
- * @launched:	%true if the VMCS has been launched
+ * @flags:	VMX_RUN_VMRESUME: use VMRESUME instead of VMLAUNCH
 *
 * Returns:
 *	0 on VM-Exit, 1 on VM-Fail
@@ -59,7 +60,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
	 */
	push %_ASM_ARG2

-	/* Copy @launched to BL, _ASM_ARG3 is volatile. */
+	/* Copy @flags to BL, _ASM_ARG3 is volatile. */
	mov %_ASM_ARG3B, %bl

	lea (%_ASM_SP), %_ASM_ARG2
@@ -69,7 +70,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
	mov (%_ASM_SP), %_ASM_AX

	/* Check if vmlaunch or vmresume is needed */
-	testb %bl, %bl
+	testb $VMX_RUN_VMRESUME, %bl

	/* Load guest registers.  Don't clobber flags. */
	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
@@ -92,7 +93,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
	mov VCPU_RAX(%_ASM_AX), %_ASM_AX

	/* Check EFLAGS.ZF from 'testb' above */
-	je .Lvmlaunch
+	jz .Lvmlaunch

	/*
	 * After a successful VMRESUME/VMLAUNCH, control flow "magically"
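
(On the two changes above: "testb $VMX_RUN_VMRESUME, %bl" sets ZF when
the VMX_RUN_VMRESUME bit in %bl is clear, so the code jumps to
.Lvmlaunch only on a first launch and falls through to VMRESUME
otherwise. "je" and "jz" are the same instruction; the mnemonic switch
to "jz" reflects that a bit is now being tested for zero rather than
two values compared for equality.)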
arch/x86/kvm/vmx/vmx.c (+14 −3)
@@ -945,6 +945,16 @@ static bool msr_write_intercepted(struct vcpu_vmx *vmx, u32 msr)
	return true;
}

+unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
+{
+	unsigned int flags = 0;
+
+	if (vmx->loaded_vmcs->launched)
+		flags |= VMX_RUN_VMRESUME;
+
+	return flags;
+}

static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
		unsigned long entry, unsigned long exit)
{
@@ -6818,7 +6828,8 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
}

static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
-					struct vcpu_vmx *vmx)
+					struct vcpu_vmx *vmx,
+					unsigned long flags)
{
	/*
	 * VMENTER enables interrupts (host state), but the kernel state is
@@ -6855,7 +6866,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
		native_write_cr2(vcpu->arch.cr2);

	vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
-				   vmx->loaded_vmcs->launched);
+				   flags);

	vcpu->arch.cr2 = native_read_cr2();

@@ -6956,7 +6967,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
	x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);

	/* The actual VMENTER/EXIT is in the .noinstr.text section. */
-	vmx_vcpu_enter_exit(vcpu, vmx);
+	vmx_vcpu_enter_exit(vcpu, vmx, __vmx_vcpu_run_flags(vmx));

	/*
	 * We do not use IBRS in the kernel. If this vCPU has used the
arch/x86/kvm/vmx/vmx.h (+4 −1)
@@ -13,6 +13,7 @@
#include "vmcs.h"
#include "vmx_ops.h"
#include "cpuid.h"
#include "run_flags.h"

extern const u32 vmx_msr_index[];

@@ -394,7 +395,9 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
-bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
+unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx);
+bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
+		    unsigned int flags);
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu,