Commit 27b4a9c4 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Rename GPR accessors to make mode-aware variants the defaults



Append raw to the direct variants of kvm_register_read/write(), and
drop the "l" from the mode-aware variants.  I.e. make the mode-aware
variants the default, and make the direct variants scary sounding so as
to discourage use.  Accessing the full 64-bit values irrespective of
mode is rarely the desired behavior.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210422022128.3464144-10-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent bc9eff67
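
For context: the mode-aware kvm_register_read()/kvm_register_write() that this patch makes the default truncate the value to 32 bits when the vCPU is not in 64-bit mode; only the "raw" variants touch the full 64 bits unconditionally. A minimal sketch of the mode-aware helpers, assuming the is_64_bit_mode() predicate and the _raw accessors introduced below (illustrative only, not the exact in-tree definitions):

static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
	unsigned long val = kvm_register_read_raw(vcpu, reg);

	/* Outside 64-bit mode, only the low 32 bits are architecturally visible. */
	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu, int reg,
				      unsigned long val)
{
	/* Drop the upper 32 bits before writing when not in 64-bit mode. */
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	kvm_register_write_raw(vcpu, reg, val);
}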
arch/x86/kvm/kvm_cache_regs.h +12 −7
@@ -62,7 +62,12 @@ static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

-static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
+/*
+ * The "raw" register helpers are only for cases where the full 64 bits of a
+ * register are read/written irrespective of current vCPU mode.  In other words,
+ * odds are good you shouldn't be using the raw variants.
+ */
+static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return 0;
@@ -73,7 +78,7 @@ static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
	return vcpu->arch.regs[reg];
}

-static inline void kvm_register_write(struct kvm_vcpu *vcpu, int reg,
+static inline void kvm_register_write_raw(struct kvm_vcpu *vcpu, int reg,
+					  unsigned long val)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
@@ -85,22 +90,22 @@ static inline void kvm_register_write(struct kvm_vcpu *vcpu, int reg,

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
-	return kvm_register_read(vcpu, VCPU_REGS_RIP);
+	return kvm_register_read_raw(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
-	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
+	kvm_register_write_raw(vcpu, VCPU_REGS_RIP, val);
}

static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
-	return kvm_register_read(vcpu, VCPU_REGS_RSP);
+	return kvm_register_read_raw(vcpu, VCPU_REGS_RSP);
}

static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
-	kvm_register_write(vcpu, VCPU_REGS_RSP, val);
+	kvm_register_write_raw(vcpu, VCPU_REGS_RSP, val);
}

static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
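
The new comment above states the rule; as a hypothetical illustration (not part of this patch) of why the raw variants are rarely what a VM-exit handler wants, consider a guest that last wrote a GPR in 64-bit mode and has since switched to 32-bit mode:

/* Hypothetical example only; names and values are illustrative. */
static void example_raw_vs_mode_aware(struct kvm_vcpu *vcpu)
{
	unsigned long raw, val;

	/* Value written while the guest was in 64-bit mode. */
	kvm_register_write_raw(vcpu, VCPU_REGS_RAX, 0xffffffff00000001ull);

	/* Full 64-bit value, including bits a 32-bit guest cannot see. */
	raw = kvm_register_read_raw(vcpu, VCPU_REGS_RAX);

	/* Mode-aware read: truncated to 0x00000001 while the vCPU is in 32-bit mode. */
	val = kvm_register_read(vcpu, VCPU_REGS_RAX);
}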
arch/x86/kvm/svm/svm.c +4 −4
@@ -2457,7 +2457,7 @@ static int cr_interception(struct kvm_vcpu *vcpu)
	err = 0;
	if (cr >= 16) { /* mov to cr */
		cr -= 16;
-		val = kvm_register_readl(vcpu, reg);
+		val = kvm_register_read(vcpu, reg);
		trace_kvm_cr_write(cr, val);
		switch (cr) {
		case 0:
@@ -2503,7 +2503,7 @@ static int cr_interception(struct kvm_vcpu *vcpu)
			kvm_queue_exception(vcpu, UD_VECTOR);
			return 1;
		}
-		kvm_register_writel(vcpu, reg, val);
+		kvm_register_write(vcpu, reg, val);
		trace_kvm_cr_read(cr, val);
	}
	return kvm_complete_insn_gp(vcpu, err);
@@ -2569,11 +2569,11 @@ static int dr_interception(struct kvm_vcpu *vcpu)
	dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
	if (dr >= 16) { /* mov to DRn  */
		dr -= 16;
-		val = kvm_register_readl(vcpu, reg);
+		val = kvm_register_read(vcpu, reg);
		err = kvm_set_dr(vcpu, dr, val);
	} else {
		kvm_get_dr(vcpu, dr, &val);
-		kvm_register_writel(vcpu, reg, val);
+		kvm_register_write(vcpu, reg, val);
	}

	return kvm_complete_insn_gp(vcpu, err);
arch/x86/kvm/vmx/nested.c +10 −10
@@ -4619,9 +4619,9 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
	else if (addr_size == 0)
		off = (gva_t)sign_extend64(off, 15);
	if (base_is_valid)
-		off += kvm_register_readl(vcpu, base_reg);
+		off += kvm_register_read(vcpu, base_reg);
	if (index_is_valid)
-		off += kvm_register_readl(vcpu, index_reg) << scaling;
+		off += kvm_register_read(vcpu, index_reg) << scaling;
	vmx_get_segment(vcpu, &s, seg_reg);

	/*
@@ -5023,7 +5023,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
		return nested_vmx_failInvalid(vcpu);

	/* Decode instruction info and find the field to read */
-	field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));
+	field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));

	offset = vmcs_field_to_offset(field);
	if (offset < 0)
@@ -5041,7 +5041,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
	 * on the guest's mode (32 or 64 bit), not on the given field's length.
	 */
	if (instr_info & BIT(10)) {
-		kvm_register_writel(vcpu, (((instr_info) >> 3) & 0xf), value);
+		kvm_register_write(vcpu, (((instr_info) >> 3) & 0xf), value);
	} else {
		len = is_64_bit_mode(vcpu) ? 8 : 4;
		if (get_vmx_mem_address(vcpu, exit_qualification,
@@ -5115,7 +5115,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
		return nested_vmx_failInvalid(vcpu);

	if (instr_info & BIT(10))
-		value = kvm_register_readl(vcpu, (((instr_info) >> 3) & 0xf));
+		value = kvm_register_read(vcpu, (((instr_info) >> 3) & 0xf));
	else {
		len = is_64_bit_mode(vcpu) ? 8 : 4;
		if (get_vmx_mem_address(vcpu, exit_qualification,
@@ -5126,7 +5126,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
			return kvm_handle_memory_failure(vcpu, r, &e);
	}

-	field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));
+	field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));

	offset = vmcs_field_to_offset(field);
	if (offset < 0)
@@ -5323,7 +5323,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
		return 1;

	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
-	type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
+	type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);

	types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;

@@ -5403,7 +5403,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
		return 1;

	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
-	type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
+	type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);

	types = (vmx->nested.msrs.vpid_caps &
			VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
@@ -5659,7 +5659,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
	switch ((exit_qualification >> 4) & 3) {
	case 0: /* mov to cr */
		reg = (exit_qualification >> 8) & 15;
-		val = kvm_register_readl(vcpu, reg);
+		val = kvm_register_read(vcpu, reg);
		switch (cr) {
		case 0:
			if (vmcs12->cr0_guest_host_mask &
@@ -5745,7 +5745,7 @@ static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,

	/* Decode instruction info and find the field to access */
	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
-	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+	field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));

	/* Out-of-range fields always cause a VM exit from L2 to L1 */
	if (field >> 15)
arch/x86/kvm/vmx/vmx.c +6 −6
@@ -5079,7 +5079,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
	reg = (exit_qualification >> 8) & 15;
	switch ((exit_qualification >> 4) & 3) {
	case 0: /* mov to cr */
-		val = kvm_register_readl(vcpu, reg);
+		val = kvm_register_read(vcpu, reg);
		trace_kvm_cr_write(cr, val);
		switch (cr) {
		case 0:
@@ -5121,12 +5121,12 @@ static int handle_cr(struct kvm_vcpu *vcpu)
		case 3:
			WARN_ON_ONCE(enable_unrestricted_guest);
			val = kvm_read_cr3(vcpu);
-			kvm_register_writel(vcpu, reg, val);
+			kvm_register_write(vcpu, reg, val);
			trace_kvm_cr_read(cr, val);
			return kvm_skip_emulated_instruction(vcpu);
		case 8:
			val = kvm_get_cr8(vcpu);
-			kvm_register_writel(vcpu, reg, val);
+			kvm_register_write(vcpu, reg, val);
			trace_kvm_cr_read(cr, val);
			return kvm_skip_emulated_instruction(vcpu);
		}
@@ -5199,10 +5199,10 @@ static int handle_dr(struct kvm_vcpu *vcpu)
		unsigned long val;

		kvm_get_dr(vcpu, dr, &val);
-		kvm_register_writel(vcpu, reg, val);
+		kvm_register_write(vcpu, reg, val);
		err = 0;
	} else {
-		err = kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg));
+		err = kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg));
	}

out:
@@ -5554,7 +5554,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
	}

	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
-	type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
+	type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);

	if (type > 3) {
		kvm_inject_gp(vcpu, 0);
arch/x86/kvm/x86.c +4 −4
@@ -7016,12 +7016,12 @@ static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt)

static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
{
-	return kvm_register_read(emul_to_vcpu(ctxt), reg);
+	return kvm_register_read_raw(emul_to_vcpu(ctxt), reg);
}

static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
{
-	kvm_register_write(emul_to_vcpu(ctxt), reg, val);
+	kvm_register_write_raw(emul_to_vcpu(ctxt), reg, val);
}

static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
@@ -8701,7 +8701,7 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
	put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu));

	for (i = 0; i < 8; i++)
-		put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read(vcpu, i));
+		put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read_raw(vcpu, i));

	kvm_get_dr(vcpu, 6, &val);
	put_smstate(u32, buf, 0x7fcc, (u32)val);
@@ -8747,7 +8747,7 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
	int i;

	for (i = 0; i < 16; i++)
-		put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read(vcpu, i));
+		put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read_raw(vcpu, i));

	put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu));
	put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu));