Commit 1fb85d06 authored by Adrian Hunter, committed by Peter Zijlstra
Browse files

x86: Share definition of __is_canonical_address()



Reduce code duplication by moving canonical address code to a common header
file.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220131072453.2839535-3-adrian.hunter@intel.com
parent c243cecb
Loading
Loading
Loading
Loading
+2 −12
Original line number Diff line number Diff line
@@ -1350,20 +1350,10 @@ static void pt_addr_filters_fini(struct perf_event *event)
}

#ifdef CONFIG_X86_64
static u64 canonical_address(u64 vaddr, u8 vaddr_bits)
{
	return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
}

static u64 is_canonical_address(u64 vaddr, u8 vaddr_bits)
{
	return canonical_address(vaddr, vaddr_bits) == vaddr;
}

/* Clamp to a canonical address greater-than-or-equal-to the address given */
static u64 clamp_to_ge_canonical_addr(u64 vaddr, u8 vaddr_bits)
{
	return is_canonical_address(vaddr, vaddr_bits) ?
	return __is_canonical_address(vaddr, vaddr_bits) ?
	       vaddr :
	       -BIT_ULL(vaddr_bits - 1);
}
@@ -1371,7 +1361,7 @@ static u64 clamp_to_ge_canonical_addr(u64 vaddr, u8 vaddr_bits)
/* Clamp to a canonical address less-than-or-equal-to the address given */
static u64 clamp_to_le_canonical_addr(u64 vaddr, u8 vaddr_bits)
{
	return is_canonical_address(vaddr, vaddr_bits) ?
	return __is_canonical_address(vaddr, vaddr_bits) ?
	       vaddr :
	       BIT_ULL(vaddr_bits - 1) - 1;
}
+10 −0
Original line number Diff line number Diff line
@@ -71,6 +71,16 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
extern bool __virt_addr_valid(unsigned long kaddr);
#define virt_addr_valid(kaddr)	__virt_addr_valid((unsigned long) (kaddr))

/*
 * Canonicalize a virtual address: sign-extend bit (vaddr_bits - 1)
 * through the unused upper bits, as the hardware requires for a
 * canonical-form address with @vaddr_bits implemented address bits.
 */
static __always_inline u64 __canonical_address(u64 vaddr, u8 vaddr_bits)
{
	int shift = 64 - vaddr_bits;
	s64 extended = (s64)vaddr << shift;

	return extended >> shift;
}

/*
 * An address is canonical iff sign-extending its top implemented bit
 * leaves it unchanged. Returns non-zero for a canonical address.
 */
static __always_inline u64 __is_canonical_address(u64 vaddr, u8 vaddr_bits)
{
	return vaddr == __canonical_address(vaddr, vaddr_bits);
}

#endif	/* __ASSEMBLY__ */

#include <asm-generic/memory_model.h>
+2 −2
Original line number Diff line number Diff line
@@ -665,7 +665,7 @@ static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
static inline bool emul_is_noncanonical_address(u64 la,
						struct x86_emulate_ctxt *ctxt)
{
	return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
	return !__is_canonical_address(la, ctxt_virt_addr_bits(ctxt));
}

/*
@@ -715,7 +715,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
	case X86EMUL_MODE_PROT64:
		*linear = la;
		va_bits = ctxt_virt_addr_bits(ctxt);
		if (get_canonical(la, va_bits) != la)
		if (!__is_canonical_address(la, va_bits))
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
+1 −1
Original line number Diff line number Diff line
@@ -1735,7 +1735,7 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
		 * value, and that something deterministic happens if the guest
		 * invokes 64-bit SYSENTER.
		 */
		data = get_canonical(data, vcpu_virt_addr_bits(vcpu));
		data = __canonical_address(data, vcpu_virt_addr_bits(vcpu));
		break;
	case MSR_TSC_AUX:
		if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
+1 −6
Original line number Diff line number Diff line
@@ -211,14 +211,9 @@ static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
}

static inline u64 get_canonical(u64 la, u8 vaddr_bits)
{
	return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
}

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
	return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
	return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
}

static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
Loading