Commit c199a009 authored by Marc Zyngier

Merge branch 'kvm-arm64/el2-obj-v4.1' into kvmarm-master/next-WIP

Signed-off-by: Marc Zyngier <maz@kernel.org>
parents 9ebcfadb 6de7dd31
arch/arm64/include/asm/kvm_asm.h  +56 −10
@@ -42,22 +42,70 @@

#include <linux/mm.h>

-/* Translate a kernel address of @sym into its equivalent linear mapping */
-#define kvm_ksym_ref(sym)						\
+/*
+ * Translate name of a symbol defined in nVHE hyp to the name seen
+ * by kernel proper. All nVHE symbols are prefixed by the build system
+ * to avoid clashes with the VHE variants.
+ */
+#define kvm_nvhe_sym(sym)	__kvm_nvhe_##sym
+
+#define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
+#define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]
+
+/*
+ * Define a pair of symbols sharing the same name but one defined in
+ * VHE and the other in nVHE hyp implementations.
+ */
+#define DECLARE_KVM_HYP_SYM(sym)		\
+	DECLARE_KVM_VHE_SYM(sym);		\
+	DECLARE_KVM_NVHE_SYM(sym)
+
+#define CHOOSE_VHE_SYM(sym)	sym
+#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)
+
+#ifndef __KVM_NVHE_HYPERVISOR__
+/*
+ * BIG FAT WARNINGS:
+ *
+ * - Don't be tempted to change the following is_kernel_in_hyp_mode()
+ *   to has_vhe(). has_vhe() is implemented as a *final* capability,
+ *   while this is used early at boot time, when the capabilities are
+ *   not final yet....
+ *
+ * - Don't let the nVHE hypervisor have access to this, as it will
+ *   pick the *wrong* symbol (yes, it runs at EL2...).
+ */
+#define CHOOSE_HYP_SYM(sym)	(is_kernel_in_hyp_mode() ? CHOOSE_VHE_SYM(sym) \
+					   : CHOOSE_NVHE_SYM(sym))
+#else
+/* The nVHE hypervisor shouldn't even try to access anything */
+extern void *__nvhe_undefined_symbol;
+#define CHOOSE_HYP_SYM(sym)	__nvhe_undefined_symbol
+#endif
+
+/* Translate a kernel address @ptr into its equivalent linear mapping */
+#define kvm_ksym_ref(ptr)						\
	({								\
-		void *val = &sym;					\
+		void *val = (ptr);					\
		if (!is_kernel_in_hyp_mode())				\
-			val = lm_alias(&sym);				\
+			val = lm_alias((ptr));				\
		val;							\
	 })
+#define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(kvm_nvhe_sym(sym))

struct kvm;
struct kvm_vcpu;

-extern char __kvm_hyp_init[];
-extern char __kvm_hyp_init_end[];
+DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
+DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
+#define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
+#define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)

-extern char __kvm_hyp_vector[];
+#ifdef CONFIG_KVM_INDIRECT_VECTORS
+extern atomic_t arm64_el2_vector_last_slot;
+DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
+#define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
+#endif

extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
@@ -66,9 +114,7 @@ extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);

extern void __kvm_timer_set_cntvoff(u64 cntvoff);

-extern int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu);
-
-extern int __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu);
+extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

extern void __kvm_enable_ssbs(void);
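
A sketch of how the new macros compose for a symbol declared with
DECLARE_KVM_HYP_SYM(), e.g. __kvm_hyp_vector above; this is an
illustrative expansion, not part of the diff:

	/* DECLARE_KVM_HYP_SYM(__kvm_hyp_vector) expands to: */
	extern char __kvm_hyp_vector[];			/* VHE copy */
	extern char __kvm_nvhe___kvm_hyp_vector[];	/* nVHE copy, prefixed by the build system */

	/* in kernel proper, CHOOSE_HYP_SYM(__kvm_hyp_vector) expands to: */
	(is_kernel_in_hyp_mode() ? __kvm_hyp_vector : __kvm_nvhe___kvm_hyp_vector)

In objects built with __KVM_NVHE_HYPERVISOR__ defined, CHOOSE_HYP_SYM()
instead resolves to __nvhe_undefined_symbol, which is declared but never
defined, so an accidental reference from hyp code fails at link time
rather than silently picking the wrong world's symbol.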

arch/arm64/include/asm/kvm_emulate.h  +1 −1
@@ -516,7 +516,7 @@ static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_i
 * Skip an instruction which has been emulated at hyp while most guest sysregs
 * are live.
 */
-static __always_inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
+static __always_inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
	vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
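
The __hyp_text attribute dropped above (defined in kvm_hyp.h as
__section(.hyp.text) notrace __noscs, and deleted later in this merge)
is no longer needed: this helper is inlined into callers that are now
compiled separately into VHE and nVHE objects, so placement in the hyp
section follows from the object a caller is built into rather than from
a per-function attribute. A before/after sketch, with a hypothetical
helper:

	/* before: every hyp function had to opt in to the hyp section */
	static void __hyp_text example_helper(void) { }

	/* after: code built into the nVHE objects is hyp code by construction */
	static void example_helper(void) { }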
arch/arm64/include/asm/kvm_host.h  +17 −6
@@ -338,7 +338,7 @@ struct kvm_vcpu_arch {
	struct vcpu_reset_state	reset_state;

	/* True when deferrable sysregs are loaded on the physical CPU,
-	 * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
+	 * see kvm_vcpu_load_sysregs_vhe and kvm_vcpu_put_sysregs_vhe. */
	bool sysregs_loaded_on_cpu;

	/* Guest PV state */
@@ -448,6 +448,18 @@ void kvm_arm_resume_guest(struct kvm *kvm);

u64 __kvm_call_hyp(void *hypfn, ...);

+#define kvm_call_hyp_nvhe(f, ...)					\
+	do {								\
+		DECLARE_KVM_NVHE_SYM(f);				\
+		__kvm_call_hyp(kvm_ksym_ref_nvhe(f), ##__VA_ARGS__);	\
+	} while(0)
+
+#define kvm_call_hyp_nvhe_ret(f, ...)					\
+	({								\
+		DECLARE_KVM_NVHE_SYM(f);				\
+		__kvm_call_hyp(kvm_ksym_ref_nvhe(f), ##__VA_ARGS__);	\
+	})
+
/*
 * The couple of isb() below are there to guarantee the same behaviour
 * on VHE as on !VHE, where the eret to EL1 acts as a context
@@ -459,7 +471,7 @@ u64 __kvm_call_hyp(void *hypfn, ...);
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
-			__kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
+			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
		}							\
	} while(0)

@@ -471,8 +483,7 @@ u64 __kvm_call_hyp(void *hypfn, ...);
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
-			ret = __kvm_call_hyp(kvm_ksym_ref(f),		\
-					     ##__VA_ARGS__);		\
+			ret = kvm_call_hyp_nvhe_ret(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
@@ -628,8 +639,8 @@ static inline int kvm_arm_have_ssbd(void)
	}
}

-void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
-void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);
+void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
+void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);

int kvm_set_ipa_limit(void);
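
A sketch of what the reworked kvm_call_hyp() generates on a non-VHE
system, taking __kvm_flush_vm_context (declared in kvm_asm.h above) as
the callee; this is an illustrative expansion, not part of the diff:

	/* kvm_call_hyp(__kvm_flush_vm_context) expands roughly to: */
	do {
		extern char __kvm_nvhe___kvm_flush_vm_context[];	/* DECLARE_KVM_NVHE_SYM() */
		__kvm_call_hyp(kvm_ksym_ref(__kvm_nvhe___kvm_flush_vm_context));
	} while(0);

Because DECLARE_KVM_NVHE_SYM() sits inside the macro, callers no longer
have to declare the prefixed hyp symbol themselves, and
kvm_ksym_ref_nvhe() translates it to its linear-map alias before the
hypercall. kvm_call_hyp_ret() follows the same pattern via
kvm_call_hyp_nvhe_ret().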

arch/arm64/include/asm/kvm_hyp.h  +11 −4
@@ -12,8 +12,6 @@
#include <asm/alternative.h>
#include <asm/sysreg.h>

-#define __hyp_text __section(.hyp.text) notrace __noscs
-
#define read_sysreg_elx(r,nvh,vh)					\
	({								\
		u64 reg;						\
@@ -63,17 +61,20 @@ void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if);
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);

+#ifdef __KVM_NVHE_HYPERVISOR__
void __timer_enable_traps(struct kvm_vcpu *vcpu);
void __timer_disable_traps(struct kvm_vcpu *vcpu);
+#endif

+#ifdef __KVM_NVHE_HYPERVISOR__
void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
+#else
void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
void __sysreg32_save_state(struct kvm_vcpu *vcpu);
void __sysreg32_restore_state(struct kvm_vcpu *vcpu);
+#endif

void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
void __debug_switch_to_host(struct kvm_vcpu *vcpu);
@@ -81,11 +82,17 @@ void __debug_switch_to_host(struct kvm_vcpu *vcpu);
void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);

+#ifndef __KVM_NVHE_HYPERVISOR__
void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
void deactivate_traps_vhe_put(void);
+#endif

u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);

void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt);
+#ifdef __KVM_NVHE_HYPERVISOR__
void __noreturn __hyp_do_panic(unsigned long, ...);
+#endif

#endif /* __ARM64_KVM_HYP_H__ */
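
The net effect of the new guards, as a sketch (the file name below is
illustrative): the same header now shows each world only its own
interface, keyed off __KVM_NVHE_HYPERVISOR__, which the build rules
define when compiling the nVHE objects.

	/* arch/arm64/kvm/hyp/nvhe/foo.c, built with -D__KVM_NVHE_HYPERVISOR__: */
	#include <asm/kvm_hyp.h>
	/* sees __sysreg_save_state_nvhe(), __timer_enable_traps(), __hyp_do_panic(), ... */

	/* VHE and kernel-proper code, __KVM_NVHE_HYPERVISOR__ undefined: */
	#include <asm/kvm_hyp.h>
	/* sees sysreg_save_host_state_vhe(), activate_traps_vhe_load(), ... */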
arch/arm64/include/asm/mmu.h  +0 −7
@@ -45,13 +45,6 @@ struct bp_hardening_data {
	bp_hardening_cb_t	fn;
};

-#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ||	\
-     defined(CONFIG_HARDEN_EL2_VECTORS))
-
-extern char __bp_harden_hyp_vecs[];
-extern atomic_t arm64_el2_vector_last_slot;
-#endif  /* CONFIG_HARDEN_BRANCH_PREDICTOR || CONFIG_HARDEN_EL2_VECTORS */
-
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
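
These declarations are moved, not dropped: __bp_harden_hyp_vecs and
arm64_el2_vector_last_slot now live in kvm_asm.h (the first file in
this merge), where the vector array is declared via
DECLARE_KVM_HYP_SYM() so that both the VHE and nVHE copies exist. A
hypothetical kernel-proper user is unaffected:

	/* __bp_harden_hyp_vecs now resolves through CHOOSE_HYP_SYM(): */
	void *vecs = kvm_ksym_ref(__bp_harden_hyp_vecs);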
