Commit 5c288417 authored by Marc Zyngier, committed by Junhao He
Browse files

KVM: arm64: Add accessor for per-CPU state

mainline inclusion
from mainline-v6.9-rc1
commit 87f842c6c6543cf0dd66161fdf4b62cec804479b
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8EC9K
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=87f842c6c6543cf0dd66161fdf4b62cec804479b



--------------------------------

In order to facilitate the introduction of new per-CPU state,
add a new host_data_ptr() helper that hides some of the per-CPU
verbosity and makes it easier to move that state around in the
future.

Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Junhao He <hejunhao3@huawei.com>
parent 15861d02
Loading
Loading
Loading
Loading
+37 −0
Original line number Diff line number Diff line
@@ -450,6 +450,17 @@ struct kvm_cpu_context {
	struct kvm_vcpu *__hyp_running_vcpu;
};

/*
 * This structure is instantiated on a per-CPU basis, and contains
 * data that is:
 *
 * - tied to a single physical CPU, and
 * - either has a lifetime that does not extend past vcpu_put()
 * - or is an invariant for the lifetime of the system
 *
 * Use host_data_ptr(field) as a way to access a pointer to such a
 * field.
 */
struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
};
@@ -1129,6 +1140,32 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

/*
 * How we access per-CPU host data depends on where we access it from,
 * and the mode we're in:
 *
 * - VHE and nVHE hypervisor bits use their locally defined instance
 *
 * - the rest of the kernel use either the VHE or nVHE one, depending on
 *   the mode we're running in.
 *
 *   Unless we're in protected mode, fully deprivileged, and the nVHE
 *   per-CPU stuff is exclusively accessible to the protected EL2 code.
 *   In this case, the EL1 code uses the *VHE* data as its private state
 *   (which makes sense in a way as there shouldn't be any shared state
 *   between the host and the hypervisor).
 *
 * Yes, this is all totally trivial. Shoot me now.
 */
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
#define host_data_ptr(f)	(&this_cpu_ptr(&kvm_host_data)->f)
#else
#define host_data_ptr(f)						\
	(static_branch_unlikely(&kvm_protected_mode_initialized) ?	\
	 &this_cpu_ptr(&kvm_host_data)->f :				\
	 &this_cpu_ptr_hyp_sym(kvm_host_data)->f)
#endif

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
+1 −1
Original line number Diff line number Diff line
@@ -2075,7 +2075,7 @@ static void cpu_set_hyp_vector(void)

static void cpu_hyp_init_context(void)
{
	kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt);
	kvm_init_host_cpu_context(host_data_ptr(host_ctxt));

	if (!is_kernel_in_hyp_mode())
		cpu_init_hyp_mode();
+2 −2
Original line number Diff line number Diff line
@@ -135,7 +135,7 @@ static inline void __debug_switch_to_guest_common(struct kvm_vcpu *vcpu)
	if (!vcpu_get_flag(vcpu, DEBUG_DIRTY))
		return;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	host_ctxt = host_data_ptr(host_ctxt);
	guest_ctxt = &vcpu->arch.ctxt;
	host_dbg = &vcpu->arch.host_debug_state.regs;
	guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
@@ -154,7 +154,7 @@ static inline void __debug_switch_to_host_common(struct kvm_vcpu *vcpu)
	if (!vcpu_get_flag(vcpu, DEBUG_DIRTY))
		return;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	host_ctxt = host_data_ptr(host_ctxt);
	guest_ctxt = &vcpu->arch.ctxt;
	host_dbg = &vcpu->arch.host_debug_state.regs;
	guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
+4 −4
Original line number Diff line number Diff line
@@ -82,7 +82,7 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)

static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
	u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp;
	u64 r_val, w_val;

@@ -157,7 +157,7 @@ static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)

static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);

	if (!cpus_have_final_cap(ARM64_HAS_FGT))
		return;
@@ -218,7 +218,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)

		write_sysreg(0, pmselr_el0);

		hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
		hctxt = host_data_ptr(host_ctxt);
		ctxt_sys_reg(hctxt, PMUSERENR_EL0) = read_sysreg(pmuserenr_el0);
		write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
		vcpu_set_flag(vcpu, PMUSERENR_ON_CPU);
@@ -261,7 +261,7 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
	if (kvm_arm_support_pmu_v3()) {
		struct kvm_cpu_context *hctxt;

		hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
		hctxt = host_data_ptr(host_ctxt);
		write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0);
		vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU);
	}
+1 −1
Original line number Diff line number Diff line
@@ -205,7 +205,7 @@ asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on)
	struct psci_boot_args *boot_args;
	struct kvm_cpu_context *host_ctxt;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	host_ctxt = host_data_ptr(host_ctxt);

	if (is_cpu_on)
		boot_args = this_cpu_ptr(&cpu_on_args);
Loading