Commit 76732c97 authored by Zengruan Ye, committed by Zheng Zengkai

KVM: arm64: Support pvsched preempted via shared structure
virt inclusion
category: feature
bugzilla: 47624
CVE: NA

--------------------------------

Implement the service calls for configuring a shared structure between a
vCPU and the hypervisor, through which the hypervisor can tell whether
that vCPU is running or not.

Signed-off-by: Zengruan Ye <yezengruan@huawei.com>
Reviewed-by: Zhanghailiang <zhang.zhanghailiang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
parent a0b95bdf
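
For reference, the shared structure is the per-vCPU record declared by the
earlier patch in this series (asm/pvsched-abi.h). A minimal sketch of the
guest-visible layout, assuming the 64-byte aligned record that series
documents:

struct pvsched_vcpu_state {
	__le32 preempted;
	/* Structure must be 64 byte aligned, pad to that size */
	u8 padding[60];
} __packed;

The hypervisor only ever writes the preempted field; the padding fixes the
record at 64 bytes, so fields can be appended later without changing its size.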
arch/arm64/include/asm/kvm_host.h  +16 −0

@@ -381,6 +381,11 @@ struct kvm_vcpu_arch {
 		u64 last_steal;
 		gpa_t base;
 	} steal;
+
+	/* Guest PV sched state */
+	struct {
+		gpa_t base;
+	} pvsched;
 };
 
 /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
@@ -590,6 +595,17 @@ static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
 }
 
 long kvm_hypercall_pvsched_features(struct kvm_vcpu *vcpu);
+void kvm_update_pvsched_preempted(struct kvm_vcpu *vcpu, u32 preempted);
+
+static inline void kvm_arm_pvsched_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
+{
+	vcpu_arch->pvsched.base = GPA_INVALID;
+}
+
+static inline bool kvm_arm_is_pvsched_enabled(struct kvm_vcpu_arch *vcpu_arch)
+{
+	return (vcpu_arch->pvsched.base != GPA_INVALID);
+}
 
 void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
 
arch/arm64/kvm/arm.c  +8 −0

@@ -288,6 +288,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 
 	kvm_arm_pvtime_vcpu_init(&vcpu->arch);
 
+	kvm_arm_pvsched_vcpu_init(&vcpu->arch);
+
 	vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
 
 	err = kvm_vgic_vcpu_init(vcpu);
@@ -398,6 +400,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	update_steal_time(vcpu);
 	if (vcpu_has_ptrauth(vcpu))
 		vcpu_ptrauth_disable(vcpu);
+
+	if (kvm_arm_is_pvsched_enabled(&vcpu->arch))
+		kvm_update_pvsched_preempted(vcpu, 0);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -410,6 +415,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	kvm_vcpu_pmu_restore_host(vcpu);
 
 	vcpu->cpu = -1;
+
+	if (kvm_arm_is_pvsched_enabled(&vcpu->arch))
+		kvm_update_pvsched_preempted(vcpu, 1);
 }
 
 static void vcpu_power_off(struct kvm_vcpu *vcpu)
arch/arm64/kvm/hypercalls.c  +11 −0

@@ -77,6 +77,17 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
 	case ARM_SMCCC_HV_PV_SCHED_FEATURES:
 		val = kvm_hypercall_pvsched_features(vcpu);
 		break;
+	case ARM_SMCCC_HV_PV_SCHED_IPA_INIT:
+		gpa = smccc_get_arg1(vcpu);
+		if (gpa != GPA_INVALID) {
+			vcpu->arch.pvsched.base = gpa;
+			val = SMCCC_RET_SUCCESS;
+		}
+		break;
+	case ARM_SMCCC_HV_PV_SCHED_IPA_RELEASE:
+		vcpu->arch.pvsched.base = GPA_INVALID;
+		val = SMCCC_RET_SUCCESS;
+		break;
 	default:
 		return kvm_psci_call(vcpu);
 	}
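
A guest opts in by handing the IPA of its per-vCPU record to the host via
the new init call. A minimal sketch of the guest side (pv_sched_register is
a hypothetical helper, not part of this patch):

#include <linux/arm-smccc.h>

/* Hypothetical guest helper: register the shared record's IPA with the host. */
static int pv_sched_register(phys_addr_t ipa)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_SCHED_IPA_INIT, ipa, &res);

	return res.a0 == SMCCC_RET_SUCCESS ? 0 : -EINVAL;
}

Since the handler above rejects GPA_INVALID as a base and the release call
resets it, a guest can unregister cleanly (for example across CPU hotplug or
suspend) by issuing ARM_SMCCC_HV_PV_SCHED_IPA_RELEASE.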
arch/arm64/kvm/pvsched.c  +28 −0

@@ -5,9 +5,35 @@
  */
 
 #include <linux/arm-smccc.h>
+#include <linux/kvm_host.h>
+
+#include <asm/pvsched-abi.h>
 
 #include <kvm/arm_hypercalls.h>
 
+void kvm_update_pvsched_preempted(struct kvm_vcpu *vcpu, u32 preempted)
+{
+	struct kvm *kvm = vcpu->kvm;
+	u64 base = vcpu->arch.pvsched.base;
+	u64 offset = offsetof(struct pvsched_vcpu_state, preempted);
+	int idx;
+
+	if (base == GPA_INVALID)
+		return;
+
+	/*
+	 * This function is called from atomic context, so we need to
+	 * disable page faults.
+	 */
+	pagefault_disable();
+
+	idx = srcu_read_lock(&kvm->srcu);
+	kvm_put_guest(kvm, base + offset, cpu_to_le32(preempted));
+	srcu_read_unlock(&kvm->srcu, idx);
+
+	pagefault_enable();
+}
+
 long kvm_hypercall_pvsched_features(struct kvm_vcpu *vcpu)
 {
 	u32 feature = smccc_get_arg1(vcpu);
@@ -15,6 +41,8 @@ long kvm_hypercall_pvsched_features(struct kvm_vcpu *vcpu)
 
 	switch (feature) {
 	case ARM_SMCCC_HV_PV_SCHED_FEATURES:
+	case ARM_SMCCC_HV_PV_SCHED_IPA_INIT:
+	case ARM_SMCCC_HV_PV_SCHED_IPA_RELEASE:
 		val = SMCCC_RET_SUCCESS;
 		break;
 	}
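
On the guest side, the point of the preempted write is a cheap "is this vCPU
running?" check. A minimal sketch, assuming each CPU registered a per-cpu
record as above (pv_state and pv_vcpu_is_preempted are hypothetical names,
not part of this patch):

#include <linux/percpu.h>

static DEFINE_PER_CPU(struct pvsched_vcpu_state, pv_state) __aligned(64);

/* Hypothetical guest check: has the host scheduled this vCPU out? */
static bool pv_vcpu_is_preempted(int cpu)
{
	struct pvsched_vcpu_state *st = &per_cpu(pv_state, cpu);

	return !!le32_to_cpu(READ_ONCE(st->preempted));
}

A helper like this would typically be wired into vcpu_is_preempted() through
the paravirt ops, letting spinlock and scheduler paths avoid spinning on a
vCPU the host has preempted.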