Commit 1d6e31e3 authored by Yanan Wang's avatar Yanan Wang Committed by Dongxu Sun
Browse files

KVM: arm64: Add a module param to enable/disable pv_preempted dynamically

virt inclusion
category: performance
bugzilla: https://gitee.com/openeuler/kernel/issues/IBNSBL



--------------------------------

pv_preempted cannot guarantee a performance improvement in every
scenario, so add a module parameter that allows pv_preempted to be
enabled or disabled dynamically when it is not needed.

Signed-off-by: default avatarYanan Wang <wangyanan55@huawei.com>
Signed-off-by: default avatarDongxu Sun <sundongxu3@huawei.com>
parent 0f19fd14
Loading
Loading
Loading
Loading
+4 −1
Original line number Diff line number Diff line
@@ -412,6 +412,7 @@ struct kvm_vcpu_arch {
	/* Guest PV sched state */
	struct {
		bool pv_unhalted;
		bool preempted;
		gpa_t base;
	} pvsched;

@@ -645,12 +646,14 @@ long kvm_hypercall_pvsched_features(struct kvm_vcpu *vcpu);
void kvm_update_pvsched_preempted(struct kvm_vcpu *vcpu, u32 preempted);
long kvm_pvsched_kick_vcpu(struct kvm_vcpu *vcpu);

extern bool pv_preempted_enable;
/*
 * Reset a vCPU's PV sched state: clear the preempted flag and mark the
 * shared-page base as unregistered (GPA_INVALID).
 */
static inline void kvm_arm_pvsched_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->pvsched.preempted = false;
	vcpu_arch->pvsched.base = GPA_INVALID;
}

static inline bool kvm_arm_is_pvsched_enabled(struct kvm_vcpu_arch *vcpu_arch)
static inline bool kvm_arm_is_pvsched_valid(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->pvsched.base != GPA_INVALID);
}
+24 −3
Original line number Diff line number Diff line
@@ -84,6 +84,15 @@ unsigned int twedel = 0;
module_param(twedel, uint, S_IRUGO | S_IWUSR);
#endif

/*
 * pv_preempted_enable is a plain bool, so the stock bool param
 * handlers are sufficient; wired up via module_param_cb() below.
 */
static const struct kernel_param_ops pv_preempted_enable_ops = {
	.get = param_get_bool,
	.set = param_set_bool,
};

/*
 * Runtime switch for PV preempted-state updates on vcpu load/put.
 * Writable via sysfs (0644); enabled by default.
 */
bool pv_preempted_enable = true;
module_param_cb(pv_preempted_enable, &pv_preempted_enable_ops, &pv_preempted_enable, 0644);
MODULE_PARM_DESC(pv_preempted_enable, "Enable/disable PV preempted state updates dynamically (default: Y)");

static int vcpu_req_reload_wfi_traps(const char *val, const struct kernel_param *kp);

static const struct kernel_param_ops force_wfi_trap_ops = {
@@ -575,8 +584,20 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
	if (vcpu_has_ptrauth(vcpu))
		vcpu_ptrauth_disable(vcpu);

	if (kvm_arm_is_pvsched_enabled(&vcpu->arch))
	/*
	 * When pv_preempted is changed from enabled to disabled, preempted
	 * state will not be updated in kvm_arch_vcpu_put/load. So we must
	 * update the preempted state to 0 for every vCPU in case some vCPUs'
	 * preempted state will always be 1.
	 */
	if (kvm_arm_is_pvsched_valid(&vcpu->arch)) {
		if (pv_preempted_enable)
			kvm_update_pvsched_preempted(vcpu, 0);
		else {
			if (vcpu->arch.pvsched.preempted)
				kvm_update_pvsched_preempted(vcpu, 0);
		}
	}

#ifdef CONFIG_KVM_HISI_VIRT
	kvm_hisi_dvmbm_load(vcpu);
@@ -600,7 +621,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)

	vcpu->cpu = -1;

	if (kvm_arm_is_pvsched_enabled(&vcpu->arch))
	if (kvm_arm_is_pvsched_valid(&vcpu->arch) && pv_preempted_enable)
		kvm_update_pvsched_preempted(vcpu, 1);

#ifdef CONFIG_KVM_HISI_VIRT
+2 −0
Original line number Diff line number Diff line
@@ -34,6 +34,8 @@ void kvm_update_pvsched_preempted(struct kvm_vcpu *vcpu, u32 preempted)
	srcu_read_unlock(&kvm->srcu, idx);

	pagefault_enable();

	vcpu->arch.pvsched.preempted = !!preempted;
}

long kvm_pvsched_kick_vcpu(struct kvm_vcpu *vcpu)