Unverified Commit 12698b5b authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!4785 Support PV-sched feature

Merge Pull Request from: @ci-robot 
 
PR sync from: lishusen <lishusen2@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/IAHHTVZYSZUSQRUZH3YQN2MAQAMVERIU/ 
PV sched supports the vCPU preemption check to enhance lock performance on
overcommitted hosts

Zengruan Ye (5):
  KVM: arm64: Document PV-sched interface
  KVM: arm64: Implement PV_SCHED_FEATURES call
  KVM: arm64: Support pvsched preempted via shared structure
  KVM: arm64: Add interface to support vCPU preempted check
  KVM: arm64: Support the vCPU preemption check


-- 
2.33.0
 
https://gitee.com/openeuler/kernel/issues/I8WMFU 
 
Link: https://gitee.com/openeuler/kernel/pulls/4785

 

Reviewed-by: default avatarZenghui Yu <yuzenghui@huawei.com>
Reviewed-by: default avatarZhang Jianhua <chris.zjh@huawei.com>
Signed-off-by: default avatarZheng Zengkai <zhengzengkai@huawei.com>
parents 50357a03 858bccb7
Loading
Loading
Loading
Loading
+58 −0
Original line number Diff line number Diff line
.. SPDX-License-Identifier: GPL-2.0

Paravirtualized sched support for arm64
=======================================

KVM/arm64 provides some hypervisor service calls to support a paravirtualized
sched.

Some SMCCC compatible hypercalls are defined:

* PV_SCHED_FEATURES:          0xC5000090
* PV_SCHED_IPA_INIT:          0xC5000091
* PV_SCHED_IPA_RELEASE:       0xC5000092

The existence of the PV_SCHED hypercalls should be probed using the SMCCC 1.1
ARCH_FEATURES mechanism before calling them.

PV_SCHED_FEATURES
    ============= ========    ==========
    Function ID:  (uint32)    0xC5000090
    PV_call_id:   (uint32)    The function to query for support.
    Return value: (int64)     NOT_SUPPORTED (-1) or SUCCESS (0) if the relevant
                              PV-sched feature is supported by the hypervisor.
    ============= ========    ==========

PV_SCHED_IPA_INIT
    ============= ========    ==========
    Function ID:  (uint32)    0xC5000091
    Return value: (int64)     NOT_SUPPORTED (-1) or SUCCESS (0) if the IPA of
                              this vCPU's PV data structure is shared to the
                              hypervisor.
    ============= ========    ==========

PV_SCHED_IPA_RELEASE
    ============= ========    ==========
    Function ID:  (uint32)    0xC5000092
    Return value: (int64)     NOT_SUPPORTED (-1) or SUCCESS (0) if the IPA of
                              this vCPU's PV data structure is released.
    ============= ========    ==========

PV sched state
--------------

The structure pointed to by the IPA passed via the PV_SCHED_IPA_INIT hypercall
is as follows:

+-----------+-------------+-------------+-----------------------------------+
| Field     | Byte Length | Byte Offset | Description                       |
+===========+=============+=============+===================================+
| preempted |      4      |      0      | Indicates that the vCPU that owns |
|           |             |             | this struct is running or not.    |
|           |             |             | Non-zero values mean the vCPU has |
|           |             |             | been preempted. Zero means the    |
|           |             |             | vCPU is not preempted.            |
+-----------+-------------+-------------+-----------------------------------+

The preempted field will be updated to 0 by the hypervisor prior to scheduling
a vCPU. When the vCPU is scheduled out, the preempted field will be updated
to 1 by the hypervisor.
+11 −0
Original line number Diff line number Diff line
@@ -1549,6 +1549,17 @@ config PARAVIRT
	  under a hypervisor, potentially improving performance significantly
	  over full virtualization.

# Guest-side PV-sched support: lets the guest ask the hypervisor whether a
# vCPU is currently preempted (see PV_SCHED_* hypercalls in the PV-sched doc).
config PARAVIRT_SCHED
	bool "Paravirtualization layer for sched"
	depends on PARAVIRT
	help
	  This supports the vCPU preemption check to enhance lock performance on
	  overcommitted hosts (more runnable vCPUs than physical CPUs in the
	  system) as doing busy waits for preempted vCPUs will hurt system
	  performance far worse than early yielding.

	  If you are unsure how to answer this question, answer Y.

config PARAVIRT_TIME_ACCOUNTING
	bool "Paravirtual steal time accounting"
	select PARAVIRT
+1 −0
Original line number Diff line number Diff line
@@ -469,6 +469,7 @@ CONFIG_SCHED_HRTICK=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_HW_PERF_EVENTS=y
CONFIG_PARAVIRT=y
CONFIG_PARAVIRT_SCHED=y
CONFIG_PARAVIRT_TIME_ACCOUNTING=y
CONFIG_ARCH_SUPPORTS_KEXEC=y
CONFIG_ARCH_SUPPORTS_KEXEC_FILE=y
+34 −0
Original line number Diff line number Diff line
@@ -596,6 +596,13 @@ struct kvm_vcpu_arch {
		gpa_t base;
	} steal;

#ifdef CONFIG_PARAVIRT_SCHED
	/*
	 * Guest PV sched state: base holds the IPA of the per-vCPU shared
	 * structure registered by the guest (PV_SCHED_IPA_INIT), or
	 * INVALID_GPA while no structure is registered.
	 */
	struct {
		gpa_t base;
	} pvsched;
#endif

	/* Per-vcpu CCSIDR override or NULL */
	u32 *ccsidr;

@@ -1072,6 +1079,33 @@ static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
	return (vcpu_arch->steal.base != INVALID_GPA);
}

#ifdef CONFIG_PARAVIRT_SCHED
/* Handle the PV_SCHED_FEATURES hypercall: report supported PV-sched calls. */
long kvm_hypercall_pvsched_features(struct kvm_vcpu *vcpu);
/*
 * Publish @preempted into the vCPU's guest-shared pvsched structure
 * (per the PV-sched doc: 0 before the vCPU is scheduled in, 1 when it
 * is scheduled out).
 */
void kvm_update_pvsched_preempted(struct kvm_vcpu *vcpu, u32 preempted);

/* Reset the pvsched area to "not registered" until the guest shares an IPA. */
static inline void kvm_arm_pvsched_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->pvsched.base = INVALID_GPA;
}

/* True once the guest has registered a shared structure via PV_SCHED_IPA_INIT. */
static inline bool kvm_arm_is_pvsched_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->pvsched.base != INVALID_GPA);
}
#else
/* !CONFIG_PARAVIRT_SCHED: no-op stubs so callers need no #ifdef guards. */
static inline long kvm_hypercall_pvsched_features(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_update_pvsched_preempted(struct kvm_vcpu *vcpu,
						u32 preempted) {}
static inline void kvm_arm_pvsched_vcpu_init(struct kvm_vcpu_arch *vcpu_arch) {}
static inline bool kvm_arm_is_pvsched_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return false;
}
#endif

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
+13 −0
Original line number Diff line number Diff line
@@ -20,9 +20,22 @@ static inline u64 paravirt_steal_clock(int cpu)

int __init pv_time_init(void);

#ifdef CONFIG_PARAVIRT_SCHED
/* Probe the hypervisor for PV-sched and wire up the preemption check. */
int __init pv_sched_init(void);

/*
 * Native fallback target for the static call below — presumably reports
 * "not preempted" on bare metal; see its definition (not in this file).
 */
__visible bool __native_vcpu_is_preempted(int cpu);
DECLARE_STATIC_CALL(pv_vcpu_preempted, __native_vcpu_is_preempted);

/*
 * Ask whether the vCPU backing @cpu has been preempted by the host.
 * Dispatches through a static call so the PV implementation (when
 * enabled at boot) replaces the native one with no indirect-branch cost.
 */
static inline bool pv_vcpu_is_preempted(int cpu)
{
	return static_call(pv_vcpu_preempted)(cpu);
}
#endif /* CONFIG_PARAVIRT_SCHED */

#else

#define pv_time_init() do {} while (0)
#define pv_sched_init() do {} while (0)

#endif // CONFIG_PARAVIRT

Loading