Commit 63042c58 authored by Zengruan Ye, committed by Zheng Zengkai

KVM: arm64: Add interface to support vCPU preempted check



virt inclusion
category: feature
bugzilla: 47624
CVE: NA

--------------------------------

This is to fix some lock holder preemption issues. Some other lock
implementations do a spin loop before acquiring the lock itself. The
kernel already has an interface, bool vcpu_is_preempted(int cpu), which
takes a CPU number as its parameter and returns true if the vCPU backing
that CPU is currently preempted. The kernel can then break out of such
spin loops based on the return value of vcpu_is_preempted().

As the kernel already uses this interface, let's support it on arm64.
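
For illustration only (this snippet is not part of the patch), a lock's
spin loop can consult the interface roughly as follows; struct my_lock
and its fields are hypothetical:

	/*
	 * Spin while the lock is held, but give up as soon as the
	 * holder's CPU (a vCPU, when running under a hypervisor) is
	 * preempted: it cannot release the lock until it runs again.
	 */
	static bool spin_on_holder(struct my_lock *lock)
	{
		while (READ_ONCE(lock->locked)) {
			if (vcpu_is_preempted(READ_ONCE(lock->holder_cpu)))
				return false;	/* stop burning cycles */
			cpu_relax();
		}
		return true;	/* lock was released while spinning */
	}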

Signed-off-by: Zengruan Ye <yezengruan@huawei.com>
Reviewed-by: Zhanghailiang <zhang.zhanghailiang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
parent 76732c97
arch/arm64/include/asm/paravirt.h  +11 −0
@@ -11,8 +11,13 @@ struct pv_time_ops {
 	unsigned long long (*steal_clock)(int cpu);
 };
 
+struct pv_sched_ops {
+	bool (*vcpu_is_preempted)(int cpu);
+};
+
 struct paravirt_patch_template {
 	struct pv_time_ops time;
+	struct pv_sched_ops sched;
 };
 
 extern struct paravirt_patch_template pv_ops;
@@ -24,6 +29,12 @@ static inline u64 paravirt_steal_clock(int cpu)
 
 int __init pv_time_init(void);
 
+__visible bool __native_vcpu_is_preempted(int cpu);
+static inline bool pv_vcpu_is_preempted(int cpu)
+{
+	return pv_ops.sched.vcpu_is_preempted(cpu);
+}
+
 #else
 
 #define pv_time_init() do {} while (0)
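
pv_vcpu_is_preempted() above is just an indirect call through
pv_ops.sched. As a sketch of how a later guest-side patch might install
a hypervisor-backed callback (all names below are hypothetical, not
taken from this series):

	static bool my_pv_vcpu_is_preempted(int cpu)
	{
		/* e.g. read a per-CPU "preempted" flag shared with the host */
		return !!READ_ONCE(my_pv_state[cpu].preempted);
	}

	static int __init my_pv_sched_init(void)
	{
		if (!my_hypervisor_has_pv_sched())
			return 0;	/* keep the native default */

		pv_ops.sched.vcpu_is_preempted = my_pv_vcpu_is_preempted;
		return 0;
	}
	early_initcall(my_pv_sched_init);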
arch/arm64/include/asm/spinlock.h  +10 −0
@@ -7,6 +7,7 @@
 
 #include <asm/qrwlock.h>
 #include <asm/qspinlock.h>
+#include <asm/paravirt.h>
 
 /* See include/linux/spinlock.h */
 #define smp_mb__after_spinlock()	smp_mb()
@@ -19,9 +20,18 @@
  * https://lore.kernel.org/lkml/20200110100612.GC2827@hirez.programming.kicks-ass.net
  */
 #define vcpu_is_preempted vcpu_is_preempted
+#ifdef CONFIG_PARAVIRT
+static inline bool vcpu_is_preempted(int cpu)
+{
+	return pv_vcpu_is_preempted(cpu);
+}
+
+#else
+
 static inline bool vcpu_is_preempted(int cpu)
 {
 	return false;
 }
+#endif /* CONFIG_PARAVIRT */
 
 #endif /* __ASM_SPINLOCK_H */
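
The #define above is what lets generic code detect that the
architecture provides its own implementation; the generic fallback in
include/linux/sched.h is guarded roughly like this:

	#ifndef vcpu_is_preempted
	static inline bool vcpu_is_preempted(int cpu)
	{
		return false;
	}
	#endif

With CONFIG_PARAVIRT=n the arm64 version is a constant false, so the
compiler can optimize the preemption checks out of non-paravirt builds.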
arch/arm64/kernel/Makefile  +1 −1
@@ -49,7 +49,7 @@ obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o
 obj-$(CONFIG_ACPI)			+= acpi.o
 obj-$(CONFIG_ACPI_NUMA)			+= acpi_numa.o
 obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL)	+= acpi_parking_protocol.o
-obj-$(CONFIG_PARAVIRT)			+= paravirt.o
+obj-$(CONFIG_PARAVIRT)			+= paravirt.o paravirt-spinlocks.o
 obj-$(CONFIG_RANDOMIZE_BASE)		+= kaslr.o
 obj-$(CONFIG_HIBERNATION)		+= hibernate.o hibernate-asm.o
 obj-$(CONFIG_KEXEC_CORE)		+= machine_kexec.o relocate_kernel.o	\
arch/arm64/kernel/paravirt-spinlocks.c  +13 −0 (new file)
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright(c) 2019 Huawei Technologies Co., Ltd
+ * Author: Zengruan Ye <yezengruan@huawei.com>
+ */
+
+#include <linux/spinlock.h>
+#include <asm/paravirt.h>
+
+__visible bool __native_vcpu_is_preempted(int cpu)
+{
+	return false;
+}
arch/arm64/kernel/paravirt.c  +3 −1
@@ -26,7 +26,9 @@
 struct static_key paravirt_steal_enabled;
 struct static_key paravirt_steal_rq_enabled;
 
-struct paravirt_patch_template pv_ops;
+struct paravirt_patch_template pv_ops = {
+	.sched.vcpu_is_preempted		= __native_vcpu_is_preempted,
+};
 EXPORT_SYMBOL_GPL(pv_ops);
 
 struct pv_time_stolen_time_region {
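
With only this patch applied, the whole chain resolves to the native
stub: vcpu_is_preempted() -> pv_vcpu_is_preempted() ->
pv_ops.sched.vcpu_is_preempted() -> __native_vcpu_is_preempted(), which
always returns false. Behaviour is therefore unchanged until a later
patch installs a real hypervisor-backed callback.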