Unverified Commit 85668597 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!5815 v2 Support NMI in the virtual machine

Merge Pull Request from: @ci-robot 
 
PR sync from: chenxiang <chenxiang66@hisilicon.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/ZRW2NMY5DXWIF25JSZRHM7W7XFC3RTQH/ 
From: caijian <caijian11@h-partners.com>

The patches add support for NMI in the virtual machine.

https://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms.git/log/?h=arm64/nmi

Marc Zyngier (15):
  KVM: arm64: vgic-v3: Upgrade AP1Rn to 64bit.
  KVM: arm64: vgic-v3: Allow the NMI state to make it into the LRs
  KVM: arm64: vgic-v3: Make NMI priority RES0
  KVM: arm64: vgic-v4: Propagate the NMI state into the GICv4.1 VSGI
    configuration
  KVM: arm64: vgic-v3: Use the NMI attribute as part of the AP-list
    sorting
  KVM: arm64: vgic-v3: Add support for GIC{D,R}_INMIR registers
  KVM: arm64: vgic-v3: Add userspace selection for GICv3.3 NMI
  KVM: arm64: vgic-debug: Add the NMI field to the debug output
  KVM: arm64: Allow userspace to control ID_AA64PFR1_EL1.NMI
  KVM: arm64: Don't trap ALLINT accesses if the vcpu has FEAT_NMI
  KVM: arm64: vgic-v3: Don't inject an NMI if the vcpu doesn't have
    FEAT_NMI
  KVM: arm64: Allow GICv3.3 NMI if the host supports it
  KVM: arm64: Handle traps of ALLINT
  arm64: Decouple KVM from CONFIG_ARM64_NMI
  KVM: arm64: vgic-v3: Handle traps of ICV_NMIAR1_EL1


-- 
2.30.0
 
https://gitee.com/openeuler/kernel/issues/I97WGU 
 
Link: https://gitee.com/openeuler/kernel/pulls/5815

 

Reviewed-by: default avatarKevin Zhu <zhukeqian1@huawei.com>
Reviewed-by: default avatarZhang Jianhua <chris.zjh@huawei.com>
Signed-off-by: default avatarZheng Zengkai <zhengzengkai@huawei.com>
parents fbf23c86 3adab6c4
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -818,7 +818,8 @@ static __always_inline bool system_uses_irq_prio_masking(void)
static __always_inline bool system_uses_nmi(void)
{
	return IS_ENABLED(CONFIG_ARM64_NMI) &&
		cpus_have_const_cap(ARM64_USES_NMI);
		cpus_have_const_cap(ARM64_USES_NMI) &&
		!system_uses_irq_prio_masking();
}

static inline bool system_supports_mte(void)
+2 −0
Original line number Diff line number Diff line
@@ -209,6 +209,8 @@ struct kvm_arch {
	/* VTCR_EL2 value for this VM */
	u64    vtcr;

	u8 pfr1_nmi;

	/* Interrupt controller */
	struct vgic_dist	vgic;

+3 −0
Original line number Diff line number Diff line
@@ -300,6 +300,8 @@
#define SYS_SPSR_EL1			sys_reg(3, 0, 4, 0, 0)
#define SYS_ELR_EL1			sys_reg(3, 0, 4, 0, 1)

#define SYS_ALLINT			sys_reg(3, 0, 4, 3, 0)

#define SYS_ICC_PMR_EL1			sys_reg(3, 0, 4, 6, 0)

#define SYS_AFSR0_EL1			sys_reg(3, 0, 5, 1, 0)
@@ -932,6 +934,7 @@
#define ICH_LR_VIRTUAL_ID_MASK	((1ULL << 32) - 1)

#define ICH_LR_EOI		(1ULL << 41)
#define ICH_LR_NMI		(1ULL << 59)
#define ICH_LR_GROUP		(1ULL << 60)
#define ICH_LR_HW		(1ULL << 61)
#define ICH_LR_STATE		(3ULL << 62)
+9 −4
Original line number Diff line number Diff line
@@ -2189,20 +2189,24 @@ static bool has_gic_prio_relaxed_sync(const struct arm64_cpu_capabilities *entry
}
#endif

#ifdef CONFIG_ARM64_NMI
static bool use_nmi(const struct arm64_cpu_capabilities *entry, int scope)
{
	if (!has_cpuid_feature(entry, scope))
		return false;

	/*
	 * NMI support was not enabled in the kernel, but can still be
	 * used by guests. Let the world know.
	 *
	 * Having both real and pseudo NMIs enabled simultaneously is
	 * likely to cause confusion.  Since pseudo NMIs must be
	 * enabled with an explicit command line option, if the user
	 * has set that option on a system with real NMIs for some
	 * reason assume they know what they're doing.
	 */
	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && enable_pseudo_nmi) {
	if (!IS_ENABLED(CONFIG_ARM64_NMI))
		pr_info("CONFIG_ARM64_NMI disabled, using NMIs for guests only\n");
	else if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && enable_pseudo_nmi) {
		pr_info("Pseudo NMI enabled, not using architected NMI\n");
		return false;
	}
@@ -2210,6 +2214,7 @@ static bool use_nmi(const struct arm64_cpu_capabilities *entry, int scope)
	return true;
}

#ifdef CONFIG_ARM64_NMI
static void nmi_enable(const struct arm64_cpu_capabilities *__unused)
{
	/*
@@ -2821,7 +2826,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
		.matches = has_cpuid_feature,
		ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, EVT, IMP)
	},
#ifdef CONFIG_ARM64_NMI
	{
		.desc = "Non-maskable Interrupts present",
		.capability = ARM64_HAS_NMI,
@@ -2843,9 +2847,10 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
		.field_width = 4,
		.min_field_value = ID_AA64PFR1_EL1_NMI_IMP,
		.matches = use_nmi,
#ifdef CONFIG_ARM64_NMI
		.cpu_enable = nmi_enable,
	},
#endif
	},
#ifdef CONFIG_ARM64_MPAM
	{
		.desc = "Memory Partitioning And Monitoring",
+3 −0
Original line number Diff line number Diff line
@@ -192,6 +192,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
	/* The maximum number of VCPUs is limited by the host's GIC model */
	kvm->max_vcpus = kvm_arm_default_max_vcpus();

	if (cpus_have_const_cap(ARM64_HAS_NMI) && !static_branch_unlikely(&vgic_v3_cpuif_trap))
		kvm->arch.pfr1_nmi = ID_AA64PFR1_EL1_NMI_IMP;

	kvm_arm_init_hypercalls(kvm);

	bitmap_zero(kvm->arch.vcpu_features, KVM_VCPU_MAX_FEATURES);
Loading