arch/arm64/include/asm/cpufeature.h +5 −0

@@ -699,6 +699,11 @@ static inline bool system_supports_generic_auth(void)
 		cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
 }
 
+static inline bool system_has_full_ptr_auth(void)
+{
+	return system_supports_address_auth() && system_supports_generic_auth();
+}
+
 static __always_inline bool system_uses_irq_prio_masking(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
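The helper requires both flavours of pointer authentication at once; hoisting it out of arch/arm64/kvm/reset.c (deleted below) into cpufeature.h makes it available alongside the other system_supports_*() predicates. A minimal sketch of a caller, with the function name invented for illustration (it mirrors how KVM gates the vCPU ptrauth features, which are exposed together or not at all):

	/* Hypothetical caller: refuse to expose pointer authentication
	 * unless every CPU implements both address and generic auth. */
	static int enable_guest_ptrauth(struct kvm_vcpu *vcpu)
	{
		if (!system_has_full_ptr_auth())
			return -EINVAL;

		return 0;
	}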
arch/arm64/include/asm/kvm_host.h +3 −2

@@ -58,8 +58,6 @@ int kvm_arm_init_sve(void);
 int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
 void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);
-int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext);
-void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);
 
 struct kvm_vmid {
 	/* The VMID generation used for the virt. memory system */

@@ -89,6 +87,9 @@ struct kvm_s2_mmu {
 	struct kvm *kvm;
 };
 
+struct kvm_arch_memory_slot {
+};
+
 struct kvm_arch {
 	struct kvm_s2_mmu mmu;
 
arch/arm64/include/uapi/asm/kvm.h +0 −3

@@ -156,9 +156,6 @@ struct kvm_sync_regs {
 	__u64 device_irq_level;
 };
 
-struct kvm_arch_memory_slot {
-};
-
 /*
  * PMU filter structure. Describe a range of events with a particular
  * action. To be used with KVM_ARM_VCPU_PMU_V3_FILTER.
arch/arm64/kvm/arm.c +29 −2

@@ -197,6 +197,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
 	case KVM_CAP_ARM_NISV_TO_USER:
 	case KVM_CAP_ARM_INJECT_EXT_DABT:
+	case KVM_CAP_SET_GUEST_DEBUG:
+	case KVM_CAP_VCPU_ATTRIBUTES:
 		r = 1;
 		break;
 	case KVM_CAP_ARM_SET_DEVICE_ADDR:

@@ -228,10 +230,35 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_STEAL_TIME:
 		r = kvm_arm_pvtime_supported();
 		break;
-	default:
-		r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
+	case KVM_CAP_ARM_EL1_32BIT:
+		r = cpus_have_const_cap(ARM64_HAS_32BIT_EL1);
+		break;
+	case KVM_CAP_GUEST_DEBUG_HW_BPS:
+		r = get_num_brps();
+		break;
+	case KVM_CAP_GUEST_DEBUG_HW_WPS:
+		r = get_num_wrps();
+		break;
+	case KVM_CAP_ARM_PMU_V3:
+		r = kvm_arm_support_pmu_v3();
+		break;
+	case KVM_CAP_ARM_INJECT_SERROR_ESR:
+		r = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
+		break;
+	case KVM_CAP_ARM_VM_IPA_SIZE:
+		r = get_kvm_ipa_limit();
+		break;
+	case KVM_CAP_ARM_SVE:
+		r = system_supports_sve();
+		break;
+	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
+	case KVM_CAP_ARM_PTRAUTH_GENERIC:
+		r = system_has_full_ptr_auth();
+		break;
+	default:
+		r = 0;
 	}
 
 	return r;
 }
 
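With every arm64 capability now answered in kvm_vm_ioctl_check_extension() directly, userspace behaviour is unchanged: KVM_CHECK_EXTENSION still returns 0 for an unsupported capability and a capability-specific positive value otherwise. A minimal userspace sketch probing two of the relocated capabilities:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);
		if (kvm < 0) {
			perror("/dev/kvm");
			return 1;
		}

		/* Returns the maximum guest IPA size in bits, or 0 if the
		 * capability is absent. */
		int ipa = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);
		/* Non-zero iff the host can expose SVE to guests. */
		int sve = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE);

		printf("IPA limit: %d bits, SVE: %s\n", ipa, sve ? "yes" : "no");
		return 0;
	}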
arch/arm64/kvm/reset.c +0 −52

@@ -41,58 +41,6 @@ static u32 kvm_ipa_limit;
 #define VCPU_RESET_PSTATE_SVC	(PSR_AA32_MODE_SVC | PSR_AA32_A_BIT | \
 				 PSR_AA32_I_BIT | PSR_AA32_F_BIT)
 
-static bool system_has_full_ptr_auth(void)
-{
-	return system_supports_address_auth() && system_supports_generic_auth();
-}
-
-/**
- * kvm_arch_vm_ioctl_check_extension
- *
- * We currently assume that the number of HW registers is uniform
- * across all CPUs (see cpuinfo_sanity_check).
- */
-int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
-{
-	int r;
-
-	switch (ext) {
-	case KVM_CAP_ARM_EL1_32BIT:
-		r = cpus_have_const_cap(ARM64_HAS_32BIT_EL1);
-		break;
-	case KVM_CAP_GUEST_DEBUG_HW_BPS:
-		r = get_num_brps();
-		break;
-	case KVM_CAP_GUEST_DEBUG_HW_WPS:
-		r = get_num_wrps();
-		break;
-	case KVM_CAP_ARM_PMU_V3:
-		r = kvm_arm_support_pmu_v3();
-		break;
-	case KVM_CAP_ARM_INJECT_SERROR_ESR:
-		r = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
-		break;
-	case KVM_CAP_SET_GUEST_DEBUG:
-	case KVM_CAP_VCPU_ATTRIBUTES:
-		r = 1;
-		break;
-	case KVM_CAP_ARM_VM_IPA_SIZE:
-		r = kvm_ipa_limit;
-		break;
-	case KVM_CAP_ARM_SVE:
-		r = system_supports_sve();
-		break;
-	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
-	case KVM_CAP_ARM_PTRAUTH_GENERIC:
-		r = system_has_full_ptr_auth();
-		break;
-	default:
-		r = 0;
-	}
-
-	return r;
-}
-
 unsigned int kvm_sve_max_vl;
 
 int kvm_arm_init_sve(void)
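Note the one cosmetic delta in the move: the deleted reset.c code returned the file-local kvm_ipa_limit variable for KVM_CAP_ARM_VM_IPA_SIZE, whereas the arm.c replacement calls get_kvm_ipa_limit(), the existing accessor for that same value, since the static variable is not visible outside reset.c. The value reported to userspace is unchanged.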