arch/arm64/Kconfig  +1 −3

@@ -1516,7 +1516,6 @@ menu "ARMv8.3 architectural features"
 config ARM64_PTR_AUTH
 	bool "Enable support for pointer authentication"
 	default y
-	depends on !KVM || ARM64_VHE
 	depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
 	# Modern compilers insert a .note.gnu.property section note for PAC
 	# which is only understood by binutils starting with version 2.33.1.
@@ -1543,8 +1542,7 @@ config ARM64_PTR_AUTH
 	  The feature is detected at runtime. If the feature is not present in
 	  hardware it will not be advertised to userspace/KVM guest nor will it
-	  be enabled. However, KVM guest also require VHE mode and hence
-	  CONFIG_ARM64_VHE=y option to use this feature.
+	  be enabled.
 	  If the feature is present on the boot CPU but not on a late CPU, then
 	  the late CPU will be parked. Also, if the boot CPU does not have
arch/arm64/include/asm/kvm_ptrauth.h  +13 −21

@@ -61,44 +61,36 @@
 /*
  * Both ptrauth_switch_to_guest and ptrauth_switch_to_host macros will
- * check for the presence of one of the cpufeature flag
- * ARM64_HAS_ADDRESS_AUTH_ARCH or ARM64_HAS_ADDRESS_AUTH_IMP_DEF and
+ * check for the presence ARM64_HAS_ADDRESS_AUTH, which is defined as
+ * (ARM64_HAS_ADDRESS_AUTH_ARCH || ARM64_HAS_ADDRESS_AUTH_IMP_DEF) and
  * then proceed ahead with the save/restore of Pointer Authentication
- * key registers.
+ * key registers if enabled for the guest.
  */
 .macro ptrauth_switch_to_guest g_ctxt, reg1, reg2, reg3
-alternative_if ARM64_HAS_ADDRESS_AUTH_ARCH
-	b	1000f
+alternative_if_not ARM64_HAS_ADDRESS_AUTH
+	b	.L__skip_switch\@
 alternative_else_nop_endif
-alternative_if_not ARM64_HAS_ADDRESS_AUTH_IMP_DEF
-	b	1001f
-alternative_else_nop_endif
-1000:	ldr	\reg1, [\g_ctxt, #(VCPU_HCR_EL2 - VCPU_CONTEXT)]
+	mrs	\reg1, hcr_el2
 	and	\reg1, \reg1, #(HCR_API | HCR_APK)
-	cbz	\reg1, 1001f
+	cbz	\reg1, .L__skip_switch\@
 	add	\reg1, \g_ctxt, #CPU_APIAKEYLO_EL1
 	ptrauth_restore_state	\reg1, \reg2, \reg3
-1001:
+.L__skip_switch\@:
 .endm

 .macro ptrauth_switch_to_host g_ctxt, h_ctxt, reg1, reg2, reg3
-alternative_if ARM64_HAS_ADDRESS_AUTH_ARCH
-	b	2000f
-alternative_else_nop_endif
-alternative_if_not ARM64_HAS_ADDRESS_AUTH_IMP_DEF
-	b	2001f
+alternative_if_not ARM64_HAS_ADDRESS_AUTH
+	b	.L__skip_switch\@
 alternative_else_nop_endif
-2000:	ldr	\reg1, [\g_ctxt, #(VCPU_HCR_EL2 - VCPU_CONTEXT)]
+	mrs	\reg1, hcr_el2
 	and	\reg1, \reg1, #(HCR_API | HCR_APK)
-	cbz	\reg1, 2001f
+	cbz	\reg1, .L__skip_switch\@
 	add	\reg1, \g_ctxt, #CPU_APIAKEYLO_EL1
 	ptrauth_save_state	\reg1, \reg2, \reg3
 	add	\reg1, \h_ctxt, #CPU_APIAKEYLO_EL1
 	ptrauth_restore_state	\reg1, \reg2, \reg3
 	isb
-2001:
+.L__skip_switch\@:
 .endm

 #else /* !CONFIG_ARM64_PTR_AUTH */
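For readers less at home with alternative-patched assembly, here is a minimal, self-contained C sketch of the decision the rewritten macros encode: the key registers are only swapped when the ARM64_HAS_ADDRESS_AUTH capability is present and the live HCR_EL2 value has API/APK set, i.e. ptrauth has actually been enabled for this guest. The helper names (has_address_auth, should_switch_ptrauth_keys) are illustrative only, not kernel APIs; the HCR_EL2 bit positions are the architectural ones.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* HCR_EL2 controls that KVM clears/sets for ptrauth-enabled guests. */
#define HCR_APK (1UL << 40)	/* don't trap key register accesses */
#define HCR_API (1UL << 41)	/* don't trap PAC/AUT instructions */

/*
 * Illustrative stand-in for the ARM64_HAS_ADDRESS_AUTH alternative:
 * true when either the architected or the IMP DEF address-auth
 * feature was detected system-wide.
 */
static bool has_address_auth(bool arch_feat, bool impdef_feat)
{
	return arch_feat || impdef_feat;
}

/* Mirrors the cbz on (HCR_API | HCR_APK) in the macros above. */
static bool should_switch_ptrauth_keys(bool arch_feat, bool impdef_feat,
					uint64_t hcr_el2)
{
	if (!has_address_auth(arch_feat, impdef_feat))
		return false;
	return (hcr_el2 & (HCR_API | HCR_APK)) != 0;
}

int main(void)
{
	/* Guest with ptrauth enabled: HCR_EL2.{API,APK} set, keys swapped. */
	printf("%d\n", should_switch_ptrauth_keys(true, false,
						  HCR_API | HCR_APK)); /* 1 */
	/* Guest without ptrauth: traps left in place, keys untouched. */
	printf("%d\n", should_switch_ptrauth_keys(true, false, 0));	 /* 0 */
	return 0;
}

Reading HCR_EL2 directly (mrs) rather than the shadow copy in the vcpu context also keeps the macro usable from the nVHE hyp text, where the host's view of the vcpu structure may not be mapped the same way.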
arch/arm64/kvm/hyp/nvhe/hyp-init.S  +5 −0

@@ -104,6 +104,11 @@ alternative_else_nop_endif
 	 */
 	mov_q	x4, (SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
 CPU_BE(	orr	x4, x4, #SCTLR_ELx_EE)
+alternative_if ARM64_HAS_ADDRESS_AUTH
+	mov_q	x5, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
+		     SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
+	orr	x4, x4, x5
+alternative_else_nop_endif
 	msr	sctlr_el2, x4
 	isb
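As a quick sanity check on what the new alternative block contributes, the sketch below computes the mask that gets OR-ed into SCTLR_EL2 when ARM64_HAS_ADDRESS_AUTH is detected. The bit positions are the architectural SCTLR_ELx.En{IA,IB,DA,DB} fields; only the small program wrapped around them is illustrative.

#include <stdint.h>
#include <stdio.h>

/* SCTLR_ELx pointer-authentication enable bits (Arm ARM, SCTLR_EL2). */
#define SCTLR_ELx_ENIA (1UL << 31)	/* PAC/AUT with APIAKey */
#define SCTLR_ELx_ENIB (1UL << 30)	/* PAC/AUT with APIBKey */
#define SCTLR_ELx_ENDA (1UL << 27)	/* PAC/AUT with APDAKey */
#define SCTLR_ELx_ENDB (1UL << 13)	/* PAC/AUT with APDBKey */

int main(void)
{
	/* The mask the added mov_q/orr pair folds into SCTLR_EL2. */
	uint64_t ptrauth_bits = SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |
				SCTLR_ELx_ENDA | SCTLR_ELx_ENDB;

	printf("SCTLR_EL2 ptrauth enable mask: 0x%016llx\n",
	       (unsigned long long)ptrauth_bits);	/* 0x00000000c8002000 */
	return 0;
}

Setting these bits at EL2 init matters on nVHE because the hyp code runs with its own SCTLR_EL2; without them, PAC instructions executed at EL2 would behave as NOPs (or fault, depending on configuration) even when the host kernel has ptrauth enabled at EL1.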
arch/arm64/kvm/reset.c  +10 −11

@@ -42,6 +42,11 @@ static u32 kvm_ipa_limit;
 #define VCPU_RESET_PSTATE_SVC	(PSR_AA32_MODE_SVC | PSR_AA32_A_BIT | \
 				 PSR_AA32_I_BIT | PSR_AA32_F_BIT)

+static bool system_has_full_ptr_auth(void)
+{
+	return system_supports_address_auth() && system_supports_generic_auth();
+}
+
 /**
  * kvm_arch_vm_ioctl_check_extension
  *
@@ -80,8 +85,7 @@ int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		break;
 	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
 	case KVM_CAP_ARM_PTRAUTH_GENERIC:
-		r = has_vhe() && system_supports_address_auth() &&
-		    system_supports_generic_auth();
+		r = system_has_full_ptr_auth();
 		break;
 	default:
 		r = 0;
@@ -205,19 +209,14 @@ static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)

 static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
 {
-	/* Support ptrauth only if the system supports these capabilities. */
-	if (!has_vhe())
-		return -EINVAL;
-
-	if (!system_supports_address_auth() ||
-	    !system_supports_generic_auth())
-		return -EINVAL;
 	/*
 	 * For now make sure that both address/generic pointer authentication
-	 * features are requested by the userspace together.
+	 * features are requested by the userspace together and the system
+	 * supports these capabilities.
 	 */
 	if (!test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
-	    !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features))
+	    !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features) ||
+	    !system_has_full_ptr_auth())
 		return -EINVAL;

 	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_PTRAUTH;
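Seen from userspace, the reset.c change means the two ptrauth capabilities are now advertised whenever the system has both address and generic authentication, with no VHE requirement, and a VMM must still request both vCPU feature bits together or KVM_ARM_VCPU_INIT fails with -EINVAL. A minimal sketch of that flow follows; it uses only existing uapi names (KVM_CAP_ARM_PTRAUTH_*, KVM_ARM_VCPU_PTRAUTH_*), assumes an arm64 host with headers from a kernel carrying this series, and trims most error handling.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	/* Both caps now report 1 on nVHE hosts too, provided the
	 * hardware implements address *and* generic authentication. */
	int addr = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ARM_PTRAUTH_ADDRESS);
	int gen  = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ARM_PTRAUTH_GENERIC);
	printf("ptrauth address:%d generic:%d\n", addr, gen);
	if (!addr || !gen)
		return 0;

	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);

	struct kvm_vcpu_init init;
	memset(&init, 0, sizeof(init));
	ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);

	/* kvm_vcpu_enable_ptrauth() insists on both bits being set together. */
	init.features[0] |= 1u << KVM_ARM_VCPU_PTRAUTH_ADDRESS;
	init.features[0] |= 1u << KVM_ARM_VCPU_PTRAUTH_GENERIC;

	if (ioctl(vcpu, KVM_ARM_VCPU_INIT, &init))
		perror("KVM_ARM_VCPU_INIT");
	return 0;
}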