arch/arm64/Kconfig (+0 −26)

```diff
@@ -1165,32 +1165,6 @@ config UNMAP_KERNEL_AT_EL0
 
 	  If unsure, say Y.
 
-config HARDEN_BRANCH_PREDICTOR
-	bool "Harden the branch predictor against aliasing attacks" if EXPERT
-	default y
-	help
-	  Speculation attacks against some high-performance processors rely on
-	  being able to manipulate the branch predictor for a victim context by
-	  executing aliasing branches in the attacker context.  Such attacks
-	  can be partially mitigated against by clearing internal branch
-	  predictor state and limiting the prediction logic in some situations.
-
-	  This config option will take CPU-specific actions to harden the
-	  branch predictor against aliasing attacks and may rely on specific
-	  instruction sequences or control bits being set by the system
-	  firmware.
-
-	  If unsure, say Y.
-
-config ARM64_SSBD
-	bool "Speculative Store Bypass Disable" if EXPERT
-	default y
-	help
-	  This enables mitigation of the bypassing of previous stores
-	  by speculative loads.
-
-	  If unsure, say Y.
-
 config RODATA_FULL_DEFAULT_ENABLED
 	bool "Apply r/o permissions of VM areas also to their linear aliases"
 	default y
```
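Removing both options makes the Spectre-v2 and SSBD mitigation code unconditional at build time, with the decision moving to runtime. A minimal sketch of what that means for a call site; `spectre_v2_mitigations_enabled()` and the callback are hypothetical names for illustration, not part of this diff:

```c
/* Sketch only: not from this diff. */
static void sketch_enable_spectre_v2_hardening(void)
{
	/*
	 * Before this series, a call like the one below could be compiled
	 * out entirely when CONFIG_HARDEN_BRANCH_PREDICTOR=n:
	 *
	 *	#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	 *		install_bp_hardening_cb(cb);
	 *	#endif
	 *
	 * With the option gone, the mitigation is always built, and whether
	 * it engages is decided at runtime (CPU identification, firmware
	 * capabilities, command line). The predicate name is hypothetical.
	 */
	if (spectre_v2_mitigations_enabled())
		install_bp_hardening_cb(sketch_hardening_cb);
}
```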
arch/arm64/include/asm/cpucaps.h (+2 −2)

```diff
@@ -31,13 +31,13 @@
 #define ARM64_HAS_DCPOP				21
 #define ARM64_SVE				22
 #define ARM64_UNMAP_KERNEL_AT_EL0		23
-#define ARM64_HARDEN_BRANCH_PREDICTOR		24
+#define ARM64_SPECTRE_V2			24
 #define ARM64_HAS_RAS_EXTN			25
 #define ARM64_WORKAROUND_843419			26
 #define ARM64_HAS_CACHE_IDC			27
 #define ARM64_HAS_CACHE_DIC			28
 #define ARM64_HW_DBM				29
-#define ARM64_SSBD				30
+#define ARM64_SPECTRE_V4			30
 #define ARM64_MISMATCHED_CACHE_TYPE		31
 #define ARM64_HAS_STAGE2_FWB			32
 #define ARM64_HAS_CRC32				33
```
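Only the names change here: the capability slots (24 and 30) keep their numbers, so cpucap tables and static-branch users just switch identifiers. A sketch of an updated caller, using the real `cpus_have_const_cap()` helper (the surrounding function is illustrative):

```c
#include <asm/cpufeature.h>

/* Illustrative caller: test the renamed capability bit. */
static bool sketch_spectre_v2_affected(void)
{
	/* Was: cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) */
	return cpus_have_const_cap(ARM64_SPECTRE_V2);
}
```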
arch/arm64/include/asm/cpufeature.h (+0 −24)

```diff
@@ -698,30 +698,6 @@ static inline bool system_supports_tlb_range(void)
 		cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
 }
 
-#define ARM64_BP_HARDEN_UNKNOWN		-1
-#define ARM64_BP_HARDEN_WA_NEEDED	0
-#define ARM64_BP_HARDEN_NOT_REQUIRED	1
-
-int get_spectre_v2_workaround_state(void);
-
-#define ARM64_SSBD_UNKNOWN		-1
-#define ARM64_SSBD_FORCE_DISABLE	0
-#define ARM64_SSBD_KERNEL		1
-#define ARM64_SSBD_FORCE_ENABLE		2
-#define ARM64_SSBD_MITIGATED		3
-
-static inline int arm64_get_ssbd_state(void)
-{
-#ifdef CONFIG_ARM64_SSBD
-	extern int ssbd_state;
-	return ssbd_state;
-#else
-	return ARM64_SSBD_UNKNOWN;
-#endif
-}
-
-void arm64_set_ssbd_mitigation(bool state);
-
 extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
 
 static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
```
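The two differently-shaped sets of ad-hoc integer constants go away, along with the `ssbd_state` global poked through an `extern` inside an inline function. The replacement interface is not shown in this hunk; based on the wider upstream series it lives in a new asm/spectre.h and looks roughly like the following (treat the exact names as an assumption drawn from that series, not from this diff):

```c
/* Rough shape of the replacement interface (from the wider series). */
enum mitigation_state {
	SPECTRE_UNAFFECTED,
	SPECTRE_MITIGATED,
	SPECTRE_VULNERABLE,
};

enum mitigation_state arm64_get_spectre_v2_state(void);
enum mitigation_state arm64_get_spectre_v4_state(void);
```

One tri-state enum then covers both vulnerabilities, which is what allows these per-vulnerability constants to be deleted here.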
arch/arm64/include/asm/kvm_asm.h (+0 −5)

```diff
@@ -10,9 +10,6 @@
 #include <asm/hyp_image.h>
 #include <asm/virt.h>
 
-#define VCPU_WORKAROUND_2_FLAG_SHIFT	0
-#define VCPU_WORKAROUND_2_FLAG		(_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
-
 #define ARM_EXIT_WITH_SERROR_BIT  31
 #define ARM_EXCEPTION_CODE(x)	  ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
 #define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
@@ -132,11 +129,9 @@ extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
 DECLARE_KVM_NVHE_SYM(__per_cpu_start);
 DECLARE_KVM_NVHE_SYM(__per_cpu_end);
 
-#ifdef CONFIG_KVM_INDIRECT_VECTORS
 extern atomic_t arm64_el2_vector_last_slot;
 DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
 #define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
-#endif
 
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
```
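With `CONFIG_KVM_INDIRECT_VECTORS` no longer guarding them, `__bp_harden_hyp_vecs` and the vector-slot allocator are declared unconditionally. As a reminder of how the retained allocator is consumed, a hedged sketch (`BP_HARDEN_EL2_SLOTS` exists elsewhere in the tree, but this exact caller is illustrative):

```c
#include <linux/atomic.h>
#include <asm/mmu.h>

/* Illustrative: reserve the next free slot in the EL2 vector page. */
static int sketch_alloc_hyp_vector_slot(void)
{
	int slot = atomic_inc_return(&arm64_el2_vector_last_slot);

	BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);	/* running out is a bug */
	return slot;
}
```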
arch/arm64/include/asm/kvm_emulate.h (+0 −14)

```diff
@@ -383,20 +383,6 @@ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
 	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
 }
 
-static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG;
-}
-
-static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
-						      bool flag)
-{
-	if (flag)
-		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
-	else
-		vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;
-}
-
 static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
 {
 	if (vcpu_mode_is_32bit(vcpu)) {
```
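These accessors were the per-vCPU half of the old SSBD scheme: bit 0 of `vcpu->arch.workaround_flags` recorded whether the guest had asked (via SMCCC ARCH_WORKAROUND_2) for the mitigation to be enabled. A hedged sketch of the kind of call site being retired along with them; the function below is illustrative, not from this diff, and deliberately uses the now-deleted APIs to show the old pattern:

```c
/*
 * Illustrative only: the retired pattern. Around guest entry, the
 * hypervisor matched the host SSBD mitigation to the guest's requested
 * per-vCPU state when the kernel was in the dynamic (ARM64_SSBD_KERNEL)
 * mode.
 */
static void sketch_apply_guest_ssbd_state(struct kvm_vcpu *vcpu)
{
	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
		arm64_set_ssbd_mitigation(
			kvm_arm_get_vcpu_workaround_2_flag(vcpu));
}
```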