arch/arm64/include/asm/kvm_asm.h  +30 −3

@@ -81,12 +81,39 @@ extern u32 __kvm_get_mdcr_el2(void);
 
 extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 
-/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
+/*
+ * Obtain the PC-relative address of a kernel symbol
+ * s: symbol
+ *
+ * The goal of this macro is to return a symbol's address based on a
+ * PC-relative computation, as opposed to loading the VA from a
+ * constant pool or something similar. This works well for HYP, as an
+ * absolute VA is guaranteed to be wrong. Only use this if trying to
+ * obtain the address of a symbol (i.e. not something you obtained by
+ * following a pointer).
+ */
+#define hyp_symbol_addr(s)						\
+	({								\
+		typeof(s) *addr;					\
+		asm("adrp	%0, %1\n"				\
+		    "add	%0, %0, :lo12:%1\n"			\
+		    : "=r" (addr) : "S" (&s));				\
+		addr;							\
+	})
+
+/*
+ * Home-grown __this_cpu_{ptr,read} variants that always work at HYP,
+ * provided that sym is really a *symbol* and not a pointer obtained from
+ * a data structure. As for SHIFT_PERCPU_PTR(), the creative casting keeps
+ * sparse quiet.
+ */
 #define __hyp_this_cpu_ptr(sym)					\
 	({								\
-		void *__ptr = hyp_symbol_addr(sym);			\
+		void *__ptr;						\
+		__verify_pcpu_ptr(&sym);				\
+		__ptr = hyp_symbol_addr(sym);				\
 		__ptr += read_sysreg(tpidr_el2);			\
-		(typeof(&sym))__ptr;					\
+		(typeof(sym) __kernel __force *)__ptr;			\
 	})
 
 #define __hyp_this_cpu_read(sym)					\
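A usage sketch, not part of this diff (the symbol and function names are hypothetical): at HYP the kernel's link-time VA for a symbol is wrong, so taking its address with plain & must be avoided; hyp_symbol_addr() instead computes the address relative to the executing PC.

/* Hypothetical HYP-mapped symbol, for illustration only. */
extern unsigned long hyp_panic_count;

static void hyp_note_panic(void)
{
	/*
	 * &hyp_panic_count would yield the kernel VA, which is wrong
	 * at EL2; the adrp/add pair emitted by hyp_symbol_addr()
	 * resolves the address relative to wherever this code runs.
	 */
	unsigned long *cnt = hyp_symbol_addr(hyp_panic_count);

	(*cnt)++;
}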
arch/arm64/include/asm/kvm_emulate.h  +0 −6

@@ -112,12 +112,6 @@ static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
 	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
 }
 
-static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
-{
-	if (vcpu_has_ptrauth(vcpu))
-		vcpu_ptrauth_disable(vcpu);
-}
-
 static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.vsesr_el2;
arch/arm64/include/asm/kvm_host.h  +0 −3

@@ -284,9 +284,6 @@ struct kvm_vcpu_arch {
 	struct kvm_guest_debug_arch vcpu_debug_state;
 	struct kvm_guest_debug_arch external_debug_state;
 
-	/* Pointer to host CPU context */
-	struct kvm_cpu_context *host_cpu_context;
-
 	struct thread_info *host_thread_info;	/* hyp VA */
 	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */
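With the cached pointer removed, code that needs the host context looks it up per-CPU instead. A minimal sketch of that pattern, assuming the kvm_host_data_t/host_ctxt layout visible in the arm.c hunk below (the helper name is hypothetical; HYP-side code would use the __hyp_this_cpu_ptr() variant from kvm_asm.h rather than this_cpu_ptr()):

static struct kvm_cpu_context *get_host_ctxt(void)
{
	/* Per-CPU lookup replacing vcpu->arch.host_cpu_context. */
	kvm_host_data_t *host_data = this_cpu_ptr(&kvm_host_data);

	return &host_data->host_ctxt;
}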
arch/arm64/include/asm/kvm_mmu.h  +0 −20

@@ -107,26 +107,6 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 
 #define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
 
-/*
- * Obtain the PC-relative address of a kernel symbol
- * s: symbol
- *
- * The goal of this macro is to return a symbol's address based on a
- * PC-relative computation, as opposed to loading the VA from a
- * constant pool or something similar. This works well for HYP, as an
- * absolute VA is guaranteed to be wrong. Only use this if trying to
- * obtain the address of a symbol (i.e. not something you obtained by
- * following a pointer).
- */
-#define hyp_symbol_addr(s)						\
-	({								\
-		typeof(s) *addr;					\
-		asm("adrp	%0, %1\n"				\
-		    "add	%0, %0, :lo12:%1\n"			\
-		    : "=r" (addr) : "S" (&s));				\
-		addr;							\
-	})
-
 /*
  * We currently support using a VM-specified IPA size. For backward
  * compatibility, the default IPA size is fixed to 40bits.
arch/arm64/kvm/arm.c  +2 −4

@@ -340,10 +340,8 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	int *last_ran;
-	kvm_host_data_t *cpu_data;
 
 	last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
-	cpu_data = this_cpu_ptr(&kvm_host_data);
 
 	/*
 	 * We might get preempted before the vCPU actually runs, but
@@ -355,7 +353,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	}
 
 	vcpu->cpu = cpu;
-	vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;
 
 	kvm_vgic_load(vcpu);
 	kvm_timer_vcpu_load(vcpu);
@@ -370,7 +367,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	else
 		vcpu_set_wfx_traps(vcpu);
 
-	vcpu_ptrauth_setup_lazy(vcpu);
+	if (vcpu_has_ptrauth(vcpu))
+		vcpu_ptrauth_disable(vcpu);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
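The two ptrauth hunks are a semantic no-op: the removed vcpu_ptrauth_setup_lazy() helper is simply open-coded at its only call site. Clearing HCR_API/HCR_APK on vcpu load means the guest's first PtrAuth instruction traps to EL2, where KVM can switch the keys in and stop trapping. A conceptual sketch of that trap-side half, not code from this diff (the handler name is hypothetical):

static void handle_guest_ptrauth_trap(struct kvm_vcpu *vcpu)
{
	/* Context-switch the ptrauth key registers here (elided)... */

	/* ...then clear the traps so further guest use runs natively. */
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}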