arch/arm64/include/asm/kvm_asm.h +0 −2

@@ -189,8 +189,6 @@ extern void __kvm_timer_set_cntvoff(u64 cntvoff);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
-extern void __kvm_enable_ssbs(void);
-
 extern u64 __vgic_v3_get_ich_vtr_el2(void);
 extern u64 __vgic_v3_read_vmcr(void);
 extern void __vgic_v3_write_vmcr(u32 vmcr);
arch/arm64/include/asm/kvm_mmu.h +40 −0

@@ -72,6 +72,28 @@ alternative_cb kvm_update_va_mask
 alternative_cb_end
 .endm
 
+/*
+ * Convert a kernel image address to a PA
+ * reg: kernel address to be converted in place
+ * tmp: temporary register
+ *
+ * The actual code generation takes place in kvm_get_kimage_voffset, and
+ * the instructions below are only there to reserve the space and
+ * perform the register allocation (kvm_get_kimage_voffset uses the
+ * specific registers encoded in the instructions).
+ */
+.macro kimg_pa reg, tmp
+alternative_cb kvm_get_kimage_voffset
+	movz	\tmp, #0
+	movk	\tmp, #0, lsl #16
+	movk	\tmp, #0, lsl #32
+	movk	\tmp, #0, lsl #48
+alternative_cb_end
+
+	/* reg = __pa(reg) */
+	sub	\reg, \reg, \tmp
+.endm
+
 #else
 
 #include <linux/pgtable.h>

@@ -98,6 +120,24 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 
 #define kern_hyp_va(v)	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
 
+static __always_inline unsigned long __kimg_hyp_va(unsigned long v)
+{
+	unsigned long offset;
+
+	asm volatile(ALTERNATIVE_CB("movz %0, #0\n"
+				    "movk %0, #0, lsl #16\n"
+				    "movk %0, #0, lsl #32\n"
+				    "movk %0, #0, lsl #48\n",
+				    kvm_update_kimg_phys_offset)
+		     : "=r" (offset));
+
+	return __kern_hyp_va((v - offset) | PAGE_OFFSET);
+}
+
+#define kimg_fn_hyp_va(v)	((typeof(*v))(__kimg_hyp_va((unsigned long)(v))))
+
+#define kimg_fn_ptr(x)	(typeof(x) **)(x)
+
 /*
  * We currently support using a VM-specified IPA size. For backward
  * compatibility, the default IPA size is fixed to 40bits.
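A note for readers tracing the conversion chain: __kimg_hyp_va() turns a kernel image address into a hyp VA by subtracting the patched physical offset, folding the resulting PA into the linear map with PAGE_OFFSET, and then applying the usual kern_hyp_va() transformation. Below is a minimal standalone C sketch of that arithmetic. Every constant in it (the PAGE_OFFSET stand-in, the image offset, the hyp VA mask) is a made-up placeholder, since the real values are only known at boot when kvm_update_kimg_phys_offset patches the movz/movk slot; only the order of operations mirrors the helper above.

	/*
	 * Userspace sketch of the __kimg_hyp_va() arithmetic. All three
	 * constants are fake placeholders for values the kernel patches
	 * or computes at boot.
	 */
	#include <stdio.h>

	#define FAKE_PAGE_OFFSET	0xffff800000000000UL	/* linear map base */
	#define FAKE_KIMG_OFFSET	0xffff60000f000000UL	/* kimage VA - load PA */
	#define FAKE_HYP_VA_MASK	0x0000ffffffffffffUL	/* kern_hyp_va mask */

	/* Stand-in for __kern_hyp_va(): fold a linear-map VA into the hyp range. */
	static unsigned long kern_hyp_va(unsigned long v)
	{
		return v & FAKE_HYP_VA_MASK;
	}

	/* Stand-in for __kimg_hyp_va(): image VA -> PA -> linear VA -> hyp VA. */
	static unsigned long kimg_hyp_va(unsigned long v)
	{
		unsigned long pa = v - FAKE_KIMG_OFFSET;	/* v - offset */

		return kern_hyp_va(pa | FAKE_PAGE_OFFSET);	/* | PAGE_OFFSET */
	}

	int main(void)
	{
		unsigned long fn = 0xffff60000f123456UL;	/* made-up image address */

		printf("kimg VA %#lx -> hyp VA %#lx\n", fn, kimg_hyp_va(fn));
		return 0;
	}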
arch/arm64/include/asm/sysreg.h +1 −0

@@ -465,6 +465,7 @@
 #define SYS_PMCCFILTR_EL0		sys_reg(3, 3, 14, 15, 7)
 
+#define SYS_SCTLR_EL2			sys_reg(3, 4, 1, 0, 0)
 #define SYS_ZCR_EL2			sys_reg(3, 4, 1, 2, 0)
 #define SYS_DACR32_EL2			sys_reg(3, 4, 3, 0, 0)
 #define SYS_SPSR_EL2			sys_reg(3, 4, 4, 0, 0)
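The sys_reg() value is easy to check by hand: it packs the Op0/Op1/CRn/CRm/Op2 tuple into the same bit positions the MRS/MSR encoding uses. The sketch below recomputes the new SCTLR_EL2 define with the shifts I recall asm/sysreg.h using (Op0=19, Op1=16, CRn=12, CRm=8, Op2=5); treat those shift values as an assumption to verify against the header.

	/* Recompute a sys_reg() encoding in userspace; shifts assumed, see above. */
	#include <stdio.h>

	#define Op0_shift	19
	#define Op1_shift	16
	#define CRn_shift	12
	#define CRm_shift	8
	#define Op2_shift	5

	#define sys_reg(op0, op1, crn, crm, op2) \
		(((op0) << Op0_shift) | ((op1) << Op1_shift) | \
		 ((crn) << CRn_shift) | ((crm) << CRm_shift) | \
		 ((op2) << Op2_shift))

	int main(void)
	{
		/* SCTLR_EL2 is op0=3, op1=4, crn=1, crm=0, op2=0 */
		printf("SYS_SCTLR_EL2 = %#x\n", sys_reg(3, 4, 1, 0, 0));
		return 0;
	}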
arch/arm64/kernel/image-vars.h +2 −3

@@ -64,13 +64,12 @@ __efistub__ctype		= _ctype;
 /* Alternative callbacks for init-time patching of nVHE hyp code. */
 KVM_NVHE_ALIAS(kvm_patch_vector_branch);
 KVM_NVHE_ALIAS(kvm_update_va_mask);
+KVM_NVHE_ALIAS(kvm_update_kimg_phys_offset);
+KVM_NVHE_ALIAS(kvm_get_kimage_voffset);
 
 /* Global kernel state accessed by nVHE hyp code. */
 KVM_NVHE_ALIAS(kvm_vgic_global_state);
 
-/* Kernel constant needed to compute idmap addresses. */
-KVM_NVHE_ALIAS(kimage_voffset);
-
 /* Kernel symbols used to call panic() from nVHE hyp code (via ERET). */
 KVM_NVHE_ALIAS(__hyp_panic_string);
 KVM_NVHE_ALIAS(panic);
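For context on what these lines do (as I read it, image-vars.h is included from the linker script, and KVM_NVHE_ALIAS expands to an assignment of the form __kvm_nvhe_<sym> = <sym>;, giving the hyp namespace a second name for a kernel symbol). A conceptual C-level analogue of that aliasing, not how the kernel actually implements it:

	#include <stdio.h>

	/* stand-in for a kernel symbol */
	unsigned long kernel_value = 42;

	/* second name for the same object, in the spirit of KVM_NVHE_ALIAS */
	extern unsigned long hyp_value __attribute__((alias("kernel_value")));

	int main(void)
	{
		printf("%lu %lu\n", kernel_value, hyp_value);	/* prints "42 42" */
		return 0;
	}

The removal of the kimage_voffset alias is the point of the series: instead of the hyp code loading the EL1 kernel's kimage_voffset variable at runtime, the value is now baked in at patch time via the kvm_get_kimage_voffset callback.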
arch/arm64/kvm/hyp/nvhe/host.S +4 −7

@@ -13,8 +13,6 @@
 	.text
 
 SYM_FUNC_START(__host_exit)
-	stp	x0, x1, [sp, #-16]!
-
 	get_host_ctxt	x0, x1
 
 	/* Store the host regs x2 and x3 */

@@ -99,13 +97,15 @@ SYM_FUNC_END(__hyp_do_panic)
 	mrs	x0, esr_el2
 	lsr	x0, x0, #ESR_ELx_EC_SHIFT
 	cmp	x0, #ESR_ELx_EC_HVC64
-	ldp	x0, x1, [sp], #16
 	b.ne	__host_exit
 
+	ldp	x0, x1, [sp]		// Don't fixup the stack yet
+
 	/* Check for a stub HVC call */
 	cmp	x0, #HVC_STUB_HCALL_NR
 	b.hs	__host_exit
 
+	add	sp, sp, #16
 	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the

@@ -115,10 +115,7 @@ SYM_FUNC_END(__hyp_do_panic)
 	 * Preserve x0-x4, which may contain stub parameters.
 	 */
 	ldr	x5, =__kvm_handle_stub_hvc
-	ldr_l	x6, kimage_voffset
-
-	/* x5 = __pa(x5) */
-	sub	x5, x5, x6
+	kimg_pa x5, x6
 	br	x5
 .L__vect_end\@:
 	.if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
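To make the kimg_pa mechanism concrete: the four instructions reserved in the macro are a movz plus three movk, and the kvm_get_kimage_voffset callback rewrites their immediates at boot so that the sequence materializes the 64-bit kimage_voffset in the temporary register, 16 bits at a time. The sketch below builds such a sequence with hand-rolled MOVZ/MOVK encodings; it illustrates the technique only, it is not the kernel's aarch64_insn_* API, and the offset value is made up.

	#include <stdio.h>
	#include <stdint.h>

	/* 64-bit MOVZ/MOVK base opcodes; hw (bits 22:21) picks the 16-bit slice. */
	#define MOVZ_X	0xd2800000u
	#define MOVK_X	0xf2800000u

	static uint32_t mov_imm16(uint32_t base, unsigned int rd, unsigned int hw,
				  uint16_t imm)
	{
		return base | (hw << 21) | ((uint32_t)imm << 5) | rd;
	}

	/*
	 * Fill the four-instruction slot that kimg_pa reserves: one movz plus
	 * three movk, loading 'val' into register 'rd' 16 bits at a time.
	 */
	static void patch_mov_sequence(uint32_t insn[4], unsigned int rd,
				       uint64_t val)
	{
		for (unsigned int hw = 0; hw < 4; hw++)
			insn[hw] = mov_imm16(hw ? MOVK_X : MOVZ_X, rd, hw,
					     (uint16_t)(val >> (16 * hw)));
	}

	int main(void)
	{
		uint32_t insn[4];

		/* x6 <- made-up kimage_voffset value */
		patch_mov_sequence(insn, 6, 0xffff800010000000UL);
		for (int i = 0; i < 4; i++)
			printf("0x%08x\n", insn[i]);
		return 0;
	}

With the constant patched in, the stub path above no longer touches the EL1 kernel's memory at all: kimg_pa subtracts the baked-in offset from x5, which is exactly the /* x5 = __pa(x5) */ computation the removed ldr_l/sub pair used to do by loading kimage_voffset at runtime.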