arch/arm64/include/asm/cpucaps.h  +2 −1

@@ -62,7 +62,8 @@
 #define ARM64_HAS_GENERIC_AUTH		52
 #define ARM64_HAS_32BIT_EL1		53
 #define ARM64_BTI			54
+#define ARM64_HAS_ARMv8_4_TTL		55
 
-#define ARM64_NCAPS			55
+#define ARM64_NCAPS			56
 
 #endif /* __ASM_CPUCAPS_H */
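Note: the new ARM64_HAS_ARMv8_4_TTL capability is what lets later patches gate the TLB level hint at runtime. A minimal sketch of that gating, assuming the standard arm64 capability helper (the function and its callsite are illustrative, not part of this diff):

#include <asm/cpucaps.h>
#include <asm/cpufeature.h>

/* Hypothetical illustration: only emit a TTL hint when all CPUs support it. */
static bool stage2_can_use_ttl_hint(void)
{
	return cpus_have_const_cap(ARM64_HAS_ARMv8_4_TTL);
}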
arch/arm64/include/asm/kvm_asm.h  +5 −3

@@ -95,6 +95,7 @@ extern void *__nvhe_undefined_symbol;
 
 struct kvm;
 struct kvm_vcpu;
+struct kvm_s2_mmu;
 
 DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
 DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
@@ -108,9 +109,10 @@ DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
 #endif
 
 extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
-extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
-extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
+extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
+				     int level);
+extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
+extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);
 
 extern void __kvm_timer_set_cntvoff(u64 cntvoff);
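The TLB maintenance hypercalls now take the stage-2 MMU context directly instead of a struct kvm or a vcpu, and __kvm_tlb_flush_vmid_ipa() gains a level argument for TTL-style invalidation. A sketch of an updated caller, ignoring the kvm_call_hyp() indirection and assuming a level of 0 means "no hint" (the wrapper below is illustrative):

#include <asm/kvm_asm.h>

/* Hypothetical caller: invalidate a single IPA whose leaf level is known. */
static void stage2_flush_one_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
				 int level)
{
	/* level is the page-table level of the mapping, or 0 if unknown. */
	__kvm_tlb_flush_vmid_ipa(mmu, ipa, level);
}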
arch/arm64/include/asm/kvm_emulate.h  +8 −29

@@ -124,33 +124,12 @@ static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
 
 static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
 {
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
-}
-
-static inline unsigned long *__vcpu_elr_el1(const struct kvm_vcpu *vcpu)
-{
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
-}
-
-static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
-{
-	if (vcpu->arch.sysregs_loaded_on_cpu)
-		return read_sysreg_el1(SYS_ELR);
-	else
-		return *__vcpu_elr_el1(vcpu);
-}
-
-static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long v)
-{
-	if (vcpu->arch.sysregs_loaded_on_cpu)
-		write_sysreg_el1(v, SYS_ELR);
-	else
-		*__vcpu_elr_el1(vcpu) = v;
+	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
 }
 
 static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
 {
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
+	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
 }
 
 static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
@@ -179,14 +158,14 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
 						  u8 reg_num)
 {
-	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
 }
 
 static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
 					 unsigned long val)
 {
 	if (reg_num != 31)
-		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
+		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
 }
 
 static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
@@ -197,7 +176,7 @@ static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
 	if (vcpu->arch.sysregs_loaded_on_cpu)
 		return read_sysreg_el1(SYS_SPSR);
 	else
-		return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
+		return __vcpu_sys_reg(vcpu, SPSR_EL1);
 }
 
 static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
@@ -210,7 +189,7 @@ static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
 	if (vcpu->arch.sysregs_loaded_on_cpu)
 		write_sysreg_el1(v, SYS_SPSR);
 	else
-		vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
+		__vcpu_sys_reg(vcpu, SPSR_EL1) = v;
 }
 
 /*
@@ -519,11 +498,11 @@ static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_i
 static __always_inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
 {
 	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
-	vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
+	vcpu_gp_regs(vcpu)->pstate = read_sysreg_el2(SYS_SPSR);
 
 	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 
-	write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, SYS_SPSR);
+	write_sysreg_el2(vcpu_gp_regs(vcpu)->pstate, SYS_SPSR);
 	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
 }
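With ELR_EL1 and the EL1 SPSR folded into the sys_regs array (see the kvm_host.h hunks below), the dedicated ELR accessors disappear; callers are expected to go through the generic sys_reg accessors instead. A sketch of the equivalent read, assuming vcpu_read_sys_reg() handles the sysregs_loaded_on_cpu case the way the SPSR helpers above do (the wrapper name is illustrative):

#include <asm/kvm_host.h>

/* Hypothetical replacement for the removed vcpu_read_elr_el1(). */
static u64 read_guest_elr(const struct kvm_vcpu *vcpu)
{
	/* ELR_EL1 is now an index into the vcpu's sys_regs array. */
	return vcpu_read_sys_reg(vcpu, ELR_EL1);
}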
arch/arm64/include/asm/kvm_host.h  +56 −15

@@ -66,19 +66,34 @@ struct kvm_vmid {
 	u32    vmid;
 };
 
-struct kvm_arch {
+struct kvm_s2_mmu {
 	struct kvm_vmid vmid;
 
-	/* stage2 entry level table */
+	/*
+	 * stage2 entry level table
+	 *
+	 * Two kvm_s2_mmu structures in the same VM can point to the same
+	 * pgd here.  This happens when running a guest using a
+	 * translation regime that isn't affected by its own stage-2
+	 * translation, such as a non-VHE hypervisor running at vEL2, or
+	 * for vEL1/EL0 with vHCR_EL2.VM == 0. In that case, we use the
+	 * canonical stage-2 page tables.
+	 */
 	pgd_t *pgd;
 	phys_addr_t pgd_phys;
 
-	/* VTCR_EL2 value for this VM */
-	u64    vtcr;
-
 	/* The last vcpu id that ran on each physical CPU */
 	int __percpu *last_vcpu_ran;
 
+	struct kvm *kvm;
+};
+
+struct kvm_arch {
+	struct kvm_s2_mmu mmu;
+
+	/* VTCR_EL2 value for this VM */
+	u64    vtcr;
+
 	/* The maximum number of vCPUs depends on the used GIC model */
 	int max_vcpus;
 
@@ -170,6 +185,16 @@ enum vcpu_sysreg {
 	APGAKEYLO_EL1,
 	APGAKEYHI_EL1,
 
+	ELR_EL1,
+	SP_EL1,
+	SPSR_EL1,
+
+	CNTVOFF_EL2,
+	CNTV_CVAL_EL0,
+	CNTV_CTL_EL0,
+	CNTP_CVAL_EL0,
+	CNTP_CTL_EL0,
+
 	/* 32bit specific registers. Keep them at the end of the range */
 	DACR32_EL2,	/* Domain Access Control Register */
 	IFSR32_EL2,	/* Instruction Fault Status Register */
 
@@ -221,7 +246,15 @@ enum vcpu_sysreg {
 #define NR_COPRO_REGS	(NR_SYS_REGS * 2)
 
 struct kvm_cpu_context {
-	struct kvm_regs	gp_regs;
+	struct user_pt_regs regs;	/* sp = sp_el0 */
+
+	u64	spsr_abt;
+	u64	spsr_und;
+	u64	spsr_irq;
+	u64	spsr_fiq;
+
+	struct user_fpsimd_state fp_regs;
+
 	union {
 		u64 sys_regs[NR_SYS_REGS];
 		u32 copro[NR_COPRO_REGS];
 
@@ -254,6 +287,9 @@ struct kvm_vcpu_arch {
 	void *sve_state;
 	unsigned int sve_max_vl;
 
+	/* Stage 2 paging state used by the hardware on next switch */
+	struct kvm_s2_mmu *hw_mmu;
+
 	/* HYP configuration */
 	u64 hcr_el2;
 	u32 mdcr_el2;
 
@@ -384,15 +420,20 @@ struct kvm_vcpu_arch {
 					  system_supports_generic_auth()) && \
 			 ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH))
 
-#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.gp_regs)
+#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)
 
 /*
- * Only use __vcpu_sys_reg if you know you want the memory backed version of a
- * register, and not the one most recently accessed by a running VCPU.  For
- * example, for userspace access or for system registers that are never context
- * switched, but only emulated.
+ * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
+ * memory backed version of a register, and not the one most recently
+ * accessed by a running VCPU. For example, for userspace access or
+ * for system registers that are never context switched, but only
+ * emulated.
  */
-#define __vcpu_sys_reg(v,r)	((v)->arch.ctxt.sys_regs[(r)])
+#define __ctxt_sys_reg(c,r)	(&(c)->sys_regs[(r)])
+
+#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))
+
+#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))
 
 u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
 void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
 
@@ -538,7 +579,7 @@ DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data);
 static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
 {
 	/* The host's MPIDR is immutable, so let's set it up at boot time */
-	cpu_ctxt->sys_regs[MPIDR_EL1] = read_cpuid_mpidr();
+	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
 }
 
 static inline bool kvm_arch_requires_vhe(void)
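The new __ctxt_sys_reg()/ctxt_sys_reg() pair gives code that only holds a kvm_cpu_context (such as the host context in kvm_init_host_cpu_context() above) the same accessor that vcpu-based code gets via __vcpu_sys_reg(). A minimal usage sketch (both helper functions are illustrative):

#include <asm/kvm_host.h>

/* Hypothetical helpers: two routes to the same backing storage. */
static u64 ctxt_mpidr(struct kvm_cpu_context *ctxt)
{
	return ctxt_sys_reg(ctxt, MPIDR_EL1);	/* via a bare context */
}

static u64 vcpu_mpidr(const struct kvm_vcpu *vcpu)
{
	return __vcpu_sys_reg(vcpu, MPIDR_EL1);	/* via a vcpu, same cell */
}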
arch/arm64/include/asm/kvm_mmu.h  +8 −8

@@ -134,8 +134,8 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
 void free_hyp_pgds(void);
 
 void stage2_unmap_vm(struct kvm *kvm);
-int kvm_alloc_stage2_pgd(struct kvm *kvm);
-void kvm_free_stage2_pgd(struct kvm *kvm);
+int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
+void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 			  phys_addr_t pa, unsigned long size, bool writable);
 
@@ -577,13 +577,13 @@ static inline u64 kvm_vttbr_baddr_mask(struct kvm *kvm)
 	return vttbr_baddr_mask(kvm_phys_shift(kvm), kvm_stage2_levels(kvm));
 }
 
-static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
+static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
 {
-	struct kvm_vmid *vmid = &kvm->arch.vmid;
+	struct kvm_vmid *vmid = &mmu->vmid;
 	u64 vmid_field, baddr;
 	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;
 
-	baddr = kvm->arch.pgd_phys;
+	baddr = mmu->pgd_phys;
 	vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
 	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
 }
 
@@ -592,10 +592,10 @@ static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
  * Must be called from hyp code running at EL2 with an updated VTTBR
  * and interrupts disabled.
  */
-static __always_inline void __load_guest_stage2(struct kvm *kvm)
+static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
 {
-	write_sysreg(kvm->arch.vtcr, vtcr_el2);
-	write_sysreg(kvm_get_vttbr(kvm), vttbr_el2);
+	write_sysreg(kern_hyp_va(mmu->kvm)->arch.vtcr, vtcr_el2);
+	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);
 
 	/*
 	 * ARM errata 1165522 and 1530923 require the actual execution of the
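Hyp code now derives the stage-2 configuration from the MMU context rather than from struct kvm, which is what makes a second (e.g. nested) stage-2 context pluggable later. A sketch of the switch-path usage, assuming vcpu->arch.hw_mmu was set up as introduced in the kvm_host.h hunk above (for a single-MMU VM it would simply point at &kvm->arch.mmu; the function name is illustrative):

#include <asm/kvm_host.h>
#include <asm/kvm_mmu.h>

/* Hypothetical switch-path fragment: activate this vcpu's stage-2 MMU. */
static void activate_guest_stage2(struct kvm_vcpu *vcpu)
{
	__load_guest_stage2(vcpu->arch.hw_mmu);
}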