arch/arm64/include/asm/kvm_emulate.h  +1 −1

@@ -345,7 +345,7 @@ static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
 }
 
-static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
 {
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
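The rename makes the helper's scope explicit: it classifies any synchronous external abort, instruction or data, not just data aborts. For context, a sketch of the full helper body as it reads after this diff, reproduced from memory of kvm_emulate.h at this point in the tree (the FSC_* case list below is an assumption of this sketch, not part of the hunk above):

	static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
	{
		switch (kvm_vcpu_trap_get_fault(vcpu)) {
		case FSC_SEA:		/* external abort on the access itself */
		case FSC_SEA_TTW0:	/* ... or on a translation table walk, */
		case FSC_SEA_TTW1:	/* levels 0-3 */
		case FSC_SEA_TTW2:
		case FSC_SEA_TTW3:
		case FSC_SECC:		/* parity/ECC error on the access */
		case FSC_SECC_TTW0:	/* ... or on a table walk, levels 0-3 */
		case FSC_SECC_TTW1:
		case FSC_SECC_TTW2:
		case FSC_SECC_TTW3:
			return true;
		default:
			return false;
		}
	}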
arch/arm64/kvm/hyp/include/hyp/switch.h  +1 −1

@@ -444,7 +444,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 	valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
		kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
		kvm_vcpu_dabt_isvalid(vcpu) &&
-		!kvm_vcpu_dabt_isextabt(vcpu) &&
+		!kvm_vcpu_abt_issea(vcpu) &&
		!kvm_vcpu_dabt_iss1tw(vcpu);
 
 	if (valid) {
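The `valid' predicate picks out the only aborts the hypervisor fixup path can handle inline: a stage-2 translation fault on a data access with a fully decoded syndrome, which is neither an external abort nor a fault taken on a stage-1 page-table walk. For reference, the accessors it composes each test a single ESR_EL2 bit; a sketch from memory of kvm_emulate.h, not part of this diff:

	static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
	{
		return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);	/* syndrome valid */
	}

	static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
	{
		return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);	/* stage-1 table walk */
	}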
arch/arm64/kvm/mmio.c  +0 −6

@@ -145,12 +145,6 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
		return -ENOSYS;
	}
 
-	/* Page table accesses IO mem: tell guest to fix its TTBR */
-	if (kvm_vcpu_dabt_iss1tw(vcpu)) {
-		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
-		return 1;
-	}
-
	/*
	 * Prepare MMIO operation. First decode the syndrome data we get
	 * from the CPU. Then try if some in-kernel emulation feels
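The stage-1 walk check is not dropped, it moves earlier: kvm_handle_guest_abort() (next file) now injects the data abort before io_mem_abort() is ever reached, so MMIO emulation only sees genuine data accesses. For orientation, the `return -ENOSYS' just above the removed block belongs to the no-syndrome path; a sketch from memory of how io_mem_abort() opens at this point in the tree (field and helper names from the KVM_CAP_ARM_NISV_TO_USER ABI; verify against the tree):

	if (!kvm_vcpu_dabt_isvalid(vcpu)) {
		if (vcpu->kvm->arch.return_nisv_io_abort_to_user) {
			/* No decoded syndrome: let userspace decode the access. */
			run->exit_reason = KVM_EXIT_ARM_NISV;
			run->arm_nisv.esr_iss = kvm_vcpu_dabt_iss_nisv_sanitized(vcpu);
			run->arm_nisv.fault_ipa = fault_ipa;
			return 0;
		}

		kvm_pr_unimpl("Data abort outside memslots with no valid syndrome info\n");
		return -ENOSYS;
	}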
arch/arm64/kvm/mmu.c  +17 −9

@@ -2111,19 +2111,16 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
 
 	/* Synchronous External Abort? */
-	if (kvm_vcpu_dabt_isextabt(vcpu)) {
+	if (kvm_vcpu_abt_issea(vcpu)) {
		/*
		 * For RAS the host kernel may handle this abort.
		 * There is no need to pass the error into the guest.
		 */
-		if (!kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu)))
-			return 1;
-
-		if (unlikely(!is_iabt)) {
+		if (kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu)))
			kvm_inject_vabt(vcpu);
-			return 1;
-		}
+
+		return 1;
	}
 
 	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
			      kvm_vcpu_get_hfar(vcpu), fault_ipa);

@@ -2145,12 +2142,23 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
 	write_fault = kvm_is_write_fault(vcpu);
 	if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
+		/*
+		 * The guest has put either its instructions or its page-tables
+		 * somewhere it shouldn't have. Userspace won't be able to do
+		 * anything about this (there's no syndrome for a start), so
+		 * re-inject the abort back into the guest.
+		 */
		if (is_iabt) {
-			/* Prefetch Abort on I/O address */
			ret = -ENOEXEC;
			goto out;
		}
 
+		if (kvm_vcpu_dabt_iss1tw(vcpu)) {
+			kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+			ret = 1;
+			goto out_unlock;
+		}
+
		/*
		 * Check for a cache maintenance operation. Since we
		 * ended-up here, we know it is outside of any memory

@@ -2161,7 +2169,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
		 * So let's assume that the guest is just being
		 * cautious, and skip the instruction.
		 */
-		if (kvm_vcpu_dabt_is_cm(vcpu)) {
+		if (kvm_is_error_hva(hva) && kvm_vcpu_dabt_is_cm(vcpu)) {
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			ret = 1;
			goto out_unlock;
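Two behavioural points are easy to miss in the hunks above. First, SEA handling no longer distinguishes instruction aborts from data aborts: if the host RAS machinery cannot claim the error, the guest gets a virtual SError either way. Second, the cache-maintenance short-cut is now gated on kvm_is_error_hva(hva), because the enclosing `if' can also be entered for a write fault on a read-only memslot, where skipping the instruction would be wrong. A commented sketch of the resulting SEA path, assembled from the "after" side of the first hunk for readability:

	/* Synchronous External Abort? */
	if (kvm_vcpu_abt_issea(vcpu)) {
		/*
		 * For RAS the host kernel may handle this abort.
		 * There is no need to pass the error into the guest.
		 */
		if (kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu)))
			kvm_inject_vabt(vcpu);	/* unclaimed by the host: virtual SError */

		return 1;			/* resume the guest in either case */
	}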
include/trace/events/kvm.h  +1 −1

@@ -17,7 +17,7 @@
	ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL),	\
	ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH), ERSN(EPR),\
	ERSN(SYSTEM_EVENT), ERSN(S390_STSI), ERSN(IOAPIC_EOI),		\
-	ERSN(HYPERV)
+	ERSN(HYPERV), ERSN(ARM_NISV)
 
 TRACE_EVENT(kvm_userspace_exit,
	    TP_PROTO(__u32 reason, int errno),
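ERSN() only teaches the kvm_userspace_exit tracepoint's pretty-printer about the exit reason; the exit itself is KVM_EXIT_ARM_NISV from the UAPI. A minimal userspace sketch of consuming that exit, assuming KVM_CAP_ARM_NISV_TO_USER has been enabled on the VM (the handle_exit() wrapper is hypothetical; the kvm_run fields are from linux/kvm.h):

	#include <linux/kvm.h>
	#include <stdio.h>

	static int handle_exit(struct kvm_run *run)
	{
		switch (run->exit_reason) {
		case KVM_EXIT_ARM_NISV:
			/*
			 * A data abort hit an unmapped IPA with ESR_EL2.ISV clear,
			 * so KVM could not decode the access for MMIO emulation.
			 * A real VMM would decode the guest instruction itself or
			 * inject an abort back into the guest; this sketch just logs.
			 */
			fprintf(stderr, "NISV abort: esr_iss=%#llx fault_ipa=%#llx\n",
				(unsigned long long)run->arm_nisv.esr_iss,
				(unsigned long long)run->arm_nisv.fault_ipa);
			return -1;
		default:
			return 0;
		}
	}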