Documentation/virtual/kvm/locking.txt +11 −1

@@ -4,7 +4,17 @@ KVM Lock Overview
 1. Acquisition Orders
 ---------------------
 
-(to be written)
+The acquisition orders for mutexes are as follows:
+
+- kvm->lock is taken outside vcpu->mutex
+
+- kvm->lock is taken outside kvm->slots_lock and kvm->irq_lock
+
+- kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
+  them together is quite rare.
+
+For spinlocks, kvm_lock is taken outside kvm->mmu_lock.  Everything
+else is a leaf: no other lock is taken inside the critical sections.
 
 2: Exception
 ------------
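Note: the ordering rules above mean any path needing several of these locks must nest them consistently. A minimal illustrative sketch of the mutex order; the structures and my_locked_op() are hypothetical stand-ins for struct kvm / struct kvm_vcpu, not kernel code:

	#include <linux/mutex.h>

	/* Hypothetical stand-ins for the relevant struct kvm / struct kvm_vcpu fields. */
	struct my_kvm {
		struct mutex lock;
		struct mutex slots_lock;
		struct mutex irq_lock;
	};

	struct my_vcpu {
		struct my_kvm *kvm;
		struct mutex mutex;
	};

	static void my_locked_op(struct my_vcpu *vcpu)
	{
		struct my_kvm *kvm = vcpu->kvm;

		mutex_lock(&kvm->lock);		/* kvm->lock outside vcpu->mutex */
		mutex_lock(&vcpu->mutex);
		mutex_lock(&kvm->slots_lock);	/* ...and outside slots_lock/irq_lock */
		mutex_lock(&kvm->irq_lock);	/* slots_lock outside irq_lock (rare) */

		/* critical section */

		mutex_unlock(&kvm->irq_lock);
		mutex_unlock(&kvm->slots_lock);
		mutex_unlock(&vcpu->mutex);
		mutex_unlock(&kvm->lock);
	}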
arch/mips/include/asm/kvm_host.h +4 −3

@@ -293,7 +293,10 @@ struct kvm_vcpu_arch {
 	/* Host KSEG0 address of the EI/DI offset */
 	void *kseg0_commpage;
 
-	u32 io_gpr;		/* GPR used as IO source/target */
+	/* Resume PC after MMIO completion */
+	unsigned long io_pc;
+	/* GPR used as IO source/target */
+	u32 io_gpr;
 
 	struct hrtimer comparecount_timer;
 	/* Count timer control KVM register */
@@ -315,8 +318,6 @@ struct kvm_vcpu_arch {
 	/* Bitmask of pending exceptions to be cleared */
 	unsigned long pending_exceptions_clr;
 
-	u32 pending_load_cause;
-
 	/* Save/Restore the entryhi register when are are preempted/scheduled back in */
 	unsigned long preempt_entryhi;
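Note: io_pc and io_gpr form a small handoff between load emulation and MMIO completion; io_pc replaces pending_load_cause because the resume PC is now computed once, up front. A minimal sketch of the protocol, with hypothetical stand-in types and helpers (emulate_load()/complete_load() are illustrative, not the kernel functions):

	#include <stdint.h>

	/* Hypothetical stand-in for the relevant kvm_vcpu_arch fields. */
	struct vcpu_state {
		unsigned long pc;	/* current guest PC */
		unsigned long io_pc;	/* resume PC after MMIO completion */
		uint32_t io_gpr;	/* GPR used as IO source/target */
		unsigned long gprs[32];
	};

	/* Phase 1: while emulating the load, precompute where to resume. */
	static void emulate_load(struct vcpu_state *v, unsigned long next_pc,
				 uint32_t rt)
	{
		v->io_pc = next_pc;	/* saved while the branch context is known */
		v->io_gpr = rt;
		/* ... hand the access off as MMIO ... */
	}

	/* Phase 2: when the MMIO result arrives, just restore the saved PC. */
	static void complete_load(struct vcpu_state *v, unsigned long data)
	{
		v->gprs[v->io_gpr] = data;
		v->pc = v->io_pc;	/* no need to re-derive delay-slot state */
	}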
arch/mips/kvm/emulate.c +19 −13

@@ -790,15 +790,15 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	enum emulation_result er = EMULATE_DONE;
 
-	if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
+	if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
+		kvm_clear_c0_guest_status(cop0, ST0_ERL);
+		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
+	} else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
 		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
 			  kvm_read_c0_guest_epc(cop0));
 		kvm_clear_c0_guest_status(cop0, ST0_EXL);
 		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
-	} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
-		kvm_clear_c0_guest_status(cop0, ST0_ERL);
-		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
 	} else {
 		kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
 			vcpu->arch.pc);
 		er = EMULATE_FAIL;
@@ -1528,13 +1528,25 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
 					    struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er = EMULATE_DO_MMIO;
+	unsigned long curr_pc;
 	u32 op, rt;
 	u32 bytes;
 
 	rt = inst.i_format.rt;
 	op = inst.i_format.opcode;
 
-	vcpu->arch.pending_load_cause = cause;
+	/*
+	 * Find the resume PC now while we have safe and easy access to the
+	 * prior branch instruction, and save it for
+	 * kvm_mips_complete_mmio_load() to restore later.
+	 */
+	curr_pc = vcpu->arch.pc;
+	er = update_pc(vcpu, cause);
+	if (er == EMULATE_FAIL)
+		return er;
+	vcpu->arch.io_pc = vcpu->arch.pc;
+	vcpu->arch.pc = curr_pc;
+
 	vcpu->arch.io_gpr = rt;
 
 	switch (op) {
@@ -2494,9 +2506,8 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
 		goto done;
 	}
 
-	er = update_pc(vcpu, vcpu->arch.pending_load_cause);
-	if (er == EMULATE_FAIL)
-		return er;
+	/* Restore saved resume PC */
+	vcpu->arch.pc = vcpu->arch.io_pc;
 
 	switch (run->mmio.len) {
 	case 4:
@@ -2518,11 +2529,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
 		break;
 	}
 
-	if (vcpu->arch.pending_load_cause & CAUSEF_BD)
-		kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
-			  vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
-			  vcpu->mmio_needed);
-
 done:
 	return er;
 }
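Note: the resume PC is computed up front because of the MIPS branch delay slot. If the faulting load sits in a delay slot (Cause.BD set), the correct resume address comes from decoding the branch instruction at the PC, which is only conveniently readable during emulation, not later at MMIO completion. A simplified model of the idea; read_guest_insn() and branch_target() are hypothetical stand-ins for what the kernel does through update_pc() and its branch emulator:

	#include <stdint.h>

	#define CAUSEF_BD (1u << 31)	/* Cause.BD: insn is in a branch delay slot */

	/* Hypothetical stand-ins: fetch a guest instruction / decode its target. */
	static uint32_t read_guest_insn(unsigned long pc) { (void)pc; return 0; }
	static unsigned long branch_target(uint32_t insn, unsigned long pc)
	{
		/* PC-relative: target = pc + 4 + (sign-extended offset << 2) */
		return pc + 4 + ((long)(int16_t)(insn & 0xffff) << 2);
	}

	static unsigned long resume_pc(unsigned long pc, unsigned int cause)
	{
		if (cause & CAUSEF_BD)
			/* Load in a delay slot: the resume address depends on
			 * the branch at pc, which is only easy to fetch and
			 * decode now, during emulation; hence io_pc. */
			return branch_target(read_guest_insn(pc), pc);
		return pc + 4;	/* ordinary case: next sequential instruction */
	}

The ERET reordering in the first hunk follows the architected priority: when both Status.ERL and Status.EXL are set, ERL wins and ERET must return to ErrorEPC rather than EPC.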
arch/mips/kvm/mips.c +4 −1

@@ -426,7 +426,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	int cpu = smp_processor_id();
+	int i, cpu = smp_processor_id();
 	unsigned int gasid;
 
 	/*
@@ -442,6 +442,9 @@ static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
 					vcpu);
 			vcpu->arch.guest_user_asid[cpu] =
 				vcpu->arch.guest_user_mm.context.asid[cpu];
+			for_each_possible_cpu(i)
+				if (i != cpu)
+					vcpu->arch.guest_user_asid[i] = 0;
 			vcpu->arch.last_user_gasid = gasid;
 		}
 	}
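Note: when the guest changes its user-mode ASID, the host ASIDs this vcpu has cached on every other CPU no longer match guest state, so they must be zeroed: the zero value can never pass the ASID version check, forcing a fresh host ASID if the vcpu migrates. A minimal sketch of the pattern, with a hypothetical fixed-size stand-in for the per-vcpu array:

	#define NR_CPUS 4	/* illustrative */

	/* Hypothetical per-vcpu cache of host ASIDs, one slot per CPU. */
	struct vcpu_asids {
		unsigned long guest_user_asid[NR_CPUS];
	};

	/*
	 * Zero every slot except the current CPU's (which was just
	 * regenerated), so stale TLB entries on other CPUs can never be
	 * reused after a migration.
	 */
	static void invalidate_other_cpus(struct vcpu_asids *v, int cpu)
	{
		int i;

		for (i = 0; i < NR_CPUS; i++)
			if (i != cpu)
				v->guest_user_asid[i] = 0;
	}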
arch/mips/kvm/mmu.c +0 −4

@@ -260,13 +260,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) &
 						asid_version_mask(cpu)) {
-		u32 gasid = kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
-				KVM_ENTRYHI_ASID;
-
 		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
 		vcpu->arch.guest_user_asid[cpu] =
 			vcpu->arch.guest_user_mm.context.asid[cpu];
-		vcpu->arch.last_user_gasid = gasid;
 		newasid++;
 
 		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
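Note: the surrounding if() is the standard MIPS lazy-ASID staleness test. Host ASIDs carry a generation ("version") in their upper bits, and XORing the cached value against the CPU's current ASID cache exposes any version difference, including the zero value written by the invalidation above. A minimal runnable model; the 8-bit ASID split is an assumption for the example, the real masks come from asid_version_mask():

	#include <stdio.h>

	/* Illustrative split: low 8 bits = ASID, upper bits = generation. */
	#define ASID_MASK	0xffUL
	#define VERSION_MASK	(~ASID_MASK)

	/* Stale if the cached value belongs to an older ASID generation. */
	static int asid_is_stale(unsigned long cached, unsigned long cpu_asid_cache)
	{
		return ((cached ^ cpu_asid_cache) & VERSION_MASK) != 0;
	}

	int main(void)
	{
		unsigned long cache = 0x300;	/* generation 3 on this CPU */

		printf("%d\n", asid_is_stale(0x305, cache));	/* 0: current */
		printf("%d\n", asid_is_stale(0x205, cache));	/* 1: old generation */
		printf("%d\n", asid_is_stale(0, cache));	/* 1: invalidated */
		return 0;
	}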