arch/arm/include/asm/kvm_emulate.h +5 −0

@@ -157,4 +157,9 @@ static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
 	return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
 }
 
+static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.cp15[c0_MPIDR];
+}
+
 #endif /* __ARM_KVM_EMULATE_H__ */
arch/arm/kvm/coproc.c +10 −4

@@ -74,11 +74,13 @@ int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
 static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
 {
 	/*
-	 * Compute guest MPIDR. No need to mess around with different clusters
-	 * but we read the 'U' bit from the underlying hardware directly.
+	 * Compute guest MPIDR. We build a virtual cluster out of the
+	 * vcpu_id, but we read the 'U' bit from the underlying
+	 * hardware directly.
 	 */
-	vcpu->arch.cp15[c0_MPIDR] = (read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
-					vcpu->vcpu_id;
+	vcpu->arch.cp15[c0_MPIDR] = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
+				     ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
+				     (vcpu->vcpu_id & 3));
 }
 
 /* TRM entries A7:4.3.31 A15:4.3.28 - RO WI */

@@ -122,6 +124,10 @@ static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
 	asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
 	l2ctlr &= ~(3 << 24);
 	ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
+	/* How many cores in the current cluster and the next ones */
+	ncores -= (vcpu->vcpu_id & ~3);
+	/* Cap it to the maximum number of cores in a single cluster */
+	ncores = min(ncores, 3U);
 	l2ctlr |= (ncores & 3) << 24;
 
 	vcpu->arch.cp15[c9_L2CTLR] = l2ctlr;
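The cluster arithmetic above is terse, so here is a minimal standalone sketch of what it produces (plain userspace C, not kernel code; virt_mpidr and l2ctlr_ncores are illustrative names, and MPIDR_LEVEL_BITS is hard-coded to 8, its asm/cputype.h value). Each group of four vcpus forms one virtual cluster: Aff1 carries the cluster index and Aff0 the core index.

	#include <stdio.h>

	#define MPIDR_LEVEL_BITS 8

	static unsigned long virt_mpidr(unsigned int vcpu_id)
	{
		/* Aff1 = vcpu_id / 4, Aff0 = vcpu_id % 4 */
		return ((vcpu_id >> 2) << MPIDR_LEVEL_BITS) | (vcpu_id & 3);
	}

	static unsigned int l2ctlr_ncores(unsigned int vcpu_id,
					  unsigned int online_vcpus)
	{
		unsigned int ncores = online_vcpus - 1;

		/* Cores in this vcpu's cluster and the following ones... */
		ncores -= (vcpu_id & ~3);
		/* ...capped to the four cores a single cluster can hold. */
		if (ncores > 3)
			ncores = 3;
		return ncores;	/* L2CTLR encodes "number of cores minus one" */
	}

	int main(void)
	{
		unsigned int nvcpus = 6;

		for (unsigned int id = 0; id < nvcpus; id++)
			printf("vcpu %u: Aff1=%lu Aff0=%lu, cluster has %u core(s)\n",
			       id, virt_mpidr(id) >> MPIDR_LEVEL_BITS,
			       virt_mpidr(id) & 3, l2ctlr_ncores(id, nvcpus) + 1);
		return 0;
	}

With six vcpus, for example, vcpus 0-3 see a full four-core cluster in L2CTLR while vcpus 4 and 5 see a two-core cluster, which is what the capped subtraction above computes.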
arch/arm/kvm/psci.c +13 −4

@@ -18,6 +18,7 @@
 #include <linux/kvm_host.h>
 #include <linux/wait.h>
 
+#include <asm/cputype.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_psci.h>
 
@@ -34,22 +35,30 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
 static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 {
 	struct kvm *kvm = source_vcpu->kvm;
-	struct kvm_vcpu *vcpu;
+	struct kvm_vcpu *vcpu = NULL, *tmp;
 	wait_queue_head_t *wq;
 	unsigned long cpu_id;
+	unsigned long mpidr;
 	phys_addr_t target_pc;
+	int i;
 
 	cpu_id = *vcpu_reg(source_vcpu, 1);
 	if (vcpu_mode_is_32bit(source_vcpu))
 		cpu_id &= ~((u32) 0);
 
-	if (cpu_id >= atomic_read(&kvm->online_vcpus))
+	kvm_for_each_vcpu(i, tmp, kvm) {
+		mpidr = kvm_vcpu_get_mpidr(tmp);
+		if ((mpidr & MPIDR_HWID_BITMASK) == (cpu_id & MPIDR_HWID_BITMASK)) {
+			vcpu = tmp;
+			break;
+		}
+	}
+
+	if (!vcpu)
 		return KVM_PSCI_RET_INVAL;
 
 	target_pc = *vcpu_reg(source_vcpu, 2);
 
-	vcpu = kvm_get_vcpu(kvm, cpu_id);
-
 	wq = kvm_arch_vcpu_wq(vcpu);
 	if (!waitqueue_active(wq))
 		return KVM_PSCI_RET_INVAL;
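The lookup above means the guest's CPU_ON argument is interpreted as an MPIDR value rather than a vcpu index, so targets in higher virtual clusters (Aff1 != 0) resolve correctly. Here is a hypothetical userspace model of that match, assuming MPIDR_HWID_BITMASK's 32-bit ARM value of 0xffffff (fake_vcpu and find_vcpu are made-up names for illustration):

	#include <stddef.h>
	#include <stdio.h>

	#define MPIDR_HWID_BITMASK 0xffffffUL

	struct fake_vcpu {
		int id;
		unsigned long mpidr;
	};

	static struct fake_vcpu *find_vcpu(struct fake_vcpu *vcpus, size_t n,
					   unsigned long cpu_id)
	{
		/* Match on affinity bits only, as kvm_psci_vcpu_on now does */
		for (size_t i = 0; i < n; i++)
			if ((vcpus[i].mpidr & MPIDR_HWID_BITMASK) ==
			    (cpu_id & MPIDR_HWID_BITMASK))
				return &vcpus[i];
		return NULL;	/* maps to KVM_PSCI_RET_INVAL in the real code */
	}

	int main(void)
	{
		/* vcpus 4 and 5 live in virtual cluster 1 (Aff1 = 1) */
		struct fake_vcpu vcpus[] = {
			{ 0, 0x0 }, { 1, 0x1 }, { 2, 0x2 }, { 3, 0x3 },
			{ 4, 0x100 }, { 5, 0x101 },
		};
		struct fake_vcpu *target =
			find_vcpu(vcpus, sizeof(vcpus) / sizeof(vcpus[0]), 0x100);

		printf("CPU_ON(0x100) -> vcpu %d\n", target ? target->id : -1);
		return 0;
	}

Under the old index-based check, cpu_id 0x100 would have been rejected outright as larger than online_vcpus; here it correctly resolves to vcpu 4.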
arch/arm/kvm/reset.c +0 −4

@@ -33,8 +33,6 @@
  * Cortex-A15 and Cortex-A7 Reset Values
  */
 
-static const int cortexa_max_cpu_idx = 3;
-
 static struct kvm_regs cortexa_regs_reset = {
 	.usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT,
 };

@@ -64,8 +62,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	switch (vcpu->arch.target) {
 	case KVM_ARM_TARGET_CORTEX_A7:
 	case KVM_ARM_TARGET_CORTEX_A15:
-		if (vcpu->vcpu_id > cortexa_max_cpu_idx)
-			return -EINVAL;
 		reset_regs = &cortexa_regs_reset;
 		vcpu->arch.midr = read_cpuid_id();
 		cpu_vtimer_irq = &cortexa_vtimer_irq;
arch/arm64/include/asm/kvm_emulate.h +5 −0

@@ -177,4 +177,9 @@ static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 	return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
 }
 
+static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
+{
+	return vcpu_sys_reg(vcpu, MPIDR_EL1);
+}
+
 #endif /* __ARM64_KVM_EMULATE_H__ */