Commit 2837dbce authored by Michael Ellerman

Merge branch 'topic/ppc-kvm' into next

Merge our KVM topic branch to bring some KVM commits into next for wider
testing.
parents 87b626a6 a3800ef9
arch/powerpc/include/asm/kvm_host.h  +2 −2
@@ -758,7 +758,7 @@ struct kvm_vcpu_arch {
 	u8 prodded;
 	u8 doorbell_request;
 	u8 irq_pending; /* Used by XIVE to signal pending guest irqs */
-	u32 last_inst;
+	unsigned long last_inst;
 
 	struct rcuwait wait;
 	struct rcuwait *waitp;
@@ -818,7 +818,7 @@ struct kvm_vcpu_arch {
 	u64 busy_stolen;
 	u64 busy_preempt;
 
-	u32 emul_inst;
+	u64 emul_inst;
 
 	u32 online;
 
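Both fields widen because Power ISA 3.1 prefixed instructions are two 32-bit words, a prefix followed by a suffix. A comment added in book3s.c further down records the convention: prefix in the high 32 bits, suffix in the low 32 bits, with a zero high half meaning an ordinary 4-byte instruction. A minimal standalone sketch of that packing, using hypothetical helper names that are not part of this series (u32/u64 are the usual kernel types):

static inline u64 pack_prefixed(u32 prefix, u32 suffix)
{
	/* prefix in the high half, suffix in the low half */
	return ((u64)prefix << 32) | suffix;
}

static inline u32 unpack_prefix(u64 inst) { return inst >> 32; }	/* 0 if not prefixed */
static inline u32 unpack_suffix(u64 inst) { return (u32)inst; }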
arch/powerpc/include/asm/kvm_ppc.h  +45 −19
@@ -28,6 +28,7 @@
 #include <asm/xive.h>
 #include <asm/cpu_has_feature.h>
 #endif
+#include <asm/inst.h>
 
 /*
  * KVMPPC_INST_SW_BREAKPOINT is debug Instruction
@@ -84,7 +85,8 @@ extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
 				int is_default_endian);
 
 extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
-				 enum instruction_fetch_type type, u32 *inst);
+				 enum instruction_fetch_type type,
+				 unsigned long *inst);
 
 extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
 		     bool data);
@@ -126,25 +128,34 @@ extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
 
 extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
+
+extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu,
+					    ulong srr1_flags);
 extern void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
-extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu,
+				      ulong srr1_flags);
+extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu,
+					ulong srr1_flags);
+extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu,
+					  ulong srr1_flags);
+extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu,
+					  ulong srr1_flags);
 extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                        struct kvm_interrupt *irq);
 extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
-					ulong esr_flags);
-extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
-					   ulong dar, ulong dsisr);
+extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
+					ulong dear_flags,
+					ulong esr_flags);
+extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
+					   ulong srr1_flags,
+					   ulong dar,
+					   ulong dsisr);
 extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
-					   ulong esr_flags);
+					   ulong srr1_flags);
 
 extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
 
@@ -315,7 +326,7 @@ extern struct kvmppc_ops *kvmppc_hv_ops;
 extern struct kvmppc_ops *kvmppc_pr_ops;
 
 static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
-				enum instruction_fetch_type type, u32 *inst)
+				enum instruction_fetch_type type, ppc_inst_t *inst)
 {
 	int ret = EMULATE_DONE;
 	u32 fetched_inst;
@@ -326,15 +337,30 @@ static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
 		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);
 
 	/*  Write fetch_failed unswapped if the fetch failed */
-	if (ret == EMULATE_DONE)
+	if (ret != EMULATE_DONE) {
+		*inst = ppc_inst(KVM_INST_FETCH_FAILED);
+		return ret;
+	}
+
+#ifdef CONFIG_PPC64
+	/* Is this a prefixed instruction? */
+	if ((vcpu->arch.last_inst >> 32) != 0) {
+		u32 prefix = vcpu->arch.last_inst >> 32;
+		u32 suffix = vcpu->arch.last_inst;
+		if (kvmppc_need_byteswap(vcpu)) {
+			prefix = swab32(prefix);
+			suffix = swab32(suffix);
+		}
+		*inst = ppc_inst_prefix(prefix, suffix);
+		return EMULATE_DONE;
+	}
+#endif
+
 	fetched_inst = kvmppc_need_byteswap(vcpu) ?
 		swab32(vcpu->arch.last_inst) :
 		vcpu->arch.last_inst;
-	else
-		fetched_inst = vcpu->arch.last_inst;
 
-	*inst = fetched_inst;
-	return ret;
+	*inst = ppc_inst(fetched_inst);
+	return EMULATE_DONE;
 }
 
 static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
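The rewrite handles the failure path first (store KVM_INST_FETCH_FAILED unswapped and bail out), then treats a non-zero high half of last_inst as a prefixed instruction. Note that when guest and host endianness differ, each 32-bit half is byte-swapped independently; a single 64-bit swap would also exchange prefix and suffix, which is not how the two words sit in memory. A hedged sketch of just that rule (swab32() as in the kernel; fixup_prefixed is an illustrative name, not part of this series):

static u64 fixup_prefixed(u64 last_inst, bool need_byteswap)
{
	u32 prefix = last_inst >> 32;
	u32 suffix = last_inst;

	if (need_byteswap) {
		/* swap within each half, never across the halves */
		prefix = swab32(prefix);
		suffix = swab32(suffix);
	}
	return ((u64)prefix << 32) | suffix;
}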
arch/powerpc/include/asm/reg.h  +1 −0
@@ -417,6 +417,7 @@
 #define   FSCR_DSCR	__MASK(FSCR_DSCR_LG)
 #define   FSCR_INTR_CAUSE (ASM_CONST(0xFF) << 56)	/* interrupt cause */
 #define SPRN_HFSCR	0xbe	/* HV=1 Facility Status & Control Register */
+#define   HFSCR_PREFIX	__MASK(FSCR_PREFIX_LG)
 #define   HFSCR_MSGP	__MASK(FSCR_MSGP_LG)
 #define   HFSCR_TAR	__MASK(FSCR_TAR_LG)
 #define   HFSCR_EBB	__MASK(FSCR_EBB_LG)
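HFSCR_PREFIX reuses the architected FSCR_PREFIX_LG bit position via __MASK(); in the HFSCR it is the hypervisor-level enable for the prefixed-instruction facility. A sketch of how such a facility bit is typically granted or revoked, assuming the kernel's usual mfspr()/mtspr() accessors (illustrative only, not a line from this branch):

	/* grant the guest the prefixed-instruction facility */
	mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) | HFSCR_PREFIX);

	/* or revoke it for a guest mode that cannot support it */
	mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_PREFIX);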
arch/powerpc/kvm/book3s.c  +43 −21
@@ -188,10 +188,10 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
 }
 EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);
 
-void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags)
+void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
 	/* might as well deliver this straight away */
-	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, flags);
+	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, srr1_flags);
 }
 EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);
 
@@ -201,29 +201,29 @@ void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL(kvmppc_core_queue_syscall);
 
-void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
+void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
 	/* might as well deliver this straight away */
-	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
+	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, srr1_flags);
 }
 EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);
 
-void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
 	/* might as well deliver this straight away */
-	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, 0);
+	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, srr1_flags);
 }
 
-void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
 	/* might as well deliver this straight away */
-	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, 0);
+	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, srr1_flags);
 }
 
-void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
 	/* might as well deliver this straight away */
-	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, 0);
+	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, srr1_flags);
 }
 
 void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
@@ -278,18 +278,18 @@ void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
 	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
 }
 
-void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
-				    ulong flags)
+void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong srr1_flags,
+				    ulong dar, ulong dsisr)
 {
 	kvmppc_set_dar(vcpu, dar);
-	kvmppc_set_dsisr(vcpu, flags);
-	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
+	kvmppc_set_dsisr(vcpu, dsisr);
+	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, srr1_flags);
 }
 EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);
 
-void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
+void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
-	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, flags);
+	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, srr1_flags);
 }
 EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage);
 
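The srr1_flags parameter threaded through these helpers ends up in the SRR1 value that kvmppc_inject_interrupt() presents to the guest, where a flag such as SRR1_PREFIXED tells the guest handler whether the faulting instruction was 4 or 8 bytes long. A hedged caller-side sketch (srr1_flags, dar and dsisr stand for whatever fault state the caller already computed):

	/* forward the "faulting instruction was prefixed" bit to the guest */
	srr1_flags |= kvmppc_get_msr(vcpu) & SRR1_PREFIXED;
	kvmppc_core_queue_data_storage(vcpu, srr1_flags, dar, dsisr);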
@@ -481,20 +481,42 @@ int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
 	return r;
 }
 
+/*
+ * Returns prefixed instructions with the prefix in the high 32 bits
+ * of *inst and suffix in the low 32 bits.  This is the same convention
+ * as used in HEIR, vcpu->arch.last_inst and vcpu->arch.emul_inst.
+ * Like vcpu->arch.last_inst but unlike vcpu->arch.emul_inst, each
+ * half of the value needs byte-swapping if the guest endianness is
+ * different from the host endianness.
+ */
 int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
-		enum instruction_fetch_type type, u32 *inst)
+		enum instruction_fetch_type type, unsigned long *inst)
 {
 	ulong pc = kvmppc_get_pc(vcpu);
 	int r;
+	u32 iw;
 
 	if (type == INST_SC)
 		pc -= 4;
 
-	r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
-	if (r == EMULATE_DONE)
-		return r;
-	else
+	r = kvmppc_ld(vcpu, &pc, sizeof(u32), &iw, false);
+	if (r != EMULATE_DONE)
 		return EMULATE_AGAIN;
+	/*
+	 * If [H]SRR1 indicates that the instruction that caused the
+	 * current interrupt is a prefixed instruction, get the suffix.
+	 */
+	if (kvmppc_get_msr(vcpu) & SRR1_PREFIXED) {
+		u32 suffix;
+		pc += 4;
+		r = kvmppc_ld(vcpu, &pc, sizeof(u32), &suffix, false);
+		if (r != EMULATE_DONE)
+			return EMULATE_AGAIN;
+		*inst = ((u64)iw << 32) | suffix;
+	} else {
+		*inst = iw;
+	}
+	return r;
 }
 EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);
 
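On success, *inst now holds either a plain word in its low half or prefix<<32|suffix when [H]SRR1 flagged the faulting instruction as prefixed. A hedged caller-side sketch of consuming that value (error handling trimmed; the handle_* helpers are hypothetical, INST_GENERIC is the existing fetch type):

	unsigned long inst;

	if (kvmppc_load_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
		return EMULATE_AGAIN;	/* mapping went away: let the guest retry */

	if (inst >> 32)			/* non-zero prefix half */
		handle_prefixed(inst >> 32, (u32)inst);
	else
		handle_word((u32)inst);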
arch/powerpc/kvm/book3s_64_mmu_hv.c  +20 −6
@@ -415,20 +415,25 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
  * embodied here.)  If the instruction isn't a load or store, then
  * this doesn't return anything useful.
  */
-static int instruction_is_store(unsigned int instr)
+static int instruction_is_store(ppc_inst_t instr)
 {
 	unsigned int mask;
+	unsigned int suffix;
 
 	mask = 0x10000000;
-	if ((instr & 0xfc000000) == 0x7c000000)
+	suffix = ppc_inst_val(instr);
+	if (ppc_inst_prefixed(instr))
+		suffix = ppc_inst_suffix(instr);
+	else if ((suffix & 0xfc000000) == 0x7c000000)
 		mask = 0x100;		/* major opcode 31 */
-	return (instr & mask) != 0;
+	return (suffix & mask) != 0;
 }
 
 int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
 			   unsigned long gpa, gva_t ea, int is_store)
 {
-	u32 last_inst;
+	ppc_inst_t last_inst;
+	bool is_prefixed = !!(kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
 
 	/*
 	 * Fast path - check if the guest physical address corresponds to a
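The predicate keys off a single bit: for D-form major opcodes, bit 0x10000000 of the opcode separates stores from loads, and within major opcode 31 (X-form) bit 0x100 of the extended-opcode field does. For prefixed load/store instructions the deciding bits live in the suffix word, hence the ppc_inst_suffix() call. A small self-checking sketch of the word-form rule with real Power ISA encodings (is_store_word is an illustrative name, not the kernel helper):

#include <assert.h>
#include <stdint.h>

static int is_store_word(uint32_t suffix)
{
	uint32_t mask = 0x10000000;

	if ((suffix & 0xfc000000) == 0x7c000000)
		mask = 0x100;	/* major opcode 31: the bit comes from the XO field */
	return (suffix & mask) != 0;
}

int main(void)
{
	assert(!is_store_word(0x80640000));	/* lwz  r3,0(r4)  */
	assert( is_store_word(0x90640000));	/* stw  r3,0(r4)  */
	assert(!is_store_word(0x7c64282e));	/* lwzx r3,r4,r5  */
	assert( is_store_word(0x7c64292e));	/* stwx r3,r4,r5  */
	return 0;
}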
@@ -443,7 +448,7 @@ int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
 				       NULL);
 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		if (!ret) {
-			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
+			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + (is_prefixed ? 8 : 4));
 			return RESUME_GUEST;
 		}
 	}
@@ -458,7 +463,16 @@ int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
 	/*
 	 * WARNING: We do not know for sure whether the instruction we just
 	 * read from memory is the same that caused the fault in the first
-	 * place.  If the instruction we read is neither an load or a store,
+	 * place.
+	 *
+	 * If the fault is prefixed but the instruction is not or vice
+	 * versa, try again so that we don't advance pc the wrong amount.
+	 */
+	if (ppc_inst_prefixed(last_inst) != is_prefixed)
+		return RESUME_GUEST;
+
+	/*
+	 * If the instruction we read is neither an load or a store,
 	 * then it can't access memory, so we don't need to worry about
 	 * enforcing access permissions.  So, assuming it is a load or
 	 * store, we just check that its direction (load or store) is
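The new check closes a small race: the guest can rewrite the faulting location between taking the interrupt and KVM re-reading it, so SRR1_PREFIXED (what actually faulted) may disagree with ppc_inst_prefixed(last_inst) (what was just read back). Returning to the guest re-executes the instruction and refaults cleanly instead of advancing pc by the wrong step. A condensed sketch of the flow, assuming the helpers from the hunks above:

	ppc_inst_t last_inst;
	bool is_prefixed = !!(kvmppc_get_msr(vcpu) & SRR1_PREFIXED);

	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) != EMULATE_DONE)
		return RESUME_GUEST;		/* could not read it: retry */
	if (ppc_inst_prefixed(last_inst) != is_prefixed)
		return RESUME_GUEST;		/* changed under us: retry  */

	/* safe: advance pc by (is_prefixed ? 8 : 4) after emulating */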