Commit 7ec21d9d authored by Tianjia Zhang, committed by Paul Mackerras
Browse files

KVM: PPC: Clean up redundant kvm_run parameters in assembly



In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function parameters
retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time. This
patch does a unified cleanup of these remaining redundant parameters.

[paulus@ozlabs.org - Fixed places that were missed in book3s_interrupts.S]

Signed-off-by: Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent 1508c22f
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -59,7 +59,7 @@ enum xlate_readwrite {
};

extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
+27 −29
Original line number Diff line number Diff line
@@ -55,8 +55,7 @@
 ****************************************************************************/

/* Registers:
 *  r3: kvm_run pointer
 *  r4: vcpu pointer
 *  r3: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_run)

@@ -68,8 +67,8 @@ kvm_start_entry:
	/* Save host state to the stack */
	PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)

	/* Save r3 (kvm_run) and r4 (vcpu) */
	SAVE_2GPRS(3, r1)
	/* Save r3 (vcpu) */
	SAVE_GPR(3, r1)

	/* Save non-volatile registers (r14 - r31) */
	SAVE_NVGPRS(r1)
@@ -82,47 +81,46 @@ kvm_start_entry:
	PPC_STL	r0, _LINK(r1)

	/* Load non-volatile guest state from the vcpu */
	VCPU_LOAD_NVGPRS(r4)
	VCPU_LOAD_NVGPRS(r3)

kvm_start_lightweight:
	/* Copy registers into shadow vcpu so we can access them in real mode */
	mr	r3, r4
	bl	FUNC(kvmppc_copy_to_svcpu)
	nop
	REST_GPR(4, r1)
	REST_GPR(3, r1)

#ifdef CONFIG_PPC_BOOK3S_64
	/* Get the dcbz32 flag */
	PPC_LL	r3, VCPU_HFLAGS(r4)
	rldicl	r3, r3, 0, 63		/* r3 &= 1 */
	stb	r3, HSTATE_RESTORE_HID5(r13)
	PPC_LL	r0, VCPU_HFLAGS(r3)
	rldicl	r0, r0, 0, 63		/* r0 &= 1 */
	stb	r0, HSTATE_RESTORE_HID5(r13)

	/* Load up guest SPRG3 value, since it's user readable */
	lwz	r3, VCPU_SHAREDBE(r4)
	cmpwi	r3, 0
	ld	r5, VCPU_SHARED(r4)
	lbz	r4, VCPU_SHAREDBE(r3)
	cmpwi	r4, 0
	ld	r5, VCPU_SHARED(r3)
	beq	sprg3_little_endian
sprg3_big_endian:
#ifdef __BIG_ENDIAN__
	ld	r3, VCPU_SHARED_SPRG3(r5)
	ld	r4, VCPU_SHARED_SPRG3(r5)
#else
	addi	r5, r5, VCPU_SHARED_SPRG3
	ldbrx	r3, 0, r5
	ldbrx	r4, 0, r5
#endif
	b	after_sprg3_load
sprg3_little_endian:
#ifdef __LITTLE_ENDIAN__
	ld	r3, VCPU_SHARED_SPRG3(r5)
	ld	r4, VCPU_SHARED_SPRG3(r5)
#else
	addi	r5, r5, VCPU_SHARED_SPRG3
	ldbrx	r3, 0, r5
	ldbrx	r4, 0, r5
#endif

after_sprg3_load:
	mtspr	SPRN_SPRG3, r3
	mtspr	SPRN_SPRG3, r4
#endif /* CONFIG_PPC_BOOK3S_64 */

	PPC_LL	r4, VCPU_SHADOW_MSR(r4)	/* get shadow_msr */
	PPC_LL	r4, VCPU_SHADOW_MSR(r3)	/* get shadow_msr */

	/* Jump to segment patching handler and into our guest */
	bl	FUNC(kvmppc_entry_trampoline)
@@ -146,7 +144,7 @@ after_sprg3_load:
	 *
	 */

	PPC_LL	r3, GPR4(r1)		/* vcpu pointer */
	PPC_LL	r3, GPR3(r1)		/* vcpu pointer */

	/*
	 * kvmppc_copy_from_svcpu can clobber volatile registers, save
@@ -169,7 +167,7 @@ after_sprg3_load:
#endif /* CONFIG_PPC_BOOK3S_64 */

	/* R7 = vcpu */
	PPC_LL	r7, GPR4(r1)
	PPC_LL	r7, GPR3(r1)

	PPC_STL	r14, VCPU_GPR(R14)(r7)
	PPC_STL	r15, VCPU_GPR(R15)(r7)
@@ -190,11 +188,11 @@ after_sprg3_load:
	PPC_STL	r30, VCPU_GPR(R30)(r7)
	PPC_STL	r31, VCPU_GPR(R31)(r7)

	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
	lwz	r5, VCPU_TRAP(r7)
	/* Pass the exit number as 2nd argument to kvmppc_handle_exit */
	lwz	r4, VCPU_TRAP(r7)

	/* Restore r3 (kvm_run) and r4 (vcpu) */
	REST_2GPRS(3, r1)
	/* Restore r3 (vcpu) */
	REST_GPR(3, r1)
	bl	FUNC(kvmppc_handle_exit_pr)

	/* If RESUME_GUEST, get back in the loop */
@@ -223,11 +221,11 @@ kvm_loop_heavyweight:
	PPC_LL	r4, _LINK(r1)
	PPC_STL r4, (PPC_LR_STKOFF + SWITCH_FRAME_SIZE)(r1)

	/* Load vcpu and cpu_run */
	REST_2GPRS(3, r1)
	/* Load vcpu */
	REST_GPR(3, r1)

	/* Load non-volatile guest state from the vcpu */
	VCPU_LOAD_NVGPRS(r4)
	VCPU_LOAD_NVGPRS(r3)

	/* Jump back into the beginning of this function */
	b	kvm_start_lightweight
@@ -235,7 +233,7 @@ kvm_loop_heavyweight:
kvm_loop_lightweight:

	/* We'll need the vcpu pointer */
	REST_GPR(4, r1)
	REST_GPR(3, r1)

	/* Jump back into the beginning of this function */
	b	kvm_start_lightweight
+4 −5
Original line number Diff line number Diff line
@@ -1151,9 +1151,9 @@ static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, unsigned int exit_nr)
	return r;
}

int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int exit_nr)
int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
	struct kvm_run *run = vcpu->run;
	int r = RESUME_HOST;
	int s;

@@ -1826,7 +1826,6 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)

static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	int ret;
#ifdef CONFIG_ALTIVEC
	unsigned long uninitialized_var(vrsave);
@@ -1834,7 +1833,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)

	/* Check if we can run the vcpu at all */
	if (!vcpu->arch.sane) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = -EINVAL;
		goto out;
	}
@@ -1861,7 +1860,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)

	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(run, vcpu);
	ret = __kvmppc_vcpu_run(vcpu);

	kvmppc_clear_debug(vcpu);

+4 −5
Original line number Diff line number Diff line
@@ -731,12 +731,11 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)

int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	int ret, s;
	struct debug_reg debug;

	if (!vcpu->arch.sane) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

@@ -778,7 +777,7 @@ int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
	vcpu->arch.pgdir = vcpu->kvm->mm->pgd;
	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(run, vcpu);
	ret = __kvmppc_vcpu_run(vcpu);

	/* No need for guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */
@@ -982,9 +981,9 @@ static int kvmppc_resume_inst_load(struct kvm_vcpu *vcpu,
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
	struct kvm_run *run = vcpu->run;
	int r = RESUME_HOST;
	int s;
	int idx;
+4 −5
Original line number Diff line number Diff line
@@ -237,7 +237,7 @@ _GLOBAL(kvmppc_resume_host)
	/* Switch to kernel stack and jump to handler. */
	LOAD_REG_ADDR(r3, kvmppc_handle_exit)
	mtctr	r3
	lwz	r3, HOST_RUN(r1)
	mr	r3, r4
	lwz	r2, HOST_R2(r1)
	mr	r14, r4 /* Save vcpu pointer. */

@@ -337,15 +337,14 @@ heavyweight_exit:


/* Registers:
 *  r3: kvm_run pointer
 *  r4: vcpu pointer
 *  r3: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_run)
	stwu	r1, -HOST_STACK_SIZE(r1)
	stw	r1, VCPU_HOST_STACK(r4)	/* Save stack pointer to vcpu. */
	stw	r1, VCPU_HOST_STACK(r3)	/* Save stack pointer to vcpu. */

	/* Save host state to stack. */
	stw	r3, HOST_RUN(r1)
	mr	r4, r3
	mflr	r3
	stw	r3, HOST_STACK_LR(r1)
	mfcr	r5
Loading