Commit 34bde9d8 authored by Anup Patel, committed by Anup Patel

RISC-V: KVM: Implement VCPU world-switch



This patch implements the VCPU world-switch for KVM RISC-V.

The KVM RISC-V world-switch (i.e. __kvm_riscv_switch_to()) mostly
switches the general-purpose registers and the SSTATUS, STVEC, SSCRATCH,
and HSTATUS CSRs. Other CSRs are switched via the vcpu_load()/vcpu_put()
interface, in the kvm_arch_vcpu_load() and kvm_arch_vcpu_put() functions
respectively.
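
For context, a minimal sketch of how these pieces fit together (illustrative
only, not part of this patch): kvm_arch_vcpu_load()/kvm_arch_vcpu_put() are
invoked by generic KVM code when a VCPU is scheduled in and out, while the run
loop calls __kvm_riscv_switch_to() with interrupts disabled to enter the guest.
The wrapper name below is hypothetical, and the real run-loop call site is
wired up by a later patch.

#include <linux/kvm_host.h>

/* Illustrative sketch only -- the wrapper name is made up for this example. */
static void example_enter_guest(struct kvm_vcpu *vcpu)
{
	/* Guest VS-level CSRs and hgatp were restored in kvm_arch_vcpu_load(). */
	local_irq_disable();

	/*
	 * Save host GPRs, SSTATUS, HSTATUS, SCOUNTEREN, SSCRATCH and STVEC,
	 * load the guest context, and sret into the guest. Execution resumes
	 * here after the guest traps back through __kvm_switch_return
	 * (STVEC points there while the guest runs).
	 */
	__kvm_riscv_switch_to(&vcpu->arch);

	local_irq_enable();

	/* Guest VS-level CSRs are written back later, in kvm_arch_vcpu_put(). */
}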

Signed-off-by: Anup Patel <anup.patel@wdc.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Alexander Graf <graf@amazon.com>
Acked-by: Palmer Dabbelt <palmerdabbelt@google.com>
parent 92ad8200
arch/riscv/include/asm/kvm_host.h +9 −1
@@ -115,6 +115,14 @@ struct kvm_vcpu_arch {
	/* ISA feature bits (similar to MISA) */
	unsigned long isa;

	/* SSCRATCH, STVEC, and SCOUNTEREN of Host */
	unsigned long host_sscratch;
	unsigned long host_stvec;
	unsigned long host_scounteren;

	/* CPU context of Host */
	struct kvm_cpu_context host_context;

	/* CPU context of Guest VCPU */
	struct kvm_cpu_context guest_context;

@@ -163,7 +171,7 @@ int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
			struct kvm_cpu_trap *trap);

-static inline void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch) {}
+void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
arch/riscv/kernel/asm-offsets.c +78 −0
@@ -7,7 +7,9 @@
#define GENERATING_ASM_OFFSETS

#include <linux/kbuild.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/kvm_host.h>
#include <asm/thread_info.h>
#include <asm/ptrace.h>

@@ -111,6 +113,82 @@ void asm_offsets(void)
	OFFSET(PT_BADADDR, pt_regs, badaddr);
	OFFSET(PT_CAUSE, pt_regs, cause);

	OFFSET(KVM_ARCH_GUEST_ZERO, kvm_vcpu_arch, guest_context.zero);
	OFFSET(KVM_ARCH_GUEST_RA, kvm_vcpu_arch, guest_context.ra);
	OFFSET(KVM_ARCH_GUEST_SP, kvm_vcpu_arch, guest_context.sp);
	OFFSET(KVM_ARCH_GUEST_GP, kvm_vcpu_arch, guest_context.gp);
	OFFSET(KVM_ARCH_GUEST_TP, kvm_vcpu_arch, guest_context.tp);
	OFFSET(KVM_ARCH_GUEST_T0, kvm_vcpu_arch, guest_context.t0);
	OFFSET(KVM_ARCH_GUEST_T1, kvm_vcpu_arch, guest_context.t1);
	OFFSET(KVM_ARCH_GUEST_T2, kvm_vcpu_arch, guest_context.t2);
	OFFSET(KVM_ARCH_GUEST_S0, kvm_vcpu_arch, guest_context.s0);
	OFFSET(KVM_ARCH_GUEST_S1, kvm_vcpu_arch, guest_context.s1);
	OFFSET(KVM_ARCH_GUEST_A0, kvm_vcpu_arch, guest_context.a0);
	OFFSET(KVM_ARCH_GUEST_A1, kvm_vcpu_arch, guest_context.a1);
	OFFSET(KVM_ARCH_GUEST_A2, kvm_vcpu_arch, guest_context.a2);
	OFFSET(KVM_ARCH_GUEST_A3, kvm_vcpu_arch, guest_context.a3);
	OFFSET(KVM_ARCH_GUEST_A4, kvm_vcpu_arch, guest_context.a4);
	OFFSET(KVM_ARCH_GUEST_A5, kvm_vcpu_arch, guest_context.a5);
	OFFSET(KVM_ARCH_GUEST_A6, kvm_vcpu_arch, guest_context.a6);
	OFFSET(KVM_ARCH_GUEST_A7, kvm_vcpu_arch, guest_context.a7);
	OFFSET(KVM_ARCH_GUEST_S2, kvm_vcpu_arch, guest_context.s2);
	OFFSET(KVM_ARCH_GUEST_S3, kvm_vcpu_arch, guest_context.s3);
	OFFSET(KVM_ARCH_GUEST_S4, kvm_vcpu_arch, guest_context.s4);
	OFFSET(KVM_ARCH_GUEST_S5, kvm_vcpu_arch, guest_context.s5);
	OFFSET(KVM_ARCH_GUEST_S6, kvm_vcpu_arch, guest_context.s6);
	OFFSET(KVM_ARCH_GUEST_S7, kvm_vcpu_arch, guest_context.s7);
	OFFSET(KVM_ARCH_GUEST_S8, kvm_vcpu_arch, guest_context.s8);
	OFFSET(KVM_ARCH_GUEST_S9, kvm_vcpu_arch, guest_context.s9);
	OFFSET(KVM_ARCH_GUEST_S10, kvm_vcpu_arch, guest_context.s10);
	OFFSET(KVM_ARCH_GUEST_S11, kvm_vcpu_arch, guest_context.s11);
	OFFSET(KVM_ARCH_GUEST_T3, kvm_vcpu_arch, guest_context.t3);
	OFFSET(KVM_ARCH_GUEST_T4, kvm_vcpu_arch, guest_context.t4);
	OFFSET(KVM_ARCH_GUEST_T5, kvm_vcpu_arch, guest_context.t5);
	OFFSET(KVM_ARCH_GUEST_T6, kvm_vcpu_arch, guest_context.t6);
	OFFSET(KVM_ARCH_GUEST_SEPC, kvm_vcpu_arch, guest_context.sepc);
	OFFSET(KVM_ARCH_GUEST_SSTATUS, kvm_vcpu_arch, guest_context.sstatus);
	OFFSET(KVM_ARCH_GUEST_HSTATUS, kvm_vcpu_arch, guest_context.hstatus);
	OFFSET(KVM_ARCH_GUEST_SCOUNTEREN, kvm_vcpu_arch, guest_csr.scounteren);

	OFFSET(KVM_ARCH_HOST_ZERO, kvm_vcpu_arch, host_context.zero);
	OFFSET(KVM_ARCH_HOST_RA, kvm_vcpu_arch, host_context.ra);
	OFFSET(KVM_ARCH_HOST_SP, kvm_vcpu_arch, host_context.sp);
	OFFSET(KVM_ARCH_HOST_GP, kvm_vcpu_arch, host_context.gp);
	OFFSET(KVM_ARCH_HOST_TP, kvm_vcpu_arch, host_context.tp);
	OFFSET(KVM_ARCH_HOST_T0, kvm_vcpu_arch, host_context.t0);
	OFFSET(KVM_ARCH_HOST_T1, kvm_vcpu_arch, host_context.t1);
	OFFSET(KVM_ARCH_HOST_T2, kvm_vcpu_arch, host_context.t2);
	OFFSET(KVM_ARCH_HOST_S0, kvm_vcpu_arch, host_context.s0);
	OFFSET(KVM_ARCH_HOST_S1, kvm_vcpu_arch, host_context.s1);
	OFFSET(KVM_ARCH_HOST_A0, kvm_vcpu_arch, host_context.a0);
	OFFSET(KVM_ARCH_HOST_A1, kvm_vcpu_arch, host_context.a1);
	OFFSET(KVM_ARCH_HOST_A2, kvm_vcpu_arch, host_context.a2);
	OFFSET(KVM_ARCH_HOST_A3, kvm_vcpu_arch, host_context.a3);
	OFFSET(KVM_ARCH_HOST_A4, kvm_vcpu_arch, host_context.a4);
	OFFSET(KVM_ARCH_HOST_A5, kvm_vcpu_arch, host_context.a5);
	OFFSET(KVM_ARCH_HOST_A6, kvm_vcpu_arch, host_context.a6);
	OFFSET(KVM_ARCH_HOST_A7, kvm_vcpu_arch, host_context.a7);
	OFFSET(KVM_ARCH_HOST_S2, kvm_vcpu_arch, host_context.s2);
	OFFSET(KVM_ARCH_HOST_S3, kvm_vcpu_arch, host_context.s3);
	OFFSET(KVM_ARCH_HOST_S4, kvm_vcpu_arch, host_context.s4);
	OFFSET(KVM_ARCH_HOST_S5, kvm_vcpu_arch, host_context.s5);
	OFFSET(KVM_ARCH_HOST_S6, kvm_vcpu_arch, host_context.s6);
	OFFSET(KVM_ARCH_HOST_S7, kvm_vcpu_arch, host_context.s7);
	OFFSET(KVM_ARCH_HOST_S8, kvm_vcpu_arch, host_context.s8);
	OFFSET(KVM_ARCH_HOST_S9, kvm_vcpu_arch, host_context.s9);
	OFFSET(KVM_ARCH_HOST_S10, kvm_vcpu_arch, host_context.s10);
	OFFSET(KVM_ARCH_HOST_S11, kvm_vcpu_arch, host_context.s11);
	OFFSET(KVM_ARCH_HOST_T3, kvm_vcpu_arch, host_context.t3);
	OFFSET(KVM_ARCH_HOST_T4, kvm_vcpu_arch, host_context.t4);
	OFFSET(KVM_ARCH_HOST_T5, kvm_vcpu_arch, host_context.t5);
	OFFSET(KVM_ARCH_HOST_T6, kvm_vcpu_arch, host_context.t6);
	OFFSET(KVM_ARCH_HOST_SEPC, kvm_vcpu_arch, host_context.sepc);
	OFFSET(KVM_ARCH_HOST_SSTATUS, kvm_vcpu_arch, host_context.sstatus);
	OFFSET(KVM_ARCH_HOST_HSTATUS, kvm_vcpu_arch, host_context.hstatus);
	OFFSET(KVM_ARCH_HOST_SSCRATCH, kvm_vcpu_arch, host_sscratch);
	OFFSET(KVM_ARCH_HOST_STVEC, kvm_vcpu_arch, host_stvec);
	OFFSET(KVM_ARCH_HOST_SCOUNTEREN, kvm_vcpu_arch, host_scounteren);

	/*
	 * THREAD_{F,X}* might be larger than a S-type offset can handle, but
	 * these are used in performance-sensitive assembly so we can't resort
arch/riscv/kvm/Makefile +1 −1
@@ -10,4 +10,4 @@ KVM := ../../../virt/kvm
obj-$(CONFIG_KVM) += kvm.o

kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/binary_stats.o \
-	 main.o vm.o mmu.o vcpu.o vcpu_exit.o
+	 main.o vm.o mmu.o vcpu.o vcpu_exit.o vcpu_switch.o
arch/riscv/kvm/vcpu.c +28 −2
@@ -565,14 +565,40 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
-	/* TODO: */
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	csr_write(CSR_VSSTATUS, csr->vsstatus);
	csr_write(CSR_VSIE, csr->vsie);
	csr_write(CSR_VSTVEC, csr->vstvec);
	csr_write(CSR_VSSCRATCH, csr->vsscratch);
	csr_write(CSR_VSEPC, csr->vsepc);
	csr_write(CSR_VSCAUSE, csr->vscause);
	csr_write(CSR_VSTVAL, csr->vstval);
	csr_write(CSR_HVIP, csr->hvip);
	csr_write(CSR_VSATP, csr->vsatp);

	kvm_riscv_stage2_update_hgatp(vcpu);

	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
-	/* TODO: */
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	vcpu->cpu = -1;

	csr_write(CSR_HGATP, 0);

	csr->vsstatus = csr_read(CSR_VSSTATUS);
	csr->vsie = csr_read(CSR_VSIE);
	csr->vstvec = csr_read(CSR_VSTVEC);
	csr->vsscratch = csr_read(CSR_VSSCRATCH);
	csr->vsepc = csr_read(CSR_VSEPC);
	csr->vscause = csr_read(CSR_VSCAUSE);
	csr->vstval = csr_read(CSR_VSTVAL);
	csr->hvip = csr_read(CSR_HVIP);
	csr->vsatp = csr_read(CSR_VSATP);
}

static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
arch/riscv/kvm/vcpu_switch.S +203 −0 (new file)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/csr.h>

	.text
	.altmacro
	.option norelax

ENTRY(__kvm_riscv_switch_to)
	/* Save Host GPRs (except A0 and T0-T6) */
	REG_S	ra, (KVM_ARCH_HOST_RA)(a0)
	REG_S	sp, (KVM_ARCH_HOST_SP)(a0)
	REG_S	gp, (KVM_ARCH_HOST_GP)(a0)
	REG_S	tp, (KVM_ARCH_HOST_TP)(a0)
	REG_S	s0, (KVM_ARCH_HOST_S0)(a0)
	REG_S	s1, (KVM_ARCH_HOST_S1)(a0)
	REG_S	a1, (KVM_ARCH_HOST_A1)(a0)
	REG_S	a2, (KVM_ARCH_HOST_A2)(a0)
	REG_S	a3, (KVM_ARCH_HOST_A3)(a0)
	REG_S	a4, (KVM_ARCH_HOST_A4)(a0)
	REG_S	a5, (KVM_ARCH_HOST_A5)(a0)
	REG_S	a6, (KVM_ARCH_HOST_A6)(a0)
	REG_S	a7, (KVM_ARCH_HOST_A7)(a0)
	REG_S	s2, (KVM_ARCH_HOST_S2)(a0)
	REG_S	s3, (KVM_ARCH_HOST_S3)(a0)
	REG_S	s4, (KVM_ARCH_HOST_S4)(a0)
	REG_S	s5, (KVM_ARCH_HOST_S5)(a0)
	REG_S	s6, (KVM_ARCH_HOST_S6)(a0)
	REG_S	s7, (KVM_ARCH_HOST_S7)(a0)
	REG_S	s8, (KVM_ARCH_HOST_S8)(a0)
	REG_S	s9, (KVM_ARCH_HOST_S9)(a0)
	REG_S	s10, (KVM_ARCH_HOST_S10)(a0)
	REG_S	s11, (KVM_ARCH_HOST_S11)(a0)

	/* Save Host and Restore Guest SSTATUS */
	REG_L	t0, (KVM_ARCH_GUEST_SSTATUS)(a0)
	csrrw	t0, CSR_SSTATUS, t0
	REG_S	t0, (KVM_ARCH_HOST_SSTATUS)(a0)

	/* Save Host and Restore Guest HSTATUS */
	REG_L	t1, (KVM_ARCH_GUEST_HSTATUS)(a0)
	csrrw	t1, CSR_HSTATUS, t1
	REG_S	t1, (KVM_ARCH_HOST_HSTATUS)(a0)

	/* Save Host and Restore Guest SCOUNTEREN */
	REG_L	t2, (KVM_ARCH_GUEST_SCOUNTEREN)(a0)
	csrrw	t2, CSR_SCOUNTEREN, t2
	REG_S	t2, (KVM_ARCH_HOST_SCOUNTEREN)(a0)

	/* Save Host SSCRATCH and change it to struct kvm_vcpu_arch pointer */
	csrrw	t3, CSR_SSCRATCH, a0
	REG_S	t3, (KVM_ARCH_HOST_SSCRATCH)(a0)

	/* Save Host STVEC and change it to return path */
	la	t4, __kvm_switch_return
	csrrw	t4, CSR_STVEC, t4
	REG_S	t4, (KVM_ARCH_HOST_STVEC)(a0)

	/* Restore Guest SEPC */
	REG_L	t0, (KVM_ARCH_GUEST_SEPC)(a0)
	csrw	CSR_SEPC, t0

	/* Restore Guest GPRs (except A0) */
	REG_L	ra, (KVM_ARCH_GUEST_RA)(a0)
	REG_L	sp, (KVM_ARCH_GUEST_SP)(a0)
	REG_L	gp, (KVM_ARCH_GUEST_GP)(a0)
	REG_L	tp, (KVM_ARCH_GUEST_TP)(a0)
	REG_L	t0, (KVM_ARCH_GUEST_T0)(a0)
	REG_L	t1, (KVM_ARCH_GUEST_T1)(a0)
	REG_L	t2, (KVM_ARCH_GUEST_T2)(a0)
	REG_L	s0, (KVM_ARCH_GUEST_S0)(a0)
	REG_L	s1, (KVM_ARCH_GUEST_S1)(a0)
	REG_L	a1, (KVM_ARCH_GUEST_A1)(a0)
	REG_L	a2, (KVM_ARCH_GUEST_A2)(a0)
	REG_L	a3, (KVM_ARCH_GUEST_A3)(a0)
	REG_L	a4, (KVM_ARCH_GUEST_A4)(a0)
	REG_L	a5, (KVM_ARCH_GUEST_A5)(a0)
	REG_L	a6, (KVM_ARCH_GUEST_A6)(a0)
	REG_L	a7, (KVM_ARCH_GUEST_A7)(a0)
	REG_L	s2, (KVM_ARCH_GUEST_S2)(a0)
	REG_L	s3, (KVM_ARCH_GUEST_S3)(a0)
	REG_L	s4, (KVM_ARCH_GUEST_S4)(a0)
	REG_L	s5, (KVM_ARCH_GUEST_S5)(a0)
	REG_L	s6, (KVM_ARCH_GUEST_S6)(a0)
	REG_L	s7, (KVM_ARCH_GUEST_S7)(a0)
	REG_L	s8, (KVM_ARCH_GUEST_S8)(a0)
	REG_L	s9, (KVM_ARCH_GUEST_S9)(a0)
	REG_L	s10, (KVM_ARCH_GUEST_S10)(a0)
	REG_L	s11, (KVM_ARCH_GUEST_S11)(a0)
	REG_L	t3, (KVM_ARCH_GUEST_T3)(a0)
	REG_L	t4, (KVM_ARCH_GUEST_T4)(a0)
	REG_L	t5, (KVM_ARCH_GUEST_T5)(a0)
	REG_L	t6, (KVM_ARCH_GUEST_T6)(a0)

	/* Restore Guest A0 */
	REG_L	a0, (KVM_ARCH_GUEST_A0)(a0)

	/* Resume Guest */
	sret

	/* Back to Host */
	.align 2
__kvm_switch_return:
	/* Swap Guest A0 with SSCRATCH */
	csrrw	a0, CSR_SSCRATCH, a0

	/* Save Guest GPRs (except A0) */
	REG_S	ra, (KVM_ARCH_GUEST_RA)(a0)
	REG_S	sp, (KVM_ARCH_GUEST_SP)(a0)
	REG_S	gp, (KVM_ARCH_GUEST_GP)(a0)
	REG_S	tp, (KVM_ARCH_GUEST_TP)(a0)
	REG_S	t0, (KVM_ARCH_GUEST_T0)(a0)
	REG_S	t1, (KVM_ARCH_GUEST_T1)(a0)
	REG_S	t2, (KVM_ARCH_GUEST_T2)(a0)
	REG_S	s0, (KVM_ARCH_GUEST_S0)(a0)
	REG_S	s1, (KVM_ARCH_GUEST_S1)(a0)
	REG_S	a1, (KVM_ARCH_GUEST_A1)(a0)
	REG_S	a2, (KVM_ARCH_GUEST_A2)(a0)
	REG_S	a3, (KVM_ARCH_GUEST_A3)(a0)
	REG_S	a4, (KVM_ARCH_GUEST_A4)(a0)
	REG_S	a5, (KVM_ARCH_GUEST_A5)(a0)
	REG_S	a6, (KVM_ARCH_GUEST_A6)(a0)
	REG_S	a7, (KVM_ARCH_GUEST_A7)(a0)
	REG_S	s2, (KVM_ARCH_GUEST_S2)(a0)
	REG_S	s3, (KVM_ARCH_GUEST_S3)(a0)
	REG_S	s4, (KVM_ARCH_GUEST_S4)(a0)
	REG_S	s5, (KVM_ARCH_GUEST_S5)(a0)
	REG_S	s6, (KVM_ARCH_GUEST_S6)(a0)
	REG_S	s7, (KVM_ARCH_GUEST_S7)(a0)
	REG_S	s8, (KVM_ARCH_GUEST_S8)(a0)
	REG_S	s9, (KVM_ARCH_GUEST_S9)(a0)
	REG_S	s10, (KVM_ARCH_GUEST_S10)(a0)
	REG_S	s11, (KVM_ARCH_GUEST_S11)(a0)
	REG_S	t3, (KVM_ARCH_GUEST_T3)(a0)
	REG_S	t4, (KVM_ARCH_GUEST_T4)(a0)
	REG_S	t5, (KVM_ARCH_GUEST_T5)(a0)
	REG_S	t6, (KVM_ARCH_GUEST_T6)(a0)

	/* Save Guest SEPC */
	csrr	t0, CSR_SEPC
	REG_S	t0, (KVM_ARCH_GUEST_SEPC)(a0)

	/* Restore Host STVEC */
	REG_L	t1, (KVM_ARCH_HOST_STVEC)(a0)
	csrw	CSR_STVEC, t1

	/* Save Guest A0 and Restore Host SSCRATCH */
	REG_L	t2, (KVM_ARCH_HOST_SSCRATCH)(a0)
	csrrw	t2, CSR_SSCRATCH, t2
	REG_S	t2, (KVM_ARCH_GUEST_A0)(a0)

	/* Save Guest and Restore Host SCOUNTEREN */
	REG_L	t3, (KVM_ARCH_HOST_SCOUNTEREN)(a0)
	csrrw	t3, CSR_SCOUNTEREN, t3
	REG_S	t3, (KVM_ARCH_GUEST_SCOUNTEREN)(a0)

	/* Save Guest and Restore Host HSTATUS */
	REG_L	t4, (KVM_ARCH_HOST_HSTATUS)(a0)
	csrrw	t4, CSR_HSTATUS, t4
	REG_S	t4, (KVM_ARCH_GUEST_HSTATUS)(a0)

	/* Save Guest and Restore Host SSTATUS */
	REG_L	t5, (KVM_ARCH_HOST_SSTATUS)(a0)
	csrrw	t5, CSR_SSTATUS, t5
	REG_S	t5, (KVM_ARCH_GUEST_SSTATUS)(a0)

	/* Restore Host GPRs (except A0 and T0-T6) */
	REG_L	ra, (KVM_ARCH_HOST_RA)(a0)
	REG_L	sp, (KVM_ARCH_HOST_SP)(a0)
	REG_L	gp, (KVM_ARCH_HOST_GP)(a0)
	REG_L	tp, (KVM_ARCH_HOST_TP)(a0)
	REG_L	s0, (KVM_ARCH_HOST_S0)(a0)
	REG_L	s1, (KVM_ARCH_HOST_S1)(a0)
	REG_L	a1, (KVM_ARCH_HOST_A1)(a0)
	REG_L	a2, (KVM_ARCH_HOST_A2)(a0)
	REG_L	a3, (KVM_ARCH_HOST_A3)(a0)
	REG_L	a4, (KVM_ARCH_HOST_A4)(a0)
	REG_L	a5, (KVM_ARCH_HOST_A5)(a0)
	REG_L	a6, (KVM_ARCH_HOST_A6)(a0)
	REG_L	a7, (KVM_ARCH_HOST_A7)(a0)
	REG_L	s2, (KVM_ARCH_HOST_S2)(a0)
	REG_L	s3, (KVM_ARCH_HOST_S3)(a0)
	REG_L	s4, (KVM_ARCH_HOST_S4)(a0)
	REG_L	s5, (KVM_ARCH_HOST_S5)(a0)
	REG_L	s6, (KVM_ARCH_HOST_S6)(a0)
	REG_L	s7, (KVM_ARCH_HOST_S7)(a0)
	REG_L	s8, (KVM_ARCH_HOST_S8)(a0)
	REG_L	s9, (KVM_ARCH_HOST_S9)(a0)
	REG_L	s10, (KVM_ARCH_HOST_S10)(a0)
	REG_L	s11, (KVM_ARCH_HOST_S11)(a0)

	/* Return to C code */
	ret
ENDPROC(__kvm_riscv_switch_to)