Commit 0a86512d authored by Anup Patel, committed by Paolo Bonzini

RISC-V: KVM: Factor-out FP virtualization into separate sources



The timer and SBI virtualization are already in separate sources.
In the future, vector and AIA virtualization will also be added
as separate sources.

To align with the modularity described above, we factor out FP
virtualization into separate sources.

Signed-off-by: Anup Patel <anup.patel@wdc.com>
Message-Id: <20211026170136.2147619-3-anup.patel@wdc.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 4e338684
arch/riscv/include/asm/kvm_host.h  +1 −4
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_timer.h>

#ifdef CONFIG_64BIT
@@ -247,10 +248,6 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
			struct kvm_cpu_trap *trap);

void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);
void __kvm_riscv_fp_f_save(struct kvm_cpu_context *context);
void __kvm_riscv_fp_f_restore(struct kvm_cpu_context *context);
void __kvm_riscv_fp_d_save(struct kvm_cpu_context *context);
void __kvm_riscv_fp_d_restore(struct kvm_cpu_context *context);

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
arch/riscv/include/asm/kvm_vcpu_fp.h (new file)  +59 −0
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 *     Anup Patel <anup.patel@wdc.com>
 */

#ifndef __KVM_VCPU_RISCV_FP_H
#define __KVM_VCPU_RISCV_FP_H

#include <linux/types.h>

struct kvm_cpu_context;

#ifdef CONFIG_FPU
void __kvm_riscv_fp_f_save(struct kvm_cpu_context *context);
void __kvm_riscv_fp_f_restore(struct kvm_cpu_context *context);
void __kvm_riscv_fp_d_save(struct kvm_cpu_context *context);
void __kvm_riscv_fp_d_restore(struct kvm_cpu_context *context);

void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
				  unsigned long isa);
void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
				     unsigned long isa);
void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx);
void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx);
#else
static inline void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
{
}
static inline void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
						unsigned long isa)
{
}
static inline void kvm_riscv_vcpu_guest_fp_restore(
					struct kvm_cpu_context *cntx,
					unsigned long isa)
{
}
static inline void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx)
{
}
static inline void kvm_riscv_vcpu_host_fp_restore(
					struct kvm_cpu_context *cntx)
{
}
#endif

int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      unsigned long rtype);
int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      unsigned long rtype);

#endif
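
As an aside, the kvm_riscv_vcpu_get_reg_fp()/kvm_riscv_vcpu_set_reg_fp() prototypes above back the generic KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu ioctls. A minimal userspace sketch of reading the guest's fcsr, assuming a RISC-V host, a vcpu fd obtained via KVM_CREATE_VCPU, and the riscv uapi register-id macros from <asm/kvm.h> (pulled in by <linux/kvm.h>):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_guest_fcsr(int vcpu_fd, uint32_t *out)
{
	struct kvm_one_reg reg = {
		/* u32-sized F-extension register, fcsr slot */
		.id   = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
			KVM_REG_RISCV_FP_F |
			KVM_REG_RISCV_FP_F_REG(fcsr),
		.addr = (uint64_t)(uintptr_t)out,
	};

	/* 0 on success; fails with EINVAL when the F extension is absent */
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}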
arch/riscv/kvm/Makefile  +1 −0
@@ -20,6 +20,7 @@ kvm-y += tlb.o
kvm-y += mmu.o
kvm-y += vcpu.o
kvm-y += vcpu_exit.o
kvm-y += vcpu_fp.o
kvm-y += vcpu_switch.o
kvm-y += vcpu_sbi.o
kvm-y += vcpu_timer.o
arch/riscv/kvm/vcpu.c  +0 −172
@@ -38,86 +38,6 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
		       sizeof(kvm_vcpu_stats_desc),
};

#ifdef CONFIG_FPU
static void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
{
	unsigned long isa = vcpu->arch.isa;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	cntx->sstatus &= ~SR_FS;
	if (riscv_isa_extension_available(&isa, f) ||
	    riscv_isa_extension_available(&isa, d))
		cntx->sstatus |= SR_FS_INITIAL;
	else
		cntx->sstatus |= SR_FS_OFF;
}

static void kvm_riscv_vcpu_fp_clean(struct kvm_cpu_context *cntx)
{
	cntx->sstatus &= ~SR_FS;
	cntx->sstatus |= SR_FS_CLEAN;
}

static void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
					 unsigned long isa)
{
	if ((cntx->sstatus & SR_FS) == SR_FS_DIRTY) {
		if (riscv_isa_extension_available(&isa, d))
			__kvm_riscv_fp_d_save(cntx);
		else if (riscv_isa_extension_available(&isa, f))
			__kvm_riscv_fp_f_save(cntx);
		kvm_riscv_vcpu_fp_clean(cntx);
	}
}

static void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
					    unsigned long isa)
{
	if ((cntx->sstatus & SR_FS) != SR_FS_OFF) {
		if (riscv_isa_extension_available(&isa, d))
			__kvm_riscv_fp_d_restore(cntx);
		else if (riscv_isa_extension_available(&isa, f))
			__kvm_riscv_fp_f_restore(cntx);
		kvm_riscv_vcpu_fp_clean(cntx);
	}
}

static void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx)
{
	/* No need to check host sstatus as it can be modified outside */
	if (riscv_isa_extension_available(NULL, d))
		__kvm_riscv_fp_d_save(cntx);
	else if (riscv_isa_extension_available(NULL, f))
		__kvm_riscv_fp_f_save(cntx);
}

static void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx)
{
	if (riscv_isa_extension_available(NULL, d))
		__kvm_riscv_fp_d_restore(cntx);
	else if (riscv_isa_extension_available(NULL, f))
		__kvm_riscv_fp_f_restore(cntx);
}
#else
static void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
{
}
static void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
					 unsigned long isa)
{
}
static void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
					    unsigned long isa)
{
}
static void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx)
{
}
static void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx)
{
}
#endif

#define KVM_RISCV_ISA_ALLOWED	(riscv_isa_extension_mask(a) | \
				 riscv_isa_extension_mask(c) | \
				 riscv_isa_extension_mask(d) | \
@@ -414,98 +334,6 @@ static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
	return 0;
}

static int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     unsigned long rtype)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long isa = vcpu->arch.isa;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    rtype);
	void *reg_val;

	if ((rtype == KVM_REG_RISCV_FP_F) &&
	    riscv_isa_extension_available(&isa, f)) {
		if (KVM_REG_SIZE(reg->id) != sizeof(u32))
			return -EINVAL;
		if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
			reg_val = &cntx->fp.f.fcsr;
		else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
			  reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
			reg_val = &cntx->fp.f.f[reg_num];
		else
			return -EINVAL;
	} else if ((rtype == KVM_REG_RISCV_FP_D) &&
		   riscv_isa_extension_available(&isa, d)) {
		if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u32))
				return -EINVAL;
			reg_val = &cntx->fp.d.fcsr;
		} else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) &&
			   reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u64))
				return -EINVAL;
			reg_val = &cntx->fp.d.f[reg_num];
		} else
			return -EINVAL;
	} else
		return -EINVAL;

	if (copy_to_user(uaddr, reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     unsigned long rtype)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long isa = vcpu->arch.isa;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    rtype);
	void *reg_val;

	if ((rtype == KVM_REG_RISCV_FP_F) &&
	    riscv_isa_extension_available(&isa, f)) {
		if (KVM_REG_SIZE(reg->id) != sizeof(u32))
			return -EINVAL;
		if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
			reg_val = &cntx->fp.f.fcsr;
		else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
			  reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
			reg_val = &cntx->fp.f.f[reg_num];
		else
			return -EINVAL;
	} else if ((rtype == KVM_REG_RISCV_FP_D) &&
		   riscv_isa_extension_available(&isa, d)) {
		if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u32))
				return -EINVAL;
			reg_val = &cntx->fp.d.fcsr;
		} else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) &&
			   reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u64))
				return -EINVAL;
			reg_val = &cntx->fp.d.f[reg_num];
		} else
			return -EINVAL;
	} else
		return -EINVAL;

	if (copy_from_user(reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
arch/riscv/kvm/vcpu_fp.c (new file)  +167 −0
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 *     Anup Patel <anup.patel@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>

#ifdef CONFIG_FPU
void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
{
	unsigned long isa = vcpu->arch.isa;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	cntx->sstatus &= ~SR_FS;
	if (riscv_isa_extension_available(&isa, f) ||
	    riscv_isa_extension_available(&isa, d))
		cntx->sstatus |= SR_FS_INITIAL;
	else
		cntx->sstatus |= SR_FS_OFF;
}

void kvm_riscv_vcpu_fp_clean(struct kvm_cpu_context *cntx)
{
	cntx->sstatus &= ~SR_FS;
	cntx->sstatus |= SR_FS_CLEAN;
}

void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
				  unsigned long isa)
{
	if ((cntx->sstatus & SR_FS) == SR_FS_DIRTY) {
		if (riscv_isa_extension_available(&isa, d))
			__kvm_riscv_fp_d_save(cntx);
		else if (riscv_isa_extension_available(&isa, f))
			__kvm_riscv_fp_f_save(cntx);
		kvm_riscv_vcpu_fp_clean(cntx);
	}
}

void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
				     unsigned long isa)
{
	if ((cntx->sstatus & SR_FS) != SR_FS_OFF) {
		if (riscv_isa_extension_available(&isa, d))
			__kvm_riscv_fp_d_restore(cntx);
		else if (riscv_isa_extension_available(&isa, f))
			__kvm_riscv_fp_f_restore(cntx);
		kvm_riscv_vcpu_fp_clean(cntx);
	}
}
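
/*
 * Note on the logic above: sstatus.FS is the two-bit FP state field from
 * the RISC-V privileged spec (SR_FS_OFF, SR_FS_INITIAL, SR_FS_CLEAN,
 * SR_FS_DIRTY in asm/csr.h).  Hardware sets it to DIRTY whenever the
 * guest touches FP state, so kvm_riscv_vcpu_guest_fp_save() writes the
 * context back only when the field reads DIRTY, while
 * kvm_riscv_vcpu_guest_fp_restore() reloads it whenever the unit is not
 * OFF; both then mark the state CLEAN.
 */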

void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx)
{
	/* No need to check host sstatus as it can be modified outside */
	if (riscv_isa_extension_available(NULL, d))
		__kvm_riscv_fp_d_save(cntx);
	else if (riscv_isa_extension_available(NULL, f))
		__kvm_riscv_fp_f_save(cntx);
}

void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx)
{
	if (riscv_isa_extension_available(NULL, d))
		__kvm_riscv_fp_d_restore(cntx);
	else if (riscv_isa_extension_available(NULL, f))
		__kvm_riscv_fp_f_restore(cntx);
}
#endif

int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      unsigned long rtype)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long isa = vcpu->arch.isa;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    rtype);
	void *reg_val;

	if ((rtype == KVM_REG_RISCV_FP_F) &&
	    riscv_isa_extension_available(&isa, f)) {
		if (KVM_REG_SIZE(reg->id) != sizeof(u32))
			return -EINVAL;
		if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
			reg_val = &cntx->fp.f.fcsr;
		else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
			  reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
			reg_val = &cntx->fp.f.f[reg_num];
		else
			return -EINVAL;
	} else if ((rtype == KVM_REG_RISCV_FP_D) &&
		   riscv_isa_extension_available(&isa, d)) {
		if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u32))
				return -EINVAL;
			reg_val = &cntx->fp.d.fcsr;
		} else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) &&
			   reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u64))
				return -EINVAL;
			reg_val = &cntx->fp.d.f[reg_num];
		} else
			return -EINVAL;
	} else
		return -EINVAL;

	if (copy_to_user(uaddr, reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      unsigned long rtype)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long isa = vcpu->arch.isa;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    rtype);
	void *reg_val;

	if ((rtype == KVM_REG_RISCV_FP_F) &&
	    riscv_isa_extension_available(&isa, f)) {
		if (KVM_REG_SIZE(reg->id) != sizeof(u32))
			return -EINVAL;
		if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
			reg_val = &cntx->fp.f.fcsr;
		else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
			  reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
			reg_val = &cntx->fp.f.f[reg_num];
		else
			return -EINVAL;
	} else if ((rtype == KVM_REG_RISCV_FP_D) &&
		   riscv_isa_extension_available(&isa, d)) {
		if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u32))
				return -EINVAL;
			reg_val = &cntx->fp.d.fcsr;
		} else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) &&
			   reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u64))
				return -EINVAL;
			reg_val = &cntx->fp.d.f[reg_num];
		} else
			return -EINVAL;
	} else
		return -EINVAL;

	if (copy_from_user(reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
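
For context, a sketch of how the newly exported helpers are expected to pair up on the vcpu load/put paths in arch/riscv/kvm/vcpu.c. The call sites themselves are not part of this diff, so treat this as an assumption about the surrounding code rather than as the patch's contents:

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* ... CSR setup elided ... */
	kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
	kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
					vcpu->arch.isa);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
				     vcpu->arch.isa);
	kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);
	/* ... CSR save elided ... */
}

Under !CONFIG_FPU these calls compile down to the empty inline stubs in asm/kvm_vcpu_fp.h, which is why the callers need no #ifdef.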