Commit 622f37d3 authored by qemudev's avatar qemudev Committed by Hongchen Zhang
Browse files

LoongArch: kvm: add initial kvm support

LoongArch inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I6BWFP



--------------------------------

add initial kvm support.

Signed-off-by: qemudev <qemudev@loongson.cn>
Change-Id: I1f6d361dd54299d97748a360686171e2c34a203c
parent d25fd25f
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
# Core LoongArch kernel, memory-management and vDSO objects.
obj-y += kernel/
obj-y += mm/
obj-y += vdso/
# KVM virtualization support, built only when CONFIG_KVM is enabled.
obj-$(CONFIG_KVM)	+= kvm/

# for cleaning
subdir- += boot
+2 −0
Original line number Diff line number Diff line
@@ -106,6 +106,7 @@ config LOONGARCH
	select HAVE_SETUP_PER_CPU_AREA if NUMA
	select HAVE_SYSCALL_TRACEPOINTS
	select HAVE_TIF_NOHZ
	select HAVE_KVM
	select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
	select IRQ_FORCED_THREADING
	select IRQ_LOONGARCH_CPU
@@ -539,3 +540,4 @@ source "drivers/cpufreq/Kconfig"
endmenu

source "drivers/firmware/Kconfig"
source "arch/loongarch/kvm/Kconfig"
+356 −0
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#ifndef __LOONGARCH_KVM_HOST_H__
#define __LOONGARCH_KVM_HOST_H__

#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <asm/inst.h>

/* Loongarch KVM register ids (for KVM_GET_ONE_REG / KVM_SET_ONE_REG) */
#define LOONGARCH_CSR_32(_R, _S)					\
	(KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))

#define LOONGARCH_CSR_64(_R, _S)					\
	(KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))

/* Build the 64-bit ONE_REG id for CSR @id, and recover the CSR index back. */
#define KVM_IOC_CSRID(id)	LOONGARCH_CSR_64(id, 0)
/* Parenthesize the argument so expressions such as `a + b` mask correctly. */
#define KVM_GET_IOC_CSRIDX(id)	(((id) & KVM_CSR_IDX_MASK) >> 3)

#define LOONGSON_VIRT_REG_BASE	0x1f000000
#define KVM_MAX_VCPUS		256
#define KVM_USER_MEM_SLOTS	256
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS	0

#define KVM_HALT_POLL_NS_DEFAULT 500000

#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(1)
/* Poison value used to mark an invalid guest address. */
#define KVM_INVALID_ADDR		0xdeadbeef
#define KVM_HVA_ERR_BAD			(-1UL)
#define KVM_HVA_ERR_RO_BAD		(-2UL)
/* An HVA is "bad" when it falls into the kernel error-pointer range. */
static inline bool kvm_is_error_hva(unsigned long addr)
{
	return IS_ERR_VALUE(addr);
}

/*
 * VM-wide statistics counters (one instance per VM), covering the
 * emulated LS7A ioapic, LS3A IPI/extended-irq devices and irq ioctls.
 */
struct kvm_vm_stat {
	ulong remote_tlb_flush;
	u64 vm_ioctl_irq_line;
	u64 ls7a_ioapic_update;
	u64 ls7a_ioapic_set_irq;
	u64 ioapic_reg_write;
	u64 ioapic_reg_read;
	u64 set_ls7a_ioapic;
	u64 get_ls7a_ioapic;
	u64 set_ls3a_ext_irq;
	u64 get_ls3a_ext_irq;
	u64 trigger_ls3a_ext_irq;
	u64 pip_read_exits;
	u64 pip_write_exits;
	u64 ls7a_msi_irq;
};
/* Per-vCPU statistics counters. */
struct kvm_vcpu_stat {
	/* one counter per exception code below EXCCODE_INT_START */
	u64 excep_exits[EXCCODE_INT_START];
	u64 idle_exits;
	u64 signal_exits;
	u64 int_exits;
	u64 rdcsr_cpu_feature_exits;
	u64 rdcsr_misc_func_exits;
	u64 rdcsr_ipi_access_exits;
	u64 cpucfg_exits;
	u64 huge_dec_exits;
	u64 huge_thp_exits;
	u64 huge_adjust_exits;
	u64 huge_set_exits;
	u64 huge_merge_exits;
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
};

/* Arch-private memslot flag bit; the name indicates it disables THP for the slot. */
#define KVM_MEMSLOT_DISABLE_THP		(1UL << 17)
struct kvm_arch_memory_slot {
	unsigned int flags;	/* arch-private per-slot flags (e.g. KVM_MEMSLOT_DISABLE_THP) */
};

/* Indices into kvm_arch.iocsr[] for the emulated IOCSR registers. */
enum {
	IOCSR_FEATURES,
	IOCSR_VENDOR,
	IOCSR_CPUNAME,
	IOCSR_NODECNT,
	IOCSR_MISC_FUNC,
	IOCSR_MAX
};

struct kvm_context {
	unsigned long gid_mask;
	unsigned long gid_ver_mask;
	unsigned long gid_fisrt_ver;
	unsigned long vpid_cache;
	struct kvm_vcpu *last_vcpu;
};

/* Per-VM architecture state. */
struct kvm_arch {
	/* Guest physical mm */
	struct mm_struct gpa_mm;
	/* Mask of CPUs needing GPA ASID flush */
	cpumask_t asid_flush_mask;

	unsigned char online_vcpus;
	unsigned char is_migrate;
	/* stable-counter offset between guest and host */
	s64 stablecounter_gftoffset;
	u32 cpucfg_lasx;
	/* emulated interrupt controllers */
	struct ls7a_kvm_ioapic *v_ioapic;
	struct ls3a_kvm_ipi *v_gipi;
	struct ls3a_kvm_routerirq *v_routerirq;
	struct ls3a_kvm_extirq *v_extirq;
	/* protects the iocsr[] table below */
	spinlock_t iocsr_lock;
	struct kvm_iocsr_entry iocsr[IOCSR_MAX];
	struct kvm_cpucfg cpucfgs;
	/* per-physical-CPU VPID allocation state */
	struct kvm_context __percpu *vmcs;
};


/* Layout of the software-shadowed CSR array below (index bases and sizes). */
#define LOONGARCH_CSRS	0x100
#define CSR_UCWIN_BASE	0x100
#define CSR_UCWIN_SIZE	0x10
#define CSR_DMWIN_BASE	0x180
#define CSR_DMWIN_SIZE	0x4
#define CSR_PERF_BASE	0x200
#define CSR_PERF_SIZE	0x8
#define CSR_DEBUG_BASE	0x500
#define CSR_DEBUG_SIZE	0x3
#define CSR_ALL_SIZE	0x800

/* Software copy of all guest CSRs, indexed by CSR number. */
struct loongarch_csrs {
	unsigned long csrs[CSR_ALL_SIZE];
};

/* Resume Flags: returned by exit handlers to direct the run loop. */
#define RESUME_FLAG_DR		(1<<0)	/* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */

#define RESUME_GUEST		0
#define RESUME_GUEST_DR		RESUME_FLAG_DR
#define RESUME_HOST		RESUME_FLAG_HOST

/* Outcome of instruction emulation. */
enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_WAIT,		/* WAIT instruction */
	EMULATE_PRIV_FAIL,
	EMULATE_EXCEPT,		/* A guest exception has been generated */
	EMULATE_PV_HYPERCALL,	/* HYPCALL instruction */
	EMULATE_DEBUG,		/* Emulate guest kernel debug */
	EMULATE_DO_IOCSR,	/* handle IOCSR request */
};

#define KVM_NR_MEM_OBJS     4
/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;	/* number of valid entries in objects[] */
	void *objects[KVM_NR_MEM_OBJS];
};

/* Align the FPU save area to the widest vector register the CPU supports. */
#if defined(CONFIG_CPU_HAS_LASX)
#define FPU_ALIGN		__aligned(32)
#elif defined(CONFIG_CPU_HAS_LSX)
#define FPU_ALIGN		__aligned(16)
#else
#define FPU_ALIGN
#endif
/* Bits for kvm_vcpu_arch.aux_inuse: which auxiliary state is currently live. */
#define KVM_LARCH_FPU		(0x1 << 0)
#define KVM_LARCH_LSX		(0x1 << 1)
#define KVM_LARCH_LASX		(0x1 << 2)
#define KVM_LARCH_DATA_HWBP	(0x1 << 3)
#define KVM_LARCH_INST_HWBP	(0x1 << 4)
#define KVM_LARCH_HWBP		(KVM_LARCH_DATA_HWBP | KVM_LARCH_INST_HWBP)
#define KVM_LARCH_RESET		(0x1 << 5)
#define KVM_LARCH_PERF		(0x1 << 6)

/* Per-vCPU architecture state. */
struct kvm_vcpu_arch {
	unsigned long guest_eentry;
	unsigned long host_eentry;
	/* entry/exit callbacks into the low-level guest-switch code */
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);

	/* Host registers preserved across guest mode execution */
	unsigned long host_stack;
	unsigned long host_gp;
	unsigned long host_pgd;
	unsigned long host_pgdhi;
	unsigned long host_entryhi;

	/* Host CSR registers used when handling exits from guest */
	unsigned long badv;
	unsigned long host_estat;
	unsigned long badi;
	unsigned long host_ecfg;
	unsigned long host_percpu;

	/* non-zero while a hypercall is being emulated */
	u32 is_hypcall;
	/* GPRS */
	unsigned long gprs[32];
	unsigned long pc;

	/* FPU State */
	struct loongarch_fpu fpu FPU_ALIGN;
	/* Which auxiliary state is loaded (KVM_LOONGARCH_AUX_*) */
	unsigned int aux_inuse;

	/* CSR State */
	struct loongarch_csrs *csr;

	/* GPR used as IO source/target */
	u32 io_gpr;

	/* software timer used to emulate the guest stable timer */
	struct hrtimer swtimer;
	/* Count timer control KVM register */
	u32 count_ctl;

	/* Bitmask of exceptions that are pending */
	unsigned long irq_pending;
	/* Bitmask of pending exceptions to be cleared */
	unsigned long irq_clear;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* vcpu's vpid is different on each host cpu in an smp system */
	u64 vpid[NR_CPUS];

	/* Period of stable timer tick in ns */
	u64 timer_period;
	/* Frequency of stable timer in Hz */
	u64 timer_mhz;
	/* Stable bias from the raw time */
	u64 timer_bias;
	/* Dynamic nanosecond bias (multiple of timer_period) to avoid overflow */
	s64 timer_dyn_bias;
	/* Save ktime */
	ktime_t stable_ktime_saved;

	/* per-core extended-interrupt in-service bitmap */
	u64 core_ext_ioisr[4];

	/* Last CPU the VCPU state was loaded on */
	int last_sched_cpu;
	/* Last CPU the VCPU actually executed guest code on */
	int last_exec_cpu;

	/* feature enables for this vCPU (see _kvm_guest_has_fpu/_lsx) */
	u8 fpu_enabled;
	u8 lsx_enabled;
	/* paravirt steal time */
	struct {
		u64 guest_addr;
		u64 last_steal;
		struct gfn_to_pfn_cache cache;
	} st;
	struct kvm_guest_debug_arch guest_debug;
	/* save host pmu csr */
	u64 perf_ctrl[4];
	u64 perf_cntr[4];

};

/* Read a software-shadowed guest CSR value. */
static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
{
	return csr->csrs[reg];
}

/*
 * Write a software-shadowed guest CSR value.
 * (The stray '\' line continuation in the original declaration was removed;
 * it is only meaningful inside macro definitions.)
 */
static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg,
		unsigned long val)
{
	csr->csrs[reg] = val;
}

/* Helpers */
static inline bool _kvm_guest_has_fpu(struct kvm_vcpu_arch *arch)
{
	return cpu_has_fpu && arch->fpu_enabled;
}


static inline bool _kvm_guest_has_lsx(struct kvm_vcpu_arch *arch)
{
	return cpu_has_lsx && arch->lsx_enabled;
}

/* LASX availability needs vCPU context; implemented out of line. */
bool _kvm_guest_has_lasx(struct kvm_vcpu *vcpu);
void _kvm_init_fault(void);

/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

/* MMU handling */
int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);
void kvm_flush_tlb_all(void);
void _kvm_destroy_mm(struct kvm *kvm);
pgd_t *kvm_pgd_alloc(void);
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

/* Classification of a guest address fault. */
enum _kvm_fault_result {
	KVM_LOONGARCH_MAPPED = 0,
	KVM_LOONGARCH_GVA,
	KVM_LOONGARCH_GPA,
	KVM_LOONGARCH_TLB,
	KVM_LOONGARCH_TLBINV,
	KVM_LOONGARCH_TLBMOD,
};

/* MMU-notifier hooks required by the generic KVM core. */
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end, bool blockable);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

/* Step the guest PC over the (fixed 4-byte) instruction just emulated. */
static inline void update_pc(struct kvm_vcpu_arch *arch)
{
	arch->pc = arch->pc + 4;
}

/**
 * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
 * @arch:	vCPU architecture state.
 *
 * Returns:	Whether the TLBL exception was likely due to an instruction
 *		fetch fault rather than a data load fault (the faulting
 *		address equals the PC being fetched).
 */
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
{
	return arch->pc == arch->badv;
}

/* Misc: empty arch hooks required by the generic KVM core on this arch. */
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

/* Low-level guest entry / exception entry, implemented in assembly. */
extern int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvm_exception_entry(void);
#endif /* __LOONGARCH_KVM_HOST_H__ */
+8 −0
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_LOONGARCH64_KVM_TYPES_H
#define _ASM_LOONGARCH64_KVM_TYPES_H

/* Number of objects preallocated per generic KVM memory cache. */
#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 4

#endif /* _ASM_LOONGARCH64_KVM_TYPES_H */
+268 −0
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2020  Loongson Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 * Authors: Xing Li <lixing@loongson.cn>
 */

#ifndef __LINUX_KVM_LOONGARCH_H
#define __LINUX_KVM_LOONGARCH_H

#include <linux/types.h>
#ifndef __KERNEL__
#include <stdint.h>
#endif

/* Guest-debug capability and breakpoint limits exposed to userspace. */
#define __KVM_HAVE_GUEST_DEBUG
#define KVM_GUESTDBG_USE_SW_BP 0x00010000
#define KVM_GUESTDBG_USE_HW_BP 0x00020000
#define KVM_DATA_HW_BREAKPOINT_NUM 8
#define KVM_INST_HW_BREAKPOINT_NUM 8

/*
 * KVM Loongarch specific structures and definitions.
 *
 * Some parts derived from the x86 version of this file.
 */

#define __KVM_HAVE_READONLY_MEM

#define KVM_COALESCED_MMIO_PAGE_OFFSET 1

/*
 * for KVM_GET_REGS and KVM_SET_REGS
 */
struct kvm_regs {
	/* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
	__u64 gpr[32];	/* general-purpose registers r0..r31 */
	__u64 pc;
};

/*
 * for KVM_GET_CPUCFG
 */
struct kvm_cpucfg {
	/* out (KVM_GET_CPUCFG) */
	__u32 cpucfg[64];	/* CPUCFG leaf values */
};

/*
 * for KVM_GET_FPU and KVM_SET_FPU
 */
struct kvm_fpu {
	__u32 fcsr;	/* FP control/status register */
	__u32 none;	/* padding */
	__u64 fcc;    /* 8x8 */
	struct kvm_fpureg {
		__u64 val64[4];	/* supports registers up to 256 bits wide */
	} fpr[32];
};

/*
 * For LOONGARCH, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various
 * registers.  The id field is broken down as follows:
 *
 *  bits[63..52] - As per linux/kvm.h
 *  bits[51..32] - Must be zero.
 *  bits[31..16] - Register set.
 *
 * Register set = 0: GP registers from kvm_regs (see definitions below).
 *
 * Register set = 1: CSR registers.
 *
 * Register set = 2: KVM specific registers (see definitions below).
 *
 * Register set = 3: FPU / SIMD registers (see definitions below).
 *
 * Other sets registers may be added in the future.  Each set would
 * have its own identifier in bits[31..16].
 */

#define KVM_REG_LOONGARCH_GP		(KVM_REG_LOONGARCH | 0x00000ULL)
#define KVM_REG_LOONGARCH_CSR		(KVM_REG_LOONGARCH | 0x10000ULL)
#define KVM_REG_LOONGARCH_KVM		(KVM_REG_LOONGARCH | 0x20000ULL)
#define KVM_REG_LOONGARCH_FPU		(KVM_REG_LOONGARCH | 0x30000ULL)
/* 0x30000 covers both set bits, so it extracts the register-set field. */
#define KVM_REG_LOONGARCH_MASK		(KVM_REG_LOONGARCH | 0x30000ULL)
#define KVM_CSR_IDX_MASK		(0x10000 - 1)

/*
 * KVM_REG_LOONGARCH_KVM - KVM specific control registers.
 */

#define KVM_REG_LOONGARCH_COUNTER	(KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 3)
#define KVM_REG_LOONGARCH_VCPU_RESET	(KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 4)

#define __KVM_HAVE_IRQ_LINE

/* Arch-specific payload of a KVM_EXIT_DEBUG exit. */
struct kvm_debug_exit_arch {
	__u64 era;	/* exception return address */
	__u32 fwps;
	__u32 mwps;
	__u32 exception;
};

/* for KVM_SET_GUEST_DEBUG */
/* One hardware breakpoint/watchpoint slot (indented with tabs per kernel style). */
struct hw_breakpoint {
	__u64 addr;
	__u64 mask;
	__u32 asid;
	__u32 ctrl;
};

/* Full hardware-breakpoint state passed with KVM_SET_GUEST_DEBUG. */
struct kvm_guest_debug_arch {
	struct hw_breakpoint data_breakpoint[KVM_DATA_HW_BREAKPOINT_NUM];
	struct hw_breakpoint inst_breakpoint[KVM_INST_HW_BREAKPOINT_NUM];
	int inst_bp_nums, data_bp_nums;	/* number of valid entries in each array */
};

/* definition of registers in kvm_run */
struct kvm_sync_regs {
};

/* dummy definition */
struct kvm_sregs {
};

/* One emulated IOCSR register: address plus 64-bit data. */
struct kvm_iocsr_entry {
	__u32 addr;
	__u32 pad;	/* padding for 64-bit alignment of data */
	__u64 data;
};

/* One CSR index/value pair transferred with KVM_GET_MSRS / KVM_SET_MSRS. */
struct kvm_csr_entry {
	__u32 index;
	__u32 reserved;
	__u64 data;
};

/* for KVM_GET_MSRS and KVM_SET_MSRS */
struct kvm_msrs {
	__u32 ncsrs; /* number of msrs in entries */
	__u32 pad;

	/* C99 flexible array member; [0]-sized arrays are a deprecated GNU extension */
	struct kvm_csr_entry entries[];
};

/* Payload of the KVM_INTERRUPT-style irq injection ioctl. */
struct kvm_loongarch_interrupt {
	/* in */
	__u32 cpu;	/* target vCPU */
	__u32 irq;	/* irq number to assert */
};

/* Identifiers for the emulated irqchip models. */
#define KVM_IRQCHIP_LS7A_IOAPIC	0x0
#define KVM_IRQCHIP_LS3A_GIPI	0x1
#define KVM_IRQCHIP_LS3A_HT_IRQ	0x2
#define KVM_IRQCHIP_LS3A_ROUTE	0x3
#define KVM_IRQCHIP_LS3A_EXTIRQ	0x4
#define KVM_IRQCHIP_LS3A_IPMASK	0x5
#define KVM_NR_IRQCHIPS          1
#define KVM_IRQCHIP_NUM_PINS    64

#define KVM_MAX_CORES			256
#define KVM_EXTIOI_IRQS			(256)
#define KVM_EXTIOI_IRQS_BITMAP_SIZE	(KVM_EXTIOI_IRQS / 8)
/* map to ipnum per 32 irqs */
#define KVM_EXTIOI_IRQS_IPMAP_SIZE	(KVM_EXTIOI_IRQS / 32)
#define KVM_EXTIOI_IRQS_PER_GROUP	32
#define KVM_EXTIOI_IRQS_COREMAP_SIZE	(KVM_EXTIOI_IRQS)
#define KVM_EXTIOI_IRQS_NODETYPE_SIZE	16

/* Register file of the emulated LS7A ioapic (offsets noted per field). */
struct ls7a_ioapic_state {
	/* 0x000 interrupt id register */
	__u64 int_id;
	/* 0x020 interrupt mask register */
	__u64 int_mask;
	/* 0x040 1=msi */
	__u64 htmsi_en;
	/* 0x060 edge=1 level  =0 */
	__u64 intedge;
	/* 0x080 for clearing edge ints: set 1 to clear, setting 0 is not used */
	__u64 intclr;
	/* 0x0c0 */
	__u64 auto_crtl0;
	/* 0x0e0 */
	__u64 auto_crtl1;
	/* 0x100 - 0x140 */
	__u8 route_entry[64];
	/* 0x200 - 0x240 */
	__u8 htmsi_vector[64];
	/* 0x300 */
	__u64 intisr_chip0;
	/* 0x320 */
	__u64 intisr_chip1;
	/* edge detection */
	__u64 last_intirr;
	/* 0x380 interrupt request register */
	__u64 intirr;
	/* 0x3a0 interrupt service register */
	__u64 intisr;
	/* 0x3e0 interrupt level polarity selection register,
	 * 0 for high level trigger
	 */
	__u64 int_polarity;
};

/* Per-core state of the emulated LS3A inter-processor interrupt (GIPI) unit. */
struct loongarch_gipi_single {
	__u32 status;
	__u32 en;
	__u32 set;
	__u32 clear;
	__u64 buf[4];	/* mailbox buffers */
};

/* GIPI state for every possible core. */
struct loongarch_gipiState {
	struct loongarch_gipi_single core[KVM_MAX_CORES];
};

/*
 * State of the emulated LS3A extended-interrupt (EXTIOI) controller.
 * Each register file is a union so it can be accessed at 8/16/32/64-bit
 * granularity, matching the guest's access width.
 */
struct kvm_loongarch_ls3a_extirq_state {
	union ext_en_r {
		uint64_t reg_u64[KVM_EXTIOI_IRQS_BITMAP_SIZE / 8];
		uint32_t reg_u32[KVM_EXTIOI_IRQS_BITMAP_SIZE / 4];
		uint8_t reg_u8[KVM_EXTIOI_IRQS_BITMAP_SIZE];
	} ext_en_r;
	union bounce_r {
		uint64_t reg_u64[KVM_EXTIOI_IRQS_BITMAP_SIZE / 8];
		uint32_t reg_u32[KVM_EXTIOI_IRQS_BITMAP_SIZE / 4];
		uint8_t reg_u8[KVM_EXTIOI_IRQS_BITMAP_SIZE];
	} bounce_r;
	union ext_isr_r {
		uint64_t reg_u64[KVM_EXTIOI_IRQS_BITMAP_SIZE / 8];
		uint32_t reg_u32[KVM_EXTIOI_IRQS_BITMAP_SIZE / 4];
		uint8_t reg_u8[KVM_EXTIOI_IRQS_BITMAP_SIZE];
	} ext_isr_r;
	union ext_core_isr_r {
		uint64_t reg_u64[KVM_MAX_CORES][KVM_EXTIOI_IRQS_BITMAP_SIZE / 8];
		uint32_t reg_u32[KVM_MAX_CORES][KVM_EXTIOI_IRQS_BITMAP_SIZE / 4];
		uint8_t reg_u8[KVM_MAX_CORES][KVM_EXTIOI_IRQS_BITMAP_SIZE];
	} ext_core_isr_r;
	union ip_map_r {
		uint64_t reg_u64;
		uint32_t reg_u32[KVM_EXTIOI_IRQS_IPMAP_SIZE / 4];
		uint8_t reg_u8[KVM_EXTIOI_IRQS_IPMAP_SIZE];
	} ip_map_r;
	union core_map_r {
		uint64_t reg_u64[KVM_EXTIOI_IRQS_COREMAP_SIZE / 8];
		uint32_t reg_u32[KVM_EXTIOI_IRQS_COREMAP_SIZE / 4];
		uint8_t reg_u8[KVM_EXTIOI_IRQS_COREMAP_SIZE];
	} core_map_r;
	union node_type_r {
		uint64_t reg_u64[KVM_EXTIOI_IRQS_NODETYPE_SIZE / 4];
		uint32_t reg_u32[KVM_EXTIOI_IRQS_NODETYPE_SIZE / 2];
		uint16_t reg_u16[KVM_EXTIOI_IRQS_NODETYPE_SIZE];
		uint8_t reg_u8[KVM_EXTIOI_IRQS_NODETYPE_SIZE * 2];
	} node_type_r;
};

/* Header preceding a chip-specific irqchip state blob. */
struct loongarch_kvm_irqchip {
	__u16 chip_id;	/* presumably one of KVM_IRQCHIP_* — verify against users */
	__u16 len;
	__u16 vcpu_id;
	__u16 reserved;
	/* C99 flexible array member; [0]-sized arrays are a deprecated GNU extension */
	char data[];
};

#endif /* __LINUX_KVM_LOONGARCH_H */
Loading