Commit b5370613 authored by Jingxian He's avatar Jingxian He Committed by hejingxian
Browse files

gicv3: add lpi support for cvm guest

virtcca inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9CC0X



--------------------------------

Add lpi support for cvm guest os:
The gic-its driver's mapped memory must be shared with qemu/kvm.
The cvm guest gic-its driver therefore allocates this memory from
the swiotlb bounce buffer, which is shared with qemu/kvm.

Signed-off-by: Jingxian He <hejingxian@huawei.com>
Signed-off-by: wuweinan <wuweinan@huawei.com>
parent 49c1f9f2
Loading
Loading
Loading
Loading
+208 −21
Original line number Diff line number Diff line
@@ -29,6 +29,10 @@
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#ifdef CONFIG_CVM_GUEST
#include <linux/swiotlb.h>
#include <asm/cvm_guest.h>
#endif

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
@@ -313,6 +317,91 @@ static DEFINE_RAW_SPINLOCK(vmovp_lock);

static DEFINE_IDA(its_vpeid_ida);

#ifdef CONFIG_CVM_GUEST
/*
 * Dummy device used as the handle for swiotlb allocations of memory that
 * must be shared between the confidential (cvm) guest and the host.
 */
static struct device cvm_alloc_device;
/* List of its_device_order records, one per live its_device (cvm world only). */
static LIST_HEAD(cvm_its_nodes);
/* Protects cvm_its_nodes; always taken with IRQs disabled. */
static raw_spinlock_t cvm_its_lock;

/*
 * Bookkeeping node linking an its_device to the page order of its shared
 * ITT allocation, so the correct size can be handed back to the shared-page
 * free path when the device is destroyed.
 */
struct its_device_order {
	struct its_device *dev;
	struct list_head entry;
	int itt_order;
};

static inline struct page *its_alloc_shared_pages_node(int node, gfp_t gfp,
			unsigned int order)
{
	/*
	 * Guest<->host shared memory comes from the swiotlb pool.  The
	 * @node and @gfp arguments exist only for signature parity with
	 * the regular alloc_pages_node() call sites and are ignored here
	 * (swiotlb_alloc() takes neither).
	 */
	size_t nr_bytes = (1 << order) * PAGE_SIZE;

	return swiotlb_alloc(&cvm_alloc_device, nr_bytes);
}

static inline struct page *its_alloc_shared_pages(gfp_t gfp, unsigned int order)
{
	/* Node-agnostic wrapper: allocate shared pages with no NUMA preference. */
	return its_alloc_shared_pages_node(NUMA_NO_NODE, gfp, order);
}

/*
 * Return shared (guest<->host) memory to the swiotlb pool.
 *
 * @addr:  kernel virtual address of the allocation — every caller passes a
 *         page_address() result or an itt virtual address.
 * @order: page order of the allocation.  A negative order is the "lookup
 *         failed" sentinel from get_its_device_order() and means there is
 *         nothing to free.
 */
static void its_free_shared_pages(void *addr, int order)
{
	if (order < 0)
		return;

	/*
	 * swiotlb_free() expects a struct page *, but @addr is a virtual
	 * address; the original "(struct page *)addr" cast handed it a
	 * vaddr reinterpreted as a page pointer.  Convert properly.
	 */
	swiotlb_free(&cvm_alloc_device, virt_to_page(addr),
		     (1 << order) * PAGE_SIZE);
}

/*
 * Record @dev's ITT page order on the global list so that it can be looked
 * up again when the device is torn down.  Returns 0 on success, -ENOMEM if
 * the bookkeeping node cannot be allocated.
 */
static int add_its_device_order(struct its_device *dev, int itt_order)
{
	unsigned long flags;
	struct its_device_order *node;

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->dev = dev;
	node->itt_order = itt_order;

	raw_spin_lock_irqsave(&cvm_its_lock, flags);
	list_add_tail(&node->entry, &cvm_its_nodes);
	raw_spin_unlock_irqrestore(&cvm_its_lock, flags);

	return 0;
}

/*
 * Look up @dev's recorded ITT page order, removing and freeing the record.
 * Returns the order, or -1 when no record exists — a value the shared-page
 * free path treats as "nothing to free".
 */
static int get_its_device_order(struct its_device *dev)
{
	int order = -1;
	unsigned long flags;
	struct its_device_order *pos, *n;

	raw_spin_lock_irqsave(&cvm_its_lock, flags);
	list_for_each_entry_safe(pos, n, &cvm_its_nodes, entry) {
		if (pos->dev != dev)
			continue;
		order = pos->itt_order;
		list_del(&pos->entry);
		kfree(pos);
		break;
	}
	raw_spin_unlock_irqrestore(&cvm_its_lock, flags);

	return order;
}

/*
 * Allocate @sz bytes of shared memory for @dev's ITT and remember the page
 * order so its_free_device() can free the right amount later.  Returns the
 * kernel virtual address of the allocation, or NULL on failure.
 */
static void *its_alloc_shared_page_address(struct its_device *dev,
			struct its_node *its, int sz)
{
	struct page *page;
	int itt_order = get_order(sz);

	page = its_alloc_shared_pages_node(its->numa_node,
					   GFP_KERNEL | __GFP_ZERO, itt_order);
	if (!page)
		return NULL;

	/*
	 * Record the order only after the allocation succeeds.  The original
	 * code recorded it first and, on allocation failure, leaked the
	 * record — leaving a stale dev pointer on cvm_its_nodes once the
	 * caller kfree()d the device.
	 */
	if (add_its_device_order(dev, itt_order)) {
		its_free_shared_pages(page_address(page), itt_order);
		return NULL;
	}

	return page_address(page);
}
#endif

static void free_devid_to_rsv_pools(struct its_device *its_dev)
{
	struct rsv_devid_pool *pool = its_dev->devid_pool;
@@ -2447,6 +2536,12 @@ static struct page *its_allocate_prop_table(gfp_t gfp_flags)
{
	struct page *prop_page;

#ifdef CONFIG_CVM_GUEST
	if (is_cvm_world())
		prop_page = its_alloc_shared_pages(gfp_flags,
			get_order(LPI_PROPBASE_SZ));
	else
#endif
		prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
	if (!prop_page)
		return NULL;
@@ -2458,6 +2553,12 @@ static struct page *its_allocate_prop_table(gfp_t gfp_flags)

/* Release the LPI property table via whichever allocator produced it. */
static void its_free_prop_table(struct page *prop_page)
{
	int order = get_order(LPI_PROPBASE_SZ);

#ifdef CONFIG_CVM_GUEST
	if (is_cvm_world()) {
		its_free_shared_pages(page_address(prop_page), order);
		return;
	}
#endif
	free_pages((unsigned long)page_address(prop_page), order);
}
@@ -2581,6 +2682,12 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
		order = get_order(GITS_BASER_PAGES_MAX * psz);
	}

#ifdef CONFIG_CVM_GUEST
	if (is_cvm_world())
		page = its_alloc_shared_pages_node(its->numa_node,
			GFP_KERNEL | __GFP_ZERO, order);
	else
#endif
		page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
	if (!page)
		return -ENOMEM;
@@ -2594,6 +2701,11 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
		/* 52bit PA is supported only when PageSize=64K */
		if (psz != SZ_64K) {
			pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
#ifdef CONFIG_CVM_GUEST
			if (is_cvm_world())
				its_free_shared_pages(base, order);
			else
#endif
				free_pages((unsigned long)base, order);
			return -ENXIO;
		}
@@ -2648,6 +2760,11 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
		pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
		       &its->phys_base, its_base_type_string[type],
		       val, tmp);
#ifdef CONFIG_CVM_GUEST
		if (is_cvm_world())
			its_free_shared_pages(base, order);
		else
#endif
			free_pages((unsigned long)base, order);
		return -ENXIO;
	}
@@ -2787,6 +2904,12 @@ static void its_free_tables(struct its_node *its)

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		if (its->tables[i].base) {
#ifdef CONFIG_CVM_GUEST
			/*
			 * Fix inverted condition: the original tested
			 * !is_cvm_world(), which freed swiotlb-shared tables
			 * with free_pages() in the cvm world and ran the
			 * shared-page free path in the normal world — the
			 * opposite of every other call site in this driver.
			 */
			if (is_cvm_world())
				its_free_shared_pages(its->tables[i].base,
					its->tables[i].order);
			else
#endif
				free_pages((unsigned long)its->tables[i].base,
					   its->tables[i].order);
			its->tables[i].base = NULL;
@@ -3051,6 +3174,12 @@ static bool allocate_vpe_l2_table(int cpu, u32 id)

	/* Allocate memory for 2nd level table */
	if (!table[idx]) {
#ifdef CONFIG_CVM_GUEST
		if (is_cvm_world())
			page = its_alloc_shared_pages(GFP_KERNEL | __GFP_ZERO,
				get_order(psz));
		else
#endif
			page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
		if (!page)
			return false;
@@ -3170,6 +3299,12 @@ static int allocate_vpe_l1_table(void)

	pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
		 np, npg, psz, epp, esz);
#ifdef CONFIG_CVM_GUEST
	if (is_cvm_world())
		page = its_alloc_shared_pages(GFP_ATOMIC | __GFP_ZERO,
			get_order(np * PAGE_SIZE));
	else
#endif
		page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
	if (!page)
		return -ENOMEM;
@@ -3218,6 +3353,12 @@ static struct page *its_allocate_pending_table(gfp_t gfp_flags)
{
	struct page *pend_page;

#ifdef CONFIG_CVM_GUEST
	if (is_cvm_world())
		pend_page = its_alloc_shared_pages(gfp_flags | __GFP_ZERO,
			get_order(LPI_PENDBASE_SZ));
	else
#endif
		pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
					get_order(LPI_PENDBASE_SZ));
	if (!pend_page)
@@ -3231,6 +3372,12 @@ static struct page *its_allocate_pending_table(gfp_t gfp_flags)

/* Release a per-CPU LPI pending table via whichever allocator produced it. */
static void its_free_pending_table(struct page *pt)
{
#ifdef CONFIG_CVM_GUEST
	if (is_cvm_world()) {
		its_free_shared_pages(page_address(pt),
				      get_order(LPI_PENDBASE_SZ));
		return;
	}
#endif
	free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
}

@@ -3768,6 +3915,13 @@ static bool its_alloc_table_entry(struct its_node *its,

	/* Allocate memory for 2nd level table */
	if (!table[idx]) {
#ifdef CONFIG_CVM_GUEST
		if (is_cvm_world())
			page = its_alloc_shared_pages_node(its->numa_node,
						GFP_KERNEL | __GFP_ZERO,
						get_order(baser->psz));
		else
#endif
			page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
						get_order(baser->psz));
		if (!page)
@@ -3872,6 +4026,11 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
	nr_ites = max(2, nvecs);
	sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
	sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
#ifdef CONFIG_CVM_GUEST
	if (is_cvm_world())
		itt = its_alloc_shared_page_address(dev, its, sz);
	else
#endif
		itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
	if (alloc_lpis) {
		lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
@@ -3886,6 +4045,11 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,

	if (!dev || !itt ||  !col_map || (!lpi_map && alloc_lpis)) {
		kfree(dev);
#ifdef CONFIG_CVM_GUEST
		if (is_cvm_world())
			its_free_shared_pages(itt, get_order(sz));
		else
#endif
			kfree(itt);
		kfree(lpi_map);
		kfree(col_map);
@@ -3923,6 +4087,11 @@ static void its_free_device(struct its_device *its_dev)
	list_del(&its_dev->entry);
	raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
	kfree(its_dev->event_map.col_map);
#ifdef CONFIG_CVM_GUEST
	if (is_cvm_world())
		its_free_shared_pages(its_dev->itt, get_its_device_order(its_dev));
	else
#endif
		kfree(its_dev->itt);

	if (its_dev->is_vdev) {
@@ -5594,6 +5763,13 @@ static int __init its_probe_one(struct resource *res,

	its->numa_node = numa_node;

#ifdef CONFIG_CVM_GUEST
	if (is_cvm_world())
		page = its_alloc_shared_pages_node(its->numa_node,
					GFP_KERNEL | __GFP_ZERO,
					get_order(ITS_CMD_QUEUE_SZ));
	else
#endif
		page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
					get_order(ITS_CMD_QUEUE_SZ));
	if (!page) {
@@ -5661,6 +5837,11 @@ static int __init its_probe_one(struct resource *res,
out_free_tables:
	its_free_tables(its);
out_free_cmd:
#ifdef CONFIG_CVM_GUEST
	if (is_cvm_world())
		its_free_shared_pages(its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
	else
#endif
		free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
out_unmap_sgir:
	if (its->sgir_base)
@@ -5957,6 +6138,12 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
	bool has_vtimer_irqbypass = false;
	int err;

#ifdef CONFIG_CVM_GUEST
	if (is_cvm_world()) {
		device_initialize(&cvm_alloc_device);
		raw_spin_lock_init(&cvm_its_lock);
	}
#endif
	gic_rdists = rdists;

	its_parent = parent_domain;