Commit 6bf8819f authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'for-linus-5.12b-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen fixes from Juergen Gross:
 "Two fix series and a single cleanup:

   - a small cleanup patch to remove unneeded symbol exports

   - a series to cleanup Xen grant handling (avoiding allocations in
     some cases, and using common defines for "invalid" values)

   - a series to address a race issue in Xen event channel handling"

* tag 'for-linus-5.12b-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  Xen/gntdev: don't needlessly use kvcalloc()
  Xen/gnttab: introduce common INVALID_GRANT_{HANDLE,REF}
  Xen/gntdev: don't needlessly allocate k{,un}map_ops[]
  Xen: drop exports of {set,clear}_foreign_p2m_mapping()
  xen/events: avoid handling the same event on two cpus at the same time
  xen/events: don't unmask an event channel when an eoi is pending
  xen/events: reset affinity of 2-level event when tearing it down
parents f78d76e7 f1d20d86
Loading
Loading
Loading
Loading
+2 −3
Original line number Diff line number Diff line
@@ -11,6 +11,7 @@

#include <xen/xen.h>
#include <xen/interface/memory.h>
#include <xen/grant_table.h>
#include <xen/page.h>
#include <xen/swiotlb-xen.h>

@@ -109,7 +110,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
		map_ops[i].status = GNTST_general_error;
		unmap.host_addr = map_ops[i].host_addr,
		unmap.handle = map_ops[i].handle;
		map_ops[i].handle = ~0;
		map_ops[i].handle = INVALID_GRANT_HANDLE;
		if (map_ops[i].flags & GNTMAP_device_map)
			unmap.dev_bus_addr = map_ops[i].dev_bus_addr;
		else
@@ -130,7 +131,6 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,

	return 0;
}
EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);

int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
			      struct gnttab_unmap_grant_ref *kunmap_ops,
@@ -145,7 +145,6 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,

	return 0;
}
EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);

bool __set_phys_to_machine_multi(unsigned long pfn,
		unsigned long mfn, unsigned long nr_pages)
+2 −4
Original line number Diff line number Diff line
@@ -741,7 +741,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
		map_ops[i].status = GNTST_general_error;
		unmap[0].host_addr = map_ops[i].host_addr,
		unmap[0].handle = map_ops[i].handle;
		map_ops[i].handle = ~0;
		map_ops[i].handle = INVALID_GRANT_HANDLE;
		if (map_ops[i].flags & GNTMAP_device_map)
			unmap[0].dev_bus_addr = map_ops[i].dev_bus_addr;
		else
@@ -751,7 +751,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
			kmap_ops[i].status = GNTST_general_error;
			unmap[1].host_addr = kmap_ops[i].host_addr,
			unmap[1].handle = kmap_ops[i].handle;
			kmap_ops[i].handle = ~0;
			kmap_ops[i].handle = INVALID_GRANT_HANDLE;
			if (kmap_ops[i].flags & GNTMAP_device_map)
				unmap[1].dev_bus_addr = kmap_ops[i].dev_bus_addr;
			else
@@ -776,7 +776,6 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
out:
	return ret;
}
EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);

int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
			      struct gnttab_unmap_grant_ref *kunmap_ops,
@@ -802,7 +801,6 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,

	return ret;
}
EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);

#ifdef CONFIG_XEN_DEBUG_FS
#include <linux/debugfs.h>
+2 −2
Original line number Diff line number Diff line
@@ -26,7 +26,7 @@
#include <xen/platform_pci.h>

#include <asm/xen/swiotlb-xen.h>
#define INVALID_GRANT_REF (0)

#define INVALID_EVTCHN    (-1)

struct pci_bus_entry {
@@ -42,7 +42,7 @@ struct pcifront_device {
	struct list_head root_buses;

	int evtchn;
	int gnt_ref;
	grant_ref_t gnt_ref;

	int irq;

+15 −7
Original line number Diff line number Diff line
@@ -47,6 +47,11 @@ static unsigned evtchn_2l_max_channels(void)
	return EVTCHN_2L_NR_CHANNELS;
}

/*
 * Drop @evtchn from @cpu's 2-level per-cpu event bitmap so the channel is
 * no longer considered bound to that cpu.
 * NOTE(review): clear_bit() is atomic but unordered; presumably callers
 * provide any required synchronization — confirm at call sites.
 */
static void evtchn_2l_remove(evtchn_port_t evtchn, unsigned int cpu)
{
	clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
}

static void evtchn_2l_bind_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
				  unsigned int old_cpu)
{
@@ -72,12 +77,6 @@ static bool evtchn_2l_is_pending(evtchn_port_t port)
	return sync_test_bit(port, BM(&s->evtchn_pending[0]));
}

/*
 * Atomically set the mask bit for @port in the shared-info page and return
 * its previous value (true if the port was already masked).
 */
static bool evtchn_2l_test_and_set_mask(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_and_set_bit(port, BM(&s->evtchn_mask[0]));
}

static void evtchn_2l_mask(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
@@ -355,18 +354,27 @@ static void evtchn_2l_resume(void)
				EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
}

/*
 * Per-cpu teardown for the 2-level ABI: zero the whole cpu_evtchn_mask
 * bitmap of @cpu so no stale channel->cpu bindings survive the cpu going
 * down.  Always returns 0 (success).
 * NOTE(review): looks like a cpuhp/teardown callback given the
 * (unsigned int cpu) -> int signature — confirm where it is registered.
 */
static int evtchn_2l_percpu_deinit(unsigned int cpu)
{
	memset(per_cpu(cpu_evtchn_mask, cpu), 0, sizeof(xen_ulong_t) *
			EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);

	return 0;
}

/*
 * Operations table wiring the 2-level event-channel ABI implementations
 * into the generic Xen event-channel core.  max_channels and nr_channels
 * deliberately share the same callback: the 2-level ABI always exposes
 * its full, fixed channel space.
 */
static const struct evtchn_ops evtchn_ops_2l = {
	.max_channels      = evtchn_2l_max_channels,
	.nr_channels       = evtchn_2l_max_channels,
	.remove            = evtchn_2l_remove,
	.bind_to_cpu       = evtchn_2l_bind_to_cpu,
	.clear_pending     = evtchn_2l_clear_pending,
	.set_pending       = evtchn_2l_set_pending,
	.is_pending        = evtchn_2l_is_pending,
	.test_and_set_mask = evtchn_2l_test_and_set_mask,
	.mask              = evtchn_2l_mask,
	.unmask            = evtchn_2l_unmask,
	.handle_events     = evtchn_2l_handle_events,
	.resume	           = evtchn_2l_resume,
	.percpu_deinit     = evtchn_2l_percpu_deinit,
};

void __init xen_evtchn_2l_init(void)
+100 −30
Original line number Diff line number Diff line
@@ -98,13 +98,19 @@ struct irq_info {
	short refcnt;
	u8 spurious_cnt;
	u8 is_accounted;
	enum xen_irq_type type; /* type */
	short type;		/* type: IRQT_* */
	u8 mask_reason;		/* Why is event channel masked */
#define EVT_MASK_REASON_EXPLICIT	0x01
#define EVT_MASK_REASON_TEMPORARY	0x02
#define EVT_MASK_REASON_EOI_PENDING	0x04
	u8 is_active;		/* Is event just being handled? */
	unsigned irq;
	evtchn_port_t evtchn;   /* event channel */
	unsigned short cpu;     /* cpu bound */
	unsigned short eoi_cpu; /* EOI must happen on this cpu-1 */
	unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */
	u64 eoi_time;           /* Time in jiffies when to EOI. */
	spinlock_t lock;

	union {
		unsigned short virq;
@@ -154,6 +160,7 @@ static DEFINE_RWLOCK(evtchn_rwlock);
 *   evtchn_rwlock
 *     IRQ-desc lock
 *       percpu eoi_list_lock
 *         irq_info->lock
 */

static LIST_HEAD(xen_irq_list_head);
@@ -304,6 +311,8 @@ static int xen_irq_info_common_setup(struct irq_info *info,
	info->irq = irq;
	info->evtchn = evtchn;
	info->cpu = cpu;
	info->mask_reason = EVT_MASK_REASON_EXPLICIT;
	spin_lock_init(&info->lock);

	ret = set_evtchn_to_irq(evtchn, irq);
	if (ret < 0)
@@ -377,6 +386,7 @@ static int xen_irq_info_pirq_setup(unsigned irq,
static void xen_irq_info_cleanup(struct irq_info *info)
{
	set_evtchn_to_irq(info->evtchn, -1);
	xen_evtchn_port_remove(info->evtchn, info->cpu);
	info->evtchn = 0;
	channels_on_cpu_dec(info);
}
@@ -458,6 +468,34 @@ unsigned int cpu_from_evtchn(evtchn_port_t evtchn)
	return ret;
}

/*
 * Mask the event channel of @info for the given @reason (one of the
 * EVT_MASK_REASON_* bits).  Reasons accumulate in info->mask_reason as a
 * bitmask; the channel is physically masked only on the transition from
 * "no reason" to "some reason", so multiple independent maskers can
 * coexist.  info->lock serializes against do_unmask().
 */
static void do_mask(struct irq_info *info, u8 reason)
{
	unsigned long flags;

	spin_lock_irqsave(&info->lock, flags);

	/* Only touch the hardware mask on the first reason being set. */
	if (!info->mask_reason)
		mask_evtchn(info->evtchn);

	info->mask_reason |= reason;

	spin_unlock_irqrestore(&info->lock, flags);
}

/*
 * Drop @reason from the mask-reason bitmask of @info and physically
 * unmask the event channel only when no other reason remains.  Counterpart
 * of do_mask(); the same info->lock makes the clear-and-test atomic with
 * respect to concurrent maskers.
 */
static void do_unmask(struct irq_info *info, u8 reason)
{
	unsigned long flags;

	spin_lock_irqsave(&info->lock, flags);

	info->mask_reason &= ~reason;

	/* Unmask only once every outstanding reason has been cleared. */
	if (!info->mask_reason)
		unmask_evtchn(info->evtchn);

	spin_unlock_irqrestore(&info->lock, flags);
}

#ifdef CONFIG_X86
static bool pirq_check_eoi_map(unsigned irq)
{
@@ -604,7 +642,7 @@ static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
	}

	info->eoi_time = 0;
	unmask_evtchn(evtchn);
	do_unmask(info, EVT_MASK_REASON_EOI_PENDING);
}

static void xen_irq_lateeoi_worker(struct work_struct *work)
@@ -773,6 +811,12 @@ static void xen_evtchn_close(evtchn_port_t port)
		BUG();
}

/*
 * Mark the event of @info as no longer being handled and clear its
 * pending bit.  The release store on is_active orders all handler-side
 * writes before the flag becomes visible, pairing with the acquire xchg
 * in handle_irq_for_port() that claims the event — this is what keeps the
 * same event from being processed on two cpus at once.
 */
static void event_handler_exit(struct irq_info *info)
{
	smp_store_release(&info->is_active, 0);
	clear_evtchn(info->evtchn);
}

static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
@@ -791,14 +835,15 @@ static void pirq_query_unmask(int irq)

static void eoi_pirq(struct irq_data *data)
{
	evtchn_port_t evtchn = evtchn_from_irq(data->irq);
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;
	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
	int rc = 0;

	if (!VALID_EVTCHN(evtchn))
		return;

	clear_evtchn(evtchn);
	event_handler_exit(info);

	if (pirq_needs_eoi(data->irq)) {
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
@@ -849,7 +894,8 @@ static unsigned int __startup_pirq(unsigned int irq)
		goto err;

out:
	unmask_evtchn(evtchn);
	do_unmask(info, EVT_MASK_REASON_EXPLICIT);

	eoi_pirq(irq_get_irq_data(irq));

	return 0;
@@ -876,7 +922,7 @@ static void shutdown_pirq(struct irq_data *data)
	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);
	do_mask(info, EVT_MASK_REASON_EXPLICIT);
	xen_evtchn_close(evtchn);
	xen_irq_info_cleanup(info);
}
@@ -1628,6 +1674,8 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
	}

	info = info_for_irq(irq);
	if (xchg_acquire(&info->is_active, 1))
		return;

	dev = (info->type == IRQT_EVTCHN) ? info->u.interdomain : NULL;
	if (dev)
@@ -1720,10 +1768,10 @@ void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu)
static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int masked;
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (!VALID_EVTCHN(evtchn))
		return -1;
@@ -1739,7 +1787,7 @@ static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu)
	 * Mask the event while changing the VCPU binding to prevent
	 * it being delivered on an unexpected VCPU.
	 */
	masked = test_and_set_mask(evtchn);
	do_mask(info, EVT_MASK_REASON_TEMPORARY);

	/*
	 * If this fails, it usually just indicates that we're dealing with a
@@ -1749,8 +1797,7 @@ static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu)
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu, false);

	if (!masked)
		unmask_evtchn(evtchn);
	do_unmask(info, EVT_MASK_REASON_TEMPORARY);

	return 0;
}
@@ -1789,7 +1836,7 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
	unsigned int tcpu = select_target_cpu(dest);
	int ret;

	ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu);
	ret = xen_rebind_evtchn_to_cpu(info_for_irq(data->irq), tcpu);
	if (!ret)
		irq_data_update_effective_affinity(data, cpumask_of(tcpu));

@@ -1798,28 +1845,29 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,

static void enable_dynirq(struct irq_data *data)
{
	evtchn_port_t evtchn = evtchn_from_irq(data->irq);
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
		do_unmask(info, EVT_MASK_REASON_EXPLICIT);
}

static void disable_dynirq(struct irq_data *data)
{
	evtchn_port_t evtchn = evtchn_from_irq(data->irq);
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
		do_mask(info, EVT_MASK_REASON_EXPLICIT);
}

static void ack_dynirq(struct irq_data *data)
{
	evtchn_port_t evtchn = evtchn_from_irq(data->irq);

	if (!VALID_EVTCHN(evtchn))
		return;
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	clear_evtchn(evtchn);
	if (VALID_EVTCHN(evtchn))
		event_handler_exit(info);
}

static void mask_ack_dynirq(struct irq_data *data)
@@ -1828,18 +1876,39 @@ static void mask_ack_dynirq(struct irq_data *data)
	ack_dynirq(data);
}

/*
 * ->irq_ack for the lateeoi irq_chip: keep the channel masked for the
 * EOI_PENDING reason until the delayed EOI arrives, then mark the handler
 * as finished via event_handler_exit().  Using a dedicated mask reason
 * (rather than a plain mask) lets the eventual EOI unmask without
 * clobbering an explicit mask someone else may hold.
 */
static void lateeoi_ack_dynirq(struct irq_data *data)
{
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (VALID_EVTCHN(evtchn)) {
		do_mask(info, EVT_MASK_REASON_EOI_PENDING);
		event_handler_exit(info);
	}
}

/*
 * ->irq_mask_ack for the lateeoi irq_chip: like lateeoi_ack_dynirq() but
 * the caller wants the channel explicitly masked, so record the
 * EVT_MASK_REASON_EXPLICIT reason instead — a later EOI must not unmask
 * a channel that was deliberately masked.
 */
static void lateeoi_mask_ack_dynirq(struct irq_data *data)
{
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (VALID_EVTCHN(evtchn)) {
		do_mask(info, EVT_MASK_REASON_EXPLICIT);
		event_handler_exit(info);
	}
}

static int retrigger_dynirq(struct irq_data *data)
{
	evtchn_port_t evtchn = evtchn_from_irq(data->irq);
	int masked;
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (!VALID_EVTCHN(evtchn))
		return 0;

	masked = test_and_set_mask(evtchn);
	do_mask(info, EVT_MASK_REASON_TEMPORARY);
	set_evtchn(evtchn);
	if (!masked)
		unmask_evtchn(evtchn);
	do_unmask(info, EVT_MASK_REASON_TEMPORARY);

	return 1;
}
@@ -1938,10 +2007,11 @@ static void restore_cpu_ipis(unsigned int cpu)
/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);
	struct irq_info *info = info_for_irq(irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
		event_handler_exit(info);
}
EXPORT_SYMBOL(xen_clear_irq_pending);
void xen_set_irq_pending(int irq)
@@ -2053,8 +2123,8 @@ static struct irq_chip xen_lateeoi_chip __read_mostly = {
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= mask_ack_dynirq,
	.irq_mask_ack		= mask_ack_dynirq,
	.irq_ack		= lateeoi_ack_dynirq,
	.irq_mask_ack		= lateeoi_mask_ack_dynirq,

	.irq_set_affinity	= set_affinity_irq,
	.irq_retrigger		= retrigger_dynirq,
Loading