Commit 2a8ed7ef authored by Alex Williamson's avatar Alex Williamson
Browse files

Merge branches 'v5.20/vfio/spapr_tce-unused-arg-v1',...

Merge branches 'v5.20/vfio/spapr_tce-unused-arg-v1', 'v5.20/vfio/comment-typo-v1' and 'v5.20/vfio/vfio-ccw-rework-v4' into v5.20/vfio/next
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -8,7 +8,6 @@
 */

#include <linux/vfio.h>
#include <linux/mdev.h>

#include "vfio_ccw_private.h"

+17 −42
Original line number Diff line number Diff line
@@ -14,7 +14,6 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/mdev.h>

#include <asm/isc.h>
@@ -42,13 +41,6 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch)
	DECLARE_COMPLETION_ONSTACK(completion);
	int iretry, ret = 0;

	spin_lock_irq(sch->lock);
	if (!sch->schib.pmcw.ena)
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		goto out_unlock;

	iretry = 255;
	do {

@@ -75,9 +67,7 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch)
		spin_lock_irq(sch->lock);
		ret = cio_disable_subchannel(sch);
	} while (ret == -EBUSY);
out_unlock:
	private->state = VFIO_CCW_STATE_NOT_OPER;
	spin_unlock_irq(sch->lock);

	return ret;
}

@@ -107,9 +97,10 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
	/*
	 * Reset to IDLE only if processing of a channel program
	 * has finished. Do not overwrite a possible processing
	 * state if the final interrupt was for HSCH or CSCH.
	 * state if the interrupt was unsolicited, or if the final
	 * interrupt was for HSCH or CSCH.
	 */
	if (private->mdev && cp_is_finished)
	if (cp_is_finished)
		private->state = VFIO_CCW_STATE_IDLE;

	if (private->io_trigger)
@@ -147,7 +138,7 @@ static struct vfio_ccw_private *vfio_ccw_alloc_private(struct subchannel *sch)

	private->sch = sch;
	mutex_init(&private->io_mutex);
	private->state = VFIO_CCW_STATE_NOT_OPER;
	private->state = VFIO_CCW_STATE_STANDBY;
	INIT_LIST_HEAD(&private->crw);
	INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
	INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
@@ -231,26 +222,15 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)

	dev_set_drvdata(&sch->dev, private);

	spin_lock_irq(sch->lock);
	sch->isc = VFIO_CCW_ISC;
	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	spin_unlock_irq(sch->lock);
	ret = mdev_register_device(&sch->dev, &vfio_ccw_mdev_driver);
	if (ret)
		goto out_free;

	private->state = VFIO_CCW_STATE_STANDBY;

	ret = vfio_ccw_mdev_reg(sch);
	if (ret)
		goto out_disable;

	VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
			   sch->schid.cssid, sch->schid.ssid,
			   sch->schid.sch_no);
	return 0;

out_disable:
	cio_disable_subchannel(sch);
out_free:
	dev_set_drvdata(&sch->dev, NULL);
	vfio_ccw_free_private(private);
@@ -261,8 +241,8 @@ static void vfio_ccw_sch_remove(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

	vfio_ccw_sch_quiesce(sch);
	vfio_ccw_mdev_unreg(sch);
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
	mdev_unregister_device(&sch->dev);

	dev_set_drvdata(&sch->dev, NULL);

@@ -275,7 +255,10 @@ static void vfio_ccw_sch_remove(struct subchannel *sch)

static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
	vfio_ccw_sch_quiesce(sch);
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
}

/**
@@ -301,18 +284,10 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
	if (work_pending(&sch->todo_work))
		goto out_unlock;

	if (cio_update_schib(sch)) {
		vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
	rc = 0;
		goto out_unlock;
	}

	private = dev_get_drvdata(&sch->dev);
	if (private->state == VFIO_CCW_STATE_NOT_OPER) {
		private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
				 VFIO_CCW_STATE_STANDBY;
	}
	rc = 0;
	if (cio_update_schib(sch))
		vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);
@@ -358,8 +333,8 @@ static int vfio_ccw_chp_event(struct subchannel *sch,
		return 0;

	trace_vfio_ccw_chp_event(private->sch->schid, mask, event);
	VFIO_CCW_MSG_EVENT(2, "%pUl (%x.%x.%04x): mask=0x%x event=%d\n",
			   mdev_uuid(private->mdev), sch->schid.cssid,
	VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: mask=0x%x event=%d\n",
			   sch->schid.cssid,
			   sch->schid.ssid, sch->schid.sch_no,
			   mask, event);

+82 −17
Original line number Diff line number Diff line
@@ -10,7 +10,8 @@
 */

#include <linux/vfio.h>
#include <linux/mdev.h>

#include <asm/isc.h>

#include "ioasm.h"
#include "vfio_ccw_private.h"
@@ -161,8 +162,12 @@ static void fsm_notoper(struct vfio_ccw_private *private,
{
	struct subchannel *sch = private->sch;

	VFIO_CCW_TRACE_EVENT(2, "notoper");
	VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
	VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: notoper event %x state %x\n",
			   sch->schid.cssid,
			   sch->schid.ssid,
			   sch->schid.sch_no,
			   event,
			   private->state);

	/*
	 * TODO:
@@ -170,6 +175,9 @@ static void fsm_notoper(struct vfio_ccw_private *private,
	 */
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	private->state = VFIO_CCW_STATE_NOT_OPER;

	/* This is usually handled during CLOSE event */
	cp_free(&private->cp);
}

/*
@@ -242,7 +250,6 @@ static void fsm_io_request(struct vfio_ccw_private *private,
	union orb *orb;
	union scsw *scsw = &private->scsw;
	struct ccw_io_region *io_region = private->io_region;
	struct mdev_device *mdev = private->mdev;
	char *errstr = "request";
	struct subchannel_id schid = get_schid(private);

@@ -256,8 +263,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
		if (orb->tm.b) {
			io_region->ret_code = -EOPNOTSUPP;
			VFIO_CCW_MSG_EVENT(2,
					   "%pUl (%x.%x.%04x): transport mode\n",
					   mdev_uuid(mdev), schid.cssid,
					   "sch %x.%x.%04x: transport mode\n",
					   schid.cssid,
					   schid.ssid, schid.sch_no);
			errstr = "transport mode";
			goto err_out;
@@ -265,8 +272,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
		io_region->ret_code = cp_init(&private->cp, orb);
		if (io_region->ret_code) {
			VFIO_CCW_MSG_EVENT(2,
					   "%pUl (%x.%x.%04x): cp_init=%d\n",
					   mdev_uuid(mdev), schid.cssid,
					   "sch %x.%x.%04x: cp_init=%d\n",
					   schid.cssid,
					   schid.ssid, schid.sch_no,
					   io_region->ret_code);
			errstr = "cp init";
@@ -276,8 +283,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
		io_region->ret_code = cp_prefetch(&private->cp);
		if (io_region->ret_code) {
			VFIO_CCW_MSG_EVENT(2,
					   "%pUl (%x.%x.%04x): cp_prefetch=%d\n",
					   mdev_uuid(mdev), schid.cssid,
					   "sch %x.%x.%04x: cp_prefetch=%d\n",
					   schid.cssid,
					   schid.ssid, schid.sch_no,
					   io_region->ret_code);
			errstr = "cp prefetch";
@@ -289,8 +296,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
		io_region->ret_code = fsm_io_helper(private);
		if (io_region->ret_code) {
			VFIO_CCW_MSG_EVENT(2,
					   "%pUl (%x.%x.%04x): fsm_io_helper=%d\n",
					   mdev_uuid(mdev), schid.cssid,
					   "sch %x.%x.%04x: fsm_io_helper=%d\n",
					   schid.cssid,
					   schid.ssid, schid.sch_no,
					   io_region->ret_code);
			errstr = "cp fsm_io_helper";
@@ -300,16 +307,16 @@ static void fsm_io_request(struct vfio_ccw_private *private,
		return;
	} else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		VFIO_CCW_MSG_EVENT(2,
				   "%pUl (%x.%x.%04x): halt on io_region\n",
				   mdev_uuid(mdev), schid.cssid,
				   "sch %x.%x.%04x: halt on io_region\n",
				   schid.cssid,
				   schid.ssid, schid.sch_no);
		/* halt is handled via the async cmd region */
		io_region->ret_code = -EOPNOTSUPP;
		goto err_out;
	} else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		VFIO_CCW_MSG_EVENT(2,
				   "%pUl (%x.%x.%04x): clear on io_region\n",
				   mdev_uuid(mdev), schid.cssid,
				   "sch %x.%x.%04x: clear on io_region\n",
				   schid.cssid,
				   schid.ssid, schid.sch_no);
		/* clear is handled via the async cmd region */
		io_region->ret_code = -EOPNOTSUPP;
@@ -366,6 +373,54 @@ static void fsm_irq(struct vfio_ccw_private *private,
		complete(private->completion);
}

/*
 * fsm_open() - enable the subchannel for use.
 *
 * With sch->lock held, assign the vfio-ccw interruption subclass and
 * enable the subchannel via cio_enable_subchannel().  On success the
 * FSM state becomes IDLE; on failure the lock is dropped first and a
 * NOT_OPER event is raised.
 */
static void fsm_open(struct vfio_ccw_private *private,
		     enum vfio_ccw_event event)
{
	struct subchannel *sch = private->sch;
	int ret;

	spin_lock_irq(sch->lock);
	/* Route this subchannel's interrupts to the vfio-ccw ISC */
	sch->isc = VFIO_CCW_ISC;
	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	if (ret)
		goto err_unlock;

	private->state = VFIO_CCW_STATE_IDLE;
	spin_unlock_irq(sch->lock);
	return;

err_unlock:
	spin_unlock_irq(sch->lock);
	/* Enable failed: flag the device as not operational */
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
}

/*
 * fsm_close() - disable the subchannel and return to STANDBY.
 *
 * With sch->lock held, disable the subchannel.  If the disable reports
 * -EBUSY, quiesce outstanding I/O via vfio_ccw_sch_quiesce() first.
 * On success the channel-program resources are freed and the FSM state
 * becomes STANDBY; on any failure the lock is dropped and a NOT_OPER
 * event is raised.
 */
static void fsm_close(struct vfio_ccw_private *private,
		      enum vfio_ccw_event event)
{
	struct subchannel *sch = private->sch;
	int ret;

	spin_lock_irq(sch->lock);

	/* Already disabled: nothing sane to close, treat as not operational */
	if (!sch->schib.pmcw.ena)
		goto err_unlock;

	ret = cio_disable_subchannel(sch);
	/*
	 * Capture the quiesce result: the original discarded it, leaving
	 * ret == -EBUSY so the error path was taken even when the quiesce
	 * (and thus the disable) succeeded.
	 */
	if (ret == -EBUSY)
		ret = vfio_ccw_sch_quiesce(sch);
	if (ret)
		goto err_unlock;

	private->state = VFIO_CCW_STATE_STANDBY;
	spin_unlock_irq(sch->lock);
	cp_free(&private->cp);
	return;

err_unlock:
	spin_unlock_irq(sch->lock);
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
}

/*
 * Device statemachine
 */
@@ -375,29 +430,39 @@ fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_error,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_error,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_disabled_irq,
		[VFIO_CCW_EVENT_OPEN]		= fsm_nop,
		[VFIO_CCW_EVENT_CLOSE]		= fsm_nop,
	},
	[VFIO_CCW_STATE_STANDBY] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_error,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_error,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_disabled_irq,
		[VFIO_CCW_EVENT_OPEN]		= fsm_open,
		[VFIO_CCW_EVENT_CLOSE]		= fsm_notoper,
	},
	[VFIO_CCW_STATE_IDLE] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_request,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_request,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
		[VFIO_CCW_EVENT_OPEN]		= fsm_notoper,
		[VFIO_CCW_EVENT_CLOSE]		= fsm_close,
	},
	[VFIO_CCW_STATE_CP_PROCESSING] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_retry,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_retry,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
		[VFIO_CCW_EVENT_OPEN]		= fsm_notoper,
		[VFIO_CCW_EVENT_CLOSE]		= fsm_close,
	},
	[VFIO_CCW_STATE_CP_PENDING] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_busy,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_request,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
		[VFIO_CCW_EVENT_OPEN]		= fsm_notoper,
		[VFIO_CCW_EVENT_CLOSE]		= fsm_close,
	},
};
+23 −54
Original line number Diff line number Diff line
@@ -21,27 +21,16 @@ static const struct vfio_device_ops vfio_ccw_dev_ops;

static int vfio_ccw_mdev_reset(struct vfio_ccw_private *private)
{
	struct subchannel *sch;
	int ret;

	sch = private->sch;
	/*
	 * TODO:
	 * In the current stage, some things like "no I/O running" and "no
	 * interrupt pending" are clear, but we are not sure what other state
	 * we need to care about.
	 * There are still a lot more instructions that need to be handled.
	 * We should come back here later.
	 * If the FSM state is seen as Not Operational after closing
	 * and re-opening the mdev, return an error.
	 */
	ret = vfio_ccw_sch_quiesce(sch);
	if (ret)
		return ret;

	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	if (!ret)
		private->state = VFIO_CCW_STATE_IDLE;
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_OPEN);
	if (private->state == VFIO_CCW_STATE_NOT_OPER)
		return -EINVAL;

	return ret;
	return 0;
}

static int vfio_ccw_mdev_notifier(struct notifier_block *nb,
@@ -64,7 +53,6 @@ static int vfio_ccw_mdev_notifier(struct notifier_block *nb,
		if (vfio_ccw_mdev_reset(private))
			return NOTIFY_BAD;

		cp_free(&private->cp);
		return NOTIFY_OK;
	}

@@ -128,11 +116,8 @@ static int vfio_ccw_mdev_probe(struct mdev_device *mdev)
	vfio_init_group_dev(&private->vdev, &mdev->dev,
			    &vfio_ccw_dev_ops);

	private->mdev = mdev;
	private->state = VFIO_CCW_STATE_IDLE;

	VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: create\n",
			   mdev_uuid(mdev), private->sch->schid.cssid,
	VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: create\n",
			   private->sch->schid.cssid,
			   private->sch->schid.ssid,
			   private->sch->schid.sch_no);

@@ -145,8 +130,6 @@ static int vfio_ccw_mdev_probe(struct mdev_device *mdev)
err_atomic:
	vfio_uninit_group_dev(&private->vdev);
	atomic_inc(&private->avail);
	private->mdev = NULL;
	private->state = VFIO_CCW_STATE_IDLE;
	return ret;
}

@@ -154,23 +137,16 @@ static void vfio_ccw_mdev_remove(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private = dev_get_drvdata(mdev->dev.parent);

	VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: remove\n",
			   mdev_uuid(mdev), private->sch->schid.cssid,
	VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: remove\n",
			   private->sch->schid.cssid,
			   private->sch->schid.ssid,
			   private->sch->schid.sch_no);

	vfio_unregister_group_dev(&private->vdev);

	if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
	    (private->state != VFIO_CCW_STATE_STANDBY)) {
		if (!vfio_ccw_sch_quiesce(private->sch))
			private->state = VFIO_CCW_STATE_STANDBY;
		/* The state will be NOT_OPER on error. */
	}
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);

	vfio_uninit_group_dev(&private->vdev);
	cp_free(&private->cp);
	private->mdev = NULL;
	atomic_inc(&private->avail);
}

@@ -181,6 +157,10 @@ static int vfio_ccw_mdev_open_device(struct vfio_device *vdev)
	unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	int ret;

	/* Device cannot simply be opened again from this state */
	if (private->state == VFIO_CCW_STATE_NOT_OPER)
		return -EINVAL;

	private->nb.notifier_call = vfio_ccw_mdev_notifier;

	ret = vfio_register_notifier(vdev, VFIO_IOMMU_NOTIFY,
@@ -200,6 +180,12 @@ static int vfio_ccw_mdev_open_device(struct vfio_device *vdev)
	if (ret)
		goto out_unregister;

	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_OPEN);
	if (private->state == VFIO_CCW_STATE_NOT_OPER) {
		ret = -EINVAL;
		goto out_unregister;
	}

	return ret;

out_unregister:
@@ -213,14 +199,7 @@ static void vfio_ccw_mdev_close_device(struct vfio_device *vdev)
	struct vfio_ccw_private *private =
		container_of(vdev, struct vfio_ccw_private, vdev);

	if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
	    (private->state != VFIO_CCW_STATE_STANDBY)) {
		if (!vfio_ccw_mdev_reset(private))
			private->state = VFIO_CCW_STATE_STANDBY;
		/* The state will be NOT_OPER on error. */
	}

	cp_free(&private->cp);
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
	vfio_ccw_unregister_dev_regions(private);
	vfio_unregister_notifier(vdev, VFIO_IOMMU_NOTIFY, &private->nb);
}
@@ -657,13 +636,3 @@ struct mdev_driver vfio_ccw_mdev_driver = {
	.remove = vfio_ccw_mdev_remove,
	.supported_type_groups  = mdev_type_groups,
};

/*
 * vfio_ccw_mdev_reg() - register the subchannel device with the mdev
 * core using the vfio-ccw mdev driver.  Returns 0 on success or a
 * negative errno from mdev_register_device().
 */
int vfio_ccw_mdev_reg(struct subchannel *sch)
{
	return mdev_register_device(&sch->dev, &vfio_ccw_mdev_driver);
}

/* Undo vfio_ccw_mdev_reg(): unregister the device from the mdev core. */
void vfio_ccw_mdev_unreg(struct subchannel *sch)
{
	mdev_unregister_device(&sch->dev);
}
+3 −6
Original line number Diff line number Diff line
@@ -73,7 +73,6 @@ struct vfio_ccw_crw {
 * @state: internal state of the device
 * @completion: synchronization helper of the I/O completion
 * @avail: available for creating a mediated device
 * @mdev: pointer to the mediated device
 * @nb: notifier for vfio events
 * @io_region: MMIO region to input/output I/O arguments/results
 * @io_mutex: protect against concurrent update of I/O regions
@@ -97,7 +96,6 @@ struct vfio_ccw_private {
	int			state;
	struct completion	*completion;
	atomic_t		avail;
	struct mdev_device	*mdev;
	struct notifier_block	nb;
	struct ccw_io_region	*io_region;
	struct mutex		io_mutex;
@@ -119,9 +117,6 @@ struct vfio_ccw_private {
	struct work_struct	crw_work;
} __aligned(8);

int vfio_ccw_mdev_reg(struct subchannel *sch);
void vfio_ccw_mdev_unreg(struct subchannel *sch);

int vfio_ccw_sch_quiesce(struct subchannel *sch);

extern struct mdev_driver vfio_ccw_mdev_driver;
@@ -147,6 +142,8 @@ enum vfio_ccw_event {
	VFIO_CCW_EVENT_IO_REQ,
	VFIO_CCW_EVENT_INTERRUPT,
	VFIO_CCW_EVENT_ASYNC_REQ,
	VFIO_CCW_EVENT_OPEN,
	VFIO_CCW_EVENT_CLOSE,
	/* last element! */
	NR_VFIO_CCW_EVENTS
};
@@ -158,7 +155,7 @@ typedef void (fsm_func_t)(struct vfio_ccw_private *, enum vfio_ccw_event);
extern fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS];

static inline void vfio_ccw_fsm_event(struct vfio_ccw_private *private,
				     int event)
				      enum vfio_ccw_event event)
{
	trace_vfio_ccw_fsm_event(private->sch->schid, private->state, event);
	vfio_ccw_jumptable[private->state][event](private, event);
Loading