Commit 88c5d0a2 authored by Vinod Koul's avatar Vinod Koul
Browse files

Merge branch 'fixes' into next



Signed-off-by: default avatarVinod Koul <vkoul@kernel.org>
parents 49c4959f 6b4b87f2
Loading
Loading
Loading
Loading
+14 −0
Original line number Diff line number Diff line
@@ -292,6 +292,14 @@ struct idxd_desc {
	struct idxd_wq *wq;
};

/*
 * Software-defined completion status used to mark aborted descriptors.
 * The value 0xff is overloaded for this purpose because the hardware never
 * reports it in a descriptor's completion record; it can only appear in the
 * SWERR register, so software may use it without clashing with any real
 * hardware completion status.
 */
enum idxd_completion_status {
	IDXD_COMP_DESC_ABORT = 0xff,
};

#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)

@@ -478,4 +486,10 @@ static inline void perfmon_init(void) {}
static inline void perfmon_exit(void) {}
#endif

/*
 * Run the client's completion callback for @desc with @reason, then return
 * the descriptor to its workqueue's free pool.
 */
static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
{
	struct idxd_wq *wq = desc->wq;

	idxd_dma_complete_txd(desc, reason);
	idxd_free_desc(wq, desc);
}

#endif
+18 −9
Original line number Diff line number Diff line
@@ -245,12 +245,6 @@ static inline bool match_fault(struct idxd_desc *desc, u64 fault_addr)
	return false;
}

/*
 * Complete @desc back to the dmaengine client with @reason and release the
 * descriptor to its workqueue's free pool.
 */
static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
{
	idxd_dma_complete_txd(desc, reason);
	idxd_free_desc(desc->wq, desc);
}

static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
				     enum irq_work_type wtype,
				     int *processed, u64 data)
@@ -272,8 +266,16 @@ static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
		reason = IDXD_COMPLETE_DEV_FAIL;

	llist_for_each_entry_safe(desc, t, head, llnode) {
		if (desc->completion->status) {
			if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
		u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;

		if (status) {
			if (unlikely(status == IDXD_COMP_DESC_ABORT)) {
				complete_desc(desc, IDXD_COMPLETE_ABORT);
				(*processed)++;
				continue;
			}

			if (unlikely(status != DSA_COMP_SUCCESS))
				match_fault(desc, data);
			complete_desc(desc, reason);
			(*processed)++;
@@ -329,7 +331,14 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
	spin_unlock_irqrestore(&irq_entry->list_lock, flags);

	list_for_each_entry(desc, &flist, list) {
		if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
		u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;

		if (unlikely(status == IDXD_COMP_DESC_ABORT)) {
			complete_desc(desc, IDXD_COMPLETE_ABORT);
			continue;
		}

		if (unlikely(status != DSA_COMP_SUCCESS))
			match_fault(desc, data);
		complete_desc(desc, reason);
	}
+67 −8
Original line number Diff line number Diff line
@@ -79,9 +79,64 @@ void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
	sbitmap_queue_clear(&wq->sbq, desc->id, cpu);
}

/*
 * Try to remove the descriptor being aborted from the irq entry's work list.
 * Returns the descriptor if it was found and unlinked, NULL otherwise.
 * Caller must hold ie->list_lock.
 */
static struct idxd_desc *list_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
					 struct idxd_desc *desc)
{
	struct idxd_desc *cur, *next;

	lockdep_assert_held(&ie->list_lock);

	list_for_each_entry_safe(cur, next, &ie->work_list, list) {
		if (cur != desc)
			continue;
		list_del(&cur->list);
		return cur;
	}

	/*
	 * Not on the work list: the descriptor that needs to be aborted is
	 * held by the completion handler, which has taken it off the pending
	 * llist but has not yet added it to the work list. The interrupt
	 * handler will clean it up when it sees IDXD_COMP_DESC_ABORT as the
	 * completion status.
	 */
	return NULL;
}

/*
 * Abort @desc after a failed submission. The descriptor is tagged with the
 * software-defined IDXD_COMP_DESC_ABORT status, located on either the
 * pending llist or the work list of @ie, and completed with
 * IDXD_COMPLETE_ABORT. If it is in neither place, the irq handler currently
 * owns it and will complete the abort when it sees the status.
 */
static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
			     struct idxd_desc *desc)
{
	struct idxd_desc *d, *t, *found = NULL;
	struct llist_node *head;
	unsigned long flags;

	desc->completion->status = IDXD_COMP_DESC_ABORT;
	/*
	 * Grab the list lock so it will block the irq thread handler. This allows the
	 * abort code to locate the descriptor that needs to be aborted.
	 */
	spin_lock_irqsave(&ie->list_lock, flags);
	head = llist_del_all(&ie->pending_llist);
	if (head) {
		llist_for_each_entry_safe(d, t, head, llnode) {
			if (d == desc) {
				found = desc;
				continue;
			}
			/*
			 * Requeue every *other* pending descriptor onto the
			 * work list so the irq handler still processes it.
			 * This must add @d, not @desc: adding the aborted
			 * descriptor's node once per iteration corrupts the
			 * list and drops the remaining pending descriptors.
			 */
			list_add_tail(&d->list, &ie->work_list);
		}
	}

	/* Not found on the pending llist; it may already be on the work list. */
	if (!found)
		found = list_abort_desc(wq, ie, desc);
	spin_unlock_irqrestore(&ie->list_lock, flags);

	if (found)
		complete_desc(found, IDXD_COMPLETE_ABORT);
}

int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
	struct idxd_device *idxd = wq->idxd;
	struct idxd_irq_entry *ie = NULL;
	void __iomem *portal;
	int rc;

@@ -99,6 +154,16 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
	 * even on UP because the recipient is a device.
	 */
	wmb();

	/*
	 * Pending the descriptor to the lockless list for the irq_entry
	 * that we designated the descriptor to.
	 */
	if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
		ie = &idxd->irq_entries[wq->id + 1];
		llist_add(&desc->llnode, &ie->pending_llist);
	}

	if (wq_dedicated(wq)) {
		iosubmit_cmds512(portal, desc->hw, 1);
	} else {
@@ -111,18 +176,12 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
		rc = enqcmds(portal, desc->hw);
		if (rc < 0) {
			percpu_ref_put(&wq->wq_active);
			if (ie)
				llist_abort_desc(wq, ie, desc);
			return rc;
		}
	}

	percpu_ref_put(&wq->wq_active);

	/*
	 * Pending the descriptor to the lockless list for the irq_entry
	 * that we designated the descriptor to.
	 */
	if (desc->hw->flags & IDXD_OP_FLAG_RCI)
		llist_add(&desc->llnode, &idxd->irq_entries[wq->id + 1].pending_llist);

	return 0;
}