Commit c9358de1 authored by Brendan Cunningham, committed by Jason Gunthorpe
Browse files

IB/hfi1: Fix wrong mmu_node used for user SDMA packet after invalidate

The hfi1 user SDMA pinned-page cache will leave a stale cache entry when
the cache-entry's virtual address range is invalidated but that cache
entry is in-use by an outstanding SDMA request.

Subsequent user SDMA requests with buffers in or spanning the virtual
address range of the stale cache entry will result in packets constructed
from the wrong memory, the physical pages pointed to by the stale cache
entry.

To fix this, remove mmu_rb_node cache entries from the mmu_rb_handler
cache independent of the cache entry's refcount. Add 'struct kref
refcount' to struct mmu_rb_node and manage mmu_rb_node lifetime with
kref_get() and kref_put().

mmu_rb_node.refcount makes sdma_mmu_node.refcount redundant. Remove
'atomic_t refcount' from struct sdma_mmu_node and change sdma_mmu_node
code to use mmu_rb_node.refcount.

Move the mmu_rb_handler destructor call after a
wait-for-SDMA-request-completion call so mmu_rb_nodes that need
mmu_rb_handler's workqueue to queue themselves up for destruction from an
interrupt context may do so.

Fixes: f48ad614 ("IB/hfi1: Move driver out of staging")
Fixes: 00cbce5c ("IB/hfi1: Fix bugs with non-PAGE_SIZE-end multi-iovec user SDMA requests")
Link: https://lore.kernel.org/r/168451393605.3700681.13493776139032178861.stgit@awfm-02.cornelisnetworks.com


Reviewed-by: Dean Luick <dean.luick@cornelisnetworks.com>
Signed-off-by: Brendan Cunningham <bcunningham@cornelisnetworks.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 1cc625ce
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -215,11 +215,11 @@ static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx,
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		ret = sdma_txadd_page(dd,
				      NULL,
				      txreq,
				      skb_frag_page(frag),
				      frag->bv_offset,
				      skb_frag_size(frag));
				      skb_frag_size(frag),
				      NULL, NULL, NULL);
		if (unlikely(ret))
			break;
	}
+63 −38
Original line number Diff line number Diff line
@@ -19,8 +19,7 @@ static int mmu_notifier_range_start(struct mmu_notifier *,
		const struct mmu_notifier_range *);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
					   unsigned long, unsigned long);
static void do_remove(struct mmu_rb_handler *handler,
		      struct list_head *del_list);
static void release_immediate(struct kref *refcount);
static void handle_remove(struct work_struct *work);

static const struct mmu_notifier_ops mn_opts = {
@@ -106,7 +105,11 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	do_remove(handler, &del_list);
	while (!list_empty(&del_list)) {
		rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
		list_del(&rbnode->list);
		kref_put(&rbnode->refcount, release_immediate);
	}

	/* Now the mm may be freed. */
	mmdrop(handler->mn.mm);
@@ -134,12 +137,6 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
	}
	__mmu_int_rb_insert(mnode, &handler->root);
	list_add_tail(&mnode->list, &handler->lru_list);

	ret = handler->ops->insert(handler->ops_arg, mnode);
	if (ret) {
		__mmu_int_rb_remove(mnode, &handler->root);
		list_del(&mnode->list); /* remove from LRU list */
	}
	mnode->handler = handler;
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
@@ -183,6 +180,48 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
	return node;
}

/*
 * Must NOT call while holding mnode->handler->lock.
 * mnode->handler->ops->remove() may sleep and mnode->handler->lock is a
 * spinlock.
 */
static void release_immediate(struct kref *refcount)
{
	struct mmu_rb_node *mnode =
		container_of(refcount, struct mmu_rb_node, refcount);
	/*
	 * Last reference is gone: hand the node to the client's remove
	 * callback right away (callback may sleep; see comment above).
	 * Caller is responsible for having already unlinked the node
	 * from the rb tree / lru_list.
	 */
	mnode->handler->ops->remove(mnode->handler->ops_arg, mnode);
}

/* Caller must hold mnode->handler->lock */
static void release_nolock(struct kref *refcount)
{
	struct mmu_rb_node *mnode =
		container_of(refcount, struct mmu_rb_node, refcount);
	/*
	 * handler->lock is held, so ops->remove() (which may sleep) cannot
	 * be called here; park the node on del_list and let del_work do the
	 * actual removal from workqueue context.
	 */
	list_move(&mnode->list, &mnode->handler->del_list);
	queue_work(mnode->handler->wq, &mnode->handler->del_work);
}

/*
 * struct mmu_rb_node->refcount kref_put() callback.
 * Adds mmu_rb_node to mmu_rb_node->handler->del_list and queues
 * handler->del_work on handler->wq.
 * Does not remove mmu_rb_node from handler->lru_list or handler->rb_root.
 * Acquires mmu_rb_node->handler->lock; do not call while already holding
 * handler->lock.
 */
void hfi1_mmu_rb_release(struct kref *refcount)
{
	struct mmu_rb_node *mnode =
		container_of(refcount, struct mmu_rb_node, refcount);
	struct mmu_rb_handler *handler = mnode->handler;
	unsigned long flags;

	/*
	 * Take handler->lock only long enough to move the node onto
	 * del_list; the sleeping ops->remove() runs later from del_work.
	 * Safe to call from interrupt context for this reason.
	 */
	spin_lock_irqsave(&handler->lock, flags);
	list_move(&mnode->list, &mnode->handler->del_list);
	spin_unlock_irqrestore(&handler->lock, flags);
	queue_work(handler->wq, &handler->del_work);
}

void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
{
	struct mmu_rb_node *rbnode, *ptr;
@@ -197,6 +236,10 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)

	spin_lock_irqsave(&handler->lock, flags);
	list_for_each_entry_safe(rbnode, ptr, &handler->lru_list, list) {
		/* refcount == 1 implies mmu_rb_handler has only rbnode ref */
		if (kref_read(&rbnode->refcount) > 1)
			continue;

		if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
					&stop)) {
			__mmu_int_rb_remove(rbnode, &handler->root);
@@ -209,7 +252,7 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
	spin_unlock_irqrestore(&handler->lock, flags);

	list_for_each_entry_safe(rbnode, ptr, &del_list, list) {
		handler->ops->remove(handler->ops_arg, rbnode);
		kref_put(&rbnode->refcount, release_immediate);
	}
}

@@ -221,7 +264,6 @@ static int mmu_notifier_range_start(struct mmu_notifier *mn,
	struct rb_root_cached *root = &handler->root;
	struct mmu_rb_node *node, *ptr = NULL;
	unsigned long flags;
	bool added = false;

	spin_lock_irqsave(&handler->lock, flags);
	for (node = __mmu_int_rb_iter_first(root, range->start, range->end-1);
@@ -230,38 +272,16 @@ static int mmu_notifier_range_start(struct mmu_notifier *mn,
		ptr = __mmu_int_rb_iter_next(node, range->start,
					     range->end - 1);
		trace_hfi1_mmu_mem_invalidate(node->addr, node->len);
		if (handler->ops->invalidate(handler->ops_arg, node)) {
		/* Remove from rb tree and lru_list. */
		__mmu_int_rb_remove(node, root);
			/* move from LRU list to delete list */
			list_move(&node->list, &handler->del_list);
			added = true;
		}
		list_del_init(&node->list);
		kref_put(&node->refcount, release_nolock);
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	if (added)
		queue_work(handler->wq, &handler->del_work);

	return 0;
}

/*
 * Call the remove function for the given handler and the list.  This
 * is expected to be called with a delete list extracted from handler.
 * The caller should not be holding the handler lock.
 */
static void do_remove(struct mmu_rb_handler *handler,
		      struct list_head *del_list)
{
	struct mmu_rb_node *node;

	while (!list_empty(del_list)) {
		node = list_first_entry(del_list, struct mmu_rb_node, list);
		list_del(&node->list);
		handler->ops->remove(handler->ops_arg, node);
	}
}

/*
 * Work queue function to remove all nodes that have been queued up to
 * be removed.  The key feature is that mm->mmap_lock is not being held
@@ -274,11 +294,16 @@ static void handle_remove(struct work_struct *work)
						del_work);
	struct list_head del_list;
	unsigned long flags;
	struct mmu_rb_node *node;

	/* remove anything that is queued to get removed */
	spin_lock_irqsave(&handler->lock, flags);
	list_replace_init(&handler->del_list, &del_list);
	spin_unlock_irqrestore(&handler->lock, flags);

	do_remove(handler, &del_list);
	while (!list_empty(&del_list)) {
		node = list_first_entry(&del_list, struct mmu_rb_node, list);
		list_del(&node->list);
		handler->ops->remove(handler->ops_arg, node);
	}
}
+3 −0
Original line number Diff line number Diff line
@@ -16,6 +16,7 @@ struct mmu_rb_node {
	struct rb_node node;
	struct mmu_rb_handler *handler;
	struct list_head list;
	struct kref refcount;
};

/*
@@ -61,6 +62,8 @@ int hfi1_mmu_rb_register(void *ops_arg,
void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler);
int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
		       struct mmu_rb_node *mnode);
void hfi1_mmu_rb_release(struct kref *refcount);

void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg);
struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
					  unsigned long addr,
+18 −5
Original line number Diff line number Diff line
@@ -1593,7 +1593,20 @@ static inline void sdma_unmap_desc(
	struct hfi1_devdata *dd,
	struct sdma_desc *descp)
{
	system_descriptor_complete(dd, descp);
	switch (sdma_mapping_type(descp)) {
	case SDMA_MAP_SINGLE:
		dma_unmap_single(&dd->pcidev->dev, sdma_mapping_addr(descp),
				 sdma_mapping_len(descp), DMA_TO_DEVICE);
		break;
	case SDMA_MAP_PAGE:
		dma_unmap_page(&dd->pcidev->dev, sdma_mapping_addr(descp),
			       sdma_mapping_len(descp), DMA_TO_DEVICE);
		break;
	}

	if (descp->pinning_ctx && descp->ctx_put)
		descp->ctx_put(descp->pinning_ctx);
	descp->pinning_ctx = NULL;
}

/*
@@ -3113,8 +3126,8 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,

		/* Add descriptor for coalesce buffer */
		tx->desc_limit = MAX_DESC;
		return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, NULL, tx,
					 addr, tx->tlen);
		return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
					 addr, tx->tlen, NULL, NULL, NULL);
	}

	return 1;
@@ -3157,9 +3170,9 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
	make_tx_sdma_desc(
		tx,
		SDMA_MAP_NONE,
		NULL,
		dd->sdma_pad_phys,
		sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
		sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)),
		NULL, NULL, NULL);
	tx->num_desc++;
	_sdma_close_tx(dd, tx);
	return rval;
+32 −15
Original line number Diff line number Diff line
@@ -594,9 +594,11 @@ static inline dma_addr_t sdma_mapping_addr(struct sdma_desc *d)
static inline void make_tx_sdma_desc(
	struct sdma_txreq *tx,
	int type,
	void *pinning_ctx,
	dma_addr_t addr,
	size_t len)
	size_t len,
	void *pinning_ctx,
	void (*ctx_get)(void *),
	void (*ctx_put)(void *))
{
	struct sdma_desc *desc = &tx->descp[tx->num_desc];

@@ -613,7 +615,11 @@ static inline void make_tx_sdma_desc(
				<< SDMA_DESC0_PHY_ADDR_SHIFT) |
			(((u64)len & SDMA_DESC0_BYTE_COUNT_MASK)
				<< SDMA_DESC0_BYTE_COUNT_SHIFT);

	desc->pinning_ctx = pinning_ctx;
	desc->ctx_put = ctx_put;
	if (pinning_ctx && ctx_get)
		ctx_get(pinning_ctx);
}

/* helper to extend txreq */
@@ -645,18 +651,20 @@ static inline void _sdma_close_tx(struct hfi1_devdata *dd,
static inline int _sdma_txadd_daddr(
	struct hfi1_devdata *dd,
	int type,
	void *pinning_ctx,
	struct sdma_txreq *tx,
	dma_addr_t addr,
	u16 len)
	u16 len,
	void *pinning_ctx,
	void (*ctx_get)(void *),
	void (*ctx_put)(void *))
{
	int rval = 0;

	make_tx_sdma_desc(
		tx,
		type,
		pinning_ctx,
		addr, len);
		addr, len,
		pinning_ctx, ctx_get, ctx_put);
	WARN_ON(len > tx->tlen);
	tx->num_desc++;
	tx->tlen -= len;
@@ -676,11 +684,18 @@ static inline int _sdma_txadd_daddr(
/**
 * sdma_txadd_page() - add a page to the sdma_txreq
 * @dd: the device to use for mapping
 * @pinning_ctx: context to be released at descriptor retirement
 * @tx: tx request to which the page is added
 * @page: page to map
 * @offset: offset within the page
 * @len: length in bytes
 * @pinning_ctx: context to be stored on struct sdma_desc .pinning_ctx. Not
 *               added if coalesce buffer is used. E.g. pointer to pinned-page
 *               cache entry for the sdma_desc.
 * @ctx_get: optional function to take reference to @pinning_ctx. Not called if
 *           @pinning_ctx is NULL.
 * @ctx_put: optional function to release reference to @pinning_ctx after
 *           sdma_desc completes. May be called in interrupt context so must
 *           not sleep. Not called if @pinning_ctx is NULL.
 *
 * This is used to add a page/offset/length descriptor.
 *
@@ -692,11 +707,13 @@ static inline int _sdma_txadd_daddr(
 */
static inline int sdma_txadd_page(
	struct hfi1_devdata *dd,
	void *pinning_ctx,
	struct sdma_txreq *tx,
	struct page *page,
	unsigned long offset,
	u16 len)
	u16 len,
	void *pinning_ctx,
	void (*ctx_get)(void *),
	void (*ctx_put)(void *))
{
	dma_addr_t addr;
	int rval;
@@ -720,7 +737,8 @@ static inline int sdma_txadd_page(
		return -ENOSPC;
	}

	return _sdma_txadd_daddr(dd, SDMA_MAP_PAGE, pinning_ctx, tx, addr, len);
	return _sdma_txadd_daddr(dd, SDMA_MAP_PAGE, tx, addr, len,
				 pinning_ctx, ctx_get, ctx_put);
}

/**
@@ -754,8 +772,8 @@ static inline int sdma_txadd_daddr(
			return rval;
	}

	return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, NULL, tx,
				 addr, len);
	return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len,
				 NULL, NULL, NULL);
}

/**
@@ -801,7 +819,8 @@ static inline int sdma_txadd_kvaddr(
		return -ENOSPC;
	}

	return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, NULL, tx, addr, len);
	return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx, addr, len,
				 NULL, NULL, NULL);
}

struct iowait_work;
@@ -1034,6 +1053,4 @@ u16 sdma_get_descq_cnt(void);
extern uint mod_num_sdma;

void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid);

void system_descriptor_complete(struct hfi1_devdata *dd, struct sdma_desc *descp);
#endif
Loading