Commit 7ee4ccf5 authored by Logan Gunthorpe, committed by Jens Axboe
Browse files

block: set FOLL_PCI_P2PDMA in bio_map_user_iov()



When a bio's queue supports PCI P2PDMA, set FOLL_PCI_P2PDMA for
iov_iter_get_pages_flags(). This allows PCI P2PDMA pages to be
passed from userspace and enables the NVMe passthru requests to
use P2PDMA pages.

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20221021174116.7200-8-logang@deltatee.com


Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 5e3e3f2e
Loading
Loading
Loading
Loading
+8 −4
Original line number Diff line number Diff line
@@ -267,6 +267,7 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
{
	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
	unsigned int gup_flags = 0;
	struct bio *bio;
	int ret;
	int j;
@@ -278,6 +279,9 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
	if (bio == NULL)
		return -ENOMEM;

	if (blk_queue_pci_p2pdma(rq->q))
		gup_flags |= FOLL_PCI_P2PDMA;

	while (iov_iter_count(iter)) {
		struct page **pages, *stack_pages[UIO_FASTIOV];
		ssize_t bytes;
@@ -286,11 +290,11 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,

		if (nr_vecs <= ARRAY_SIZE(stack_pages)) {
			pages = stack_pages;
			bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
							nr_vecs, &offs);
			bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
						   nr_vecs, &offs, gup_flags);
		} else {
			bytes = iov_iter_get_pages_alloc2(iter, &pages,
							LONG_MAX, &offs);
			bytes = iov_iter_get_pages_alloc(iter, &pages,
						LONG_MAX, &offs, gup_flags);
		}
		if (unlikely(bytes <= 0)) {
			ret = bytes ? bytes : -EFAULT;