Commit 90e7a6de authored by Maor Gottlieb, committed by Jason Gunthorpe

lib/scatterlist: Provide a dedicated function to support table append

RDMA is the only in-kernel user that uses __sg_alloc_table_from_pages() to
append pages dynamically. In the next patch that mode will be extended and
the function will get more parameters, so separate it into a dedicated
function to make the change clearer.

Link: https://lore.kernel.org/r/20210824142531.3877007-2-maorg@nvidia.com
Signed-off-by: Maor Gottlieb <maorg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 7c60610d
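
For context, the split leaves two entry points: sg_alloc_table_from_pages_segment()
for one-shot callers that only need a max-segment bound and get a plain errno
back, and sg_alloc_append_table_from_pages() for RDMA's dynamic append. A
minimal sketch of a converted one-shot caller (the helper name and its
arguments are hypothetical; error handling trimmed):

	/* Sketch only: "pages"/"n_pages" stand in for a real caller's state. */
	static int example_map_pages(struct sg_table *sgt, struct page **pages,
				     unsigned int n_pages)
	{
		/* One-shot table: no append state to carry, errno on failure. */
		return sg_alloc_table_from_pages_segment(sgt, pages, n_pages, 0,
				(unsigned long)n_pages << PAGE_SHIFT,
				UINT_MAX, GFP_KERNEL);
	}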
drivers/gpu/drm/drm_prime.c  +6 −7
@@ -807,8 +807,8 @@ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
 				       struct page **pages, unsigned int nr_pages)
 {
 	struct sg_table *sg;
-	struct scatterlist *sge;
 	size_t max_segment = 0;
+	int err;
 
 	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
 	if (!sg)
@@ -818,13 +818,12 @@ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
 		max_segment = dma_max_mapping_size(dev->dev);
 	if (max_segment == 0)
 		max_segment = UINT_MAX;
-	sge = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
-					  nr_pages << PAGE_SHIFT,
-					  max_segment,
-					  NULL, 0, GFP_KERNEL);
-	if (IS_ERR(sge)) {
+	err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0,
+						nr_pages << PAGE_SHIFT,
+						max_segment, GFP_KERNEL);
+	if (err) {
 		kfree(sg);
-		sg = ERR_CAST(sge);
+		sg = ERR_PTR(err);
 	}
 	return sg;
 }
drivers/gpu/drm/i915/gem/i915_gem_userptr.c  +4 −7
@@ -133,7 +133,6 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 	unsigned int max_segment = i915_sg_segment_size();
 	struct sg_table *st;
 	unsigned int sg_page_sizes;
-	struct scatterlist *sg;
 	struct page **pvec;
 	int ret;
 
@@ -153,13 +152,11 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 	spin_unlock(&i915->mm.notifier_lock);
 
 alloc_table:
-	sg = __sg_alloc_table_from_pages(st, pvec, num_pages, 0,
-					 num_pages << PAGE_SHIFT, max_segment,
-					 NULL, 0, GFP_KERNEL);
-	if (IS_ERR(sg)) {
-		ret = PTR_ERR(sg);
+	ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0,
+						num_pages << PAGE_SHIFT,
+						max_segment, GFP_KERNEL);
+	if (ret)
 		goto err;
-	}
 
 	ret = i915_gem_gtt_prepare_pages(obj, st);
 	if (ret) {
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c  +5 −9
@@ -363,7 +363,6 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
 	int ret = 0;
 	static size_t sgl_size;
 	static size_t sgt_size;
-	struct scatterlist *sg;
 
 	if (vmw_tt->mapped)
 		return 0;
@@ -386,15 +385,12 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
 		if (unlikely(ret != 0))
 			return ret;
 
-		sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
-				vsgt->num_pages, 0,
-				(unsigned long) vsgt->num_pages << PAGE_SHIFT,
-				dma_get_max_seg_size(dev_priv->drm.dev),
-				NULL, 0, GFP_KERNEL);
-		if (IS_ERR(sg)) {
-			ret = PTR_ERR(sg);
+		ret = sg_alloc_table_from_pages_segment(
+			&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
+			(unsigned long)vsgt->num_pages << PAGE_SHIFT,
+			dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
+		if (ret)
 			goto out_sg_alloc_fail;
-		}
 
 		if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
 			uint64_t over_alloc =
drivers/infiniband/core/umem.c  +2 −2
@@ -226,8 +226,8 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
 
 		cur_base += ret * PAGE_SIZE;
 		npages -= ret;
-		sg = __sg_alloc_table_from_pages(&umem->sg_head, page_list, ret,
-				0, ret << PAGE_SHIFT,
+		sg = sg_alloc_append_table_from_pages(&umem->sg_head, page_list,
+				ret, 0, ret << PAGE_SHIFT,
 				ib_dma_max_seg_size(device), sg, npages,
 				GFP_KERNEL);
 		umem->sg_nents = umem->sg_head.nents;
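
As the hunk above shows, the append variant threads two pieces of state
through successive calls: the scatterlist tail returned by the previous call
(passed back as the prv argument) and the number of pages still expected
(left_pages), so the table grows in place instead of being rebuilt. A
condensed sketch of the pattern, with pin_batch() as a hypothetical stand-in
for the page-pinning step:

	struct scatterlist *sg = NULL;	/* no previous tail on the first call */

	while (npages) {
		ret = pin_batch(page_list, npages);	/* hypothetical */
		npages -= ret;
		sg = sg_alloc_append_table_from_pages(&umem->sg_head,
				page_list, ret, 0, ret << PAGE_SHIFT,
				ib_dma_max_seg_size(device), sg, npages,
				GFP_KERNEL);
		if (IS_ERR(sg))
			goto err;	/* cleanup elided */
	}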
include/linux/scatterlist.h  +35 −4
@@ -285,14 +285,45 @@ void sg_free_table(struct sg_table *);
 int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int,
 		     struct scatterlist *, unsigned int, gfp_t, sg_alloc_fn *);
 int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
-struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt,
+struct scatterlist *sg_alloc_append_table_from_pages(struct sg_table *sgt,
 		struct page **pages, unsigned int n_pages, unsigned int offset,
 		unsigned long size, unsigned int max_segment,
 		struct scatterlist *prv, unsigned int left_pages,
 		gfp_t gfp_mask);
-int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
-			      unsigned int n_pages, unsigned int offset,
-			      unsigned long size, gfp_t gfp_mask);
+int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages,
+				      unsigned int n_pages, unsigned int offset,
+				      unsigned long size,
+				      unsigned int max_segment, gfp_t gfp_mask);
+
+/**
+ * sg_alloc_table_from_pages - Allocate and initialize an sg table from
+ *			       an array of pages
+ * @sgt:	 The sg table header to use
+ * @pages:	 Pointer to an array of page pointers
+ * @n_pages:	 Number of pages in the pages array
+ * @offset:      Offset from start of the first page to the start of a buffer
+ * @size:        Number of valid bytes in the buffer (after offset)
+ * @gfp_mask:	 GFP allocation mask
+ *
+ *  Description:
+ *    Allocate and initialize an sg table from a list of pages. Contiguous
+ *    ranges of the pages are squashed into a single scatterlist node. A user
+ *    may provide an offset at a start and a size of valid data in a buffer
+ *    specified by the page array. The returned sg table is released by
+ *    sg_free_table.
+ *
+ * Returns:
+ *   0 on success, negative error on failure
+ */
+static inline int sg_alloc_table_from_pages(struct sg_table *sgt,
+					    struct page **pages,
+					    unsigned int n_pages,
+					    unsigned int offset,
+					    unsigned long size, gfp_t gfp_mask)
+{
+	return sg_alloc_table_from_pages_segment(sgt, pages, n_pages, offset,
+						 size, UINT_MAX, gfp_mask);
+}
 
 #ifdef CONFIG_SGL_ALLOC
 struct scatterlist *sgl_alloc_order(unsigned long long length,
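
The inline wrapper above keeps every existing sg_alloc_table_from_pages()
caller working unchanged: it simply forwards to the _segment variant with
max_segment pinned to UINT_MAX. In other words (sketch, assuming sgt, pages,
n and size are in scope), these two calls are equivalent:

	sg_alloc_table_from_pages(sgt, pages, n, 0, size, GFP_KERNEL);
	sg_alloc_table_from_pages_segment(sgt, pages, n, 0, size, UINT_MAX,
					  GFP_KERNEL);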