Commit aff1e0b0 authored by Matthew Auld, committed by Rodrigo Vivi
Browse files

drm/i915/ttm: fix sg_table construction

If we encounter some monster sized local-memory page that exceeds the
maximum sg length (UINT32_MAX), ensure that we don't end up with some
misaligned address in the entry that follows, leading to fireworks
later. Also ensure we have some coverage of this in the selftests.

v2(Chris):
  - Use round_down consistently to avoid udiv errors
v3(Nirmoy):
  - Also update the max_segment in the selftest

Fixes: f701b16d ("drm/i915/ttm: add i915_sg_from_buddy_resource")
Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/6379


Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Nirmoy Das <nirmoy.das@linux.intel.com>
Reviewed-by: Nirmoy Das <nirmoy.das@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220711085859.24198-1-matthew.auld@intel.com


(cherry picked from commit bc99f120)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 896dcabd
Loading
Loading
Loading
Loading
+9 −2
Original line number Diff line number Diff line
@@ -620,10 +620,15 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
			 struct ttm_resource *res)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	u64 page_alignment;

	if (!i915_ttm_gtt_binds_lmem(res))
		return i915_ttm_tt_get_st(bo->ttm);

	page_alignment = bo->page_alignment << PAGE_SHIFT;
	if (!page_alignment)
		page_alignment = obj->mm.region->min_page_size;

	/*
	 * If CPU mapping differs, we need to add the ttm_tt pages to
	 * the resulting st. Might make sense for GGTT.
@@ -634,7 +639,8 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
			struct i915_refct_sgt *rsgt;

			rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region,
								 res);
								 res,
								 page_alignment);
			if (IS_ERR(rsgt))
				return rsgt;

@@ -643,7 +649,8 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
		return i915_refct_sgt_get(obj->ttm.cached_io_rsgt);
	}

	return intel_region_ttm_resource_to_rsgt(obj->mm.region, res);
	return intel_region_ttm_resource_to_rsgt(obj->mm.region, res,
						 page_alignment);
}

static int i915_ttm_truncate(struct drm_i915_gem_object *obj)
+15 −4
Original line number Diff line number Diff line
@@ -68,6 +68,7 @@ void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size)
 * drm_mm_node
 * @node: The drm_mm_node.
 * @region_start: An offset to add to the dma addresses of the sg list.
 * @page_alignment: Required page alignment for each sg entry. Power of two.
 *
 * Create a struct sg_table, initializing it from a struct drm_mm_node,
 * taking a maximum segment length into account, splitting into segments
@@ -77,15 +78,18 @@ void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size)
 * error code cast to an error pointer on failure.
 */
struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
					      u64 region_start)
					      u64 region_start,
					      u64 page_alignment)
{
	const u64 max_segment = SZ_1G; /* Do we have a limit on this? */
	const u64 max_segment = round_down(UINT_MAX, page_alignment);
	u64 segment_pages = max_segment >> PAGE_SHIFT;
	u64 block_size, offset, prev_end;
	struct i915_refct_sgt *rsgt;
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(!max_segment);

	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
	if (!rsgt)
		return ERR_PTR(-ENOMEM);
@@ -112,6 +116,8 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
				sg = __sg_next(sg);

			sg_dma_address(sg) = region_start + offset;
			GEM_BUG_ON(!IS_ALIGNED(sg_dma_address(sg),
					       page_alignment));
			sg_dma_len(sg) = 0;
			sg->length = 0;
			st->nents++;
@@ -138,6 +144,7 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
 * i915_buddy_block list
 * @res: The struct i915_ttm_buddy_resource.
 * @region_start: An offset to add to the dma addresses of the sg list.
 * @page_alignment: Required page alignment for each sg entry. Power of two.
 *
 * Create a struct sg_table, initializing it from struct i915_buddy_block list,
 * taking a maximum segment length into account, splitting into segments
@@ -147,11 +154,12 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
 * error code cast to an error pointer on failure.
 */
struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
						     u64 region_start)
						     u64 region_start,
						     u64 page_alignment)
{
	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
	const u64 size = res->num_pages << PAGE_SHIFT;
	const u64 max_segment = rounddown(UINT_MAX, PAGE_SIZE);
	const u64 max_segment = round_down(UINT_MAX, page_alignment);
	struct drm_buddy *mm = bman_res->mm;
	struct list_head *blocks = &bman_res->blocks;
	struct drm_buddy_block *block;
@@ -161,6 +169,7 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
	resource_size_t prev_end;

	GEM_BUG_ON(list_empty(blocks));
	GEM_BUG_ON(!max_segment);

	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
	if (!rsgt)
@@ -191,6 +200,8 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
					sg = __sg_next(sg);

				sg_dma_address(sg) = region_start + offset;
				GEM_BUG_ON(!IS_ALIGNED(sg_dma_address(sg),
						       page_alignment));
				sg_dma_len(sg) = 0;
				sg->length = 0;
				st->nents++;
+4 −2
Original line number Diff line number Diff line
@@ -213,9 +213,11 @@ static inline void __i915_refct_sgt_init(struct i915_refct_sgt *rsgt,
void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size);

struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
					      u64 region_start);
					      u64 region_start,
					      u64 page_alignment);

struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
						     u64 region_start);
						     u64 region_start,
						     u64 page_alignment);

#endif
+7 −3
Original line number Diff line number Diff line
@@ -152,6 +152,7 @@ int intel_region_ttm_fini(struct intel_memory_region *mem)
 * Convert an opaque TTM resource manager resource to a refcounted sg_table.
 * @mem: The memory region.
 * @res: The resource manager resource obtained from the TTM resource manager.
 * @page_alignment: Required page alignment for each sg entry. Power of two.
 *
 * The gem backends typically use sg-tables for operations on the underlying
 * io_memory. So provide a way for the backends to translate the
@@ -161,16 +162,19 @@ int intel_region_ttm_fini(struct intel_memory_region *mem)
 */
struct i915_refct_sgt *
intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
				  struct ttm_resource *res)
				  struct ttm_resource *res,
				  u64 page_alignment)
{
	if (mem->is_range_manager) {
		struct ttm_range_mgr_node *range_node =
			to_ttm_range_mgr_node(res);

		return i915_rsgt_from_mm_node(&range_node->mm_nodes[0],
					      mem->region.start);
					      mem->region.start,
					      page_alignment);
	} else {
		return i915_rsgt_from_buddy_resource(res, mem->region.start);
		return i915_rsgt_from_buddy_resource(res, mem->region.start,
						     page_alignment);
	}
}

+2 −1
Original line number Diff line number Diff line
@@ -24,7 +24,8 @@ int intel_region_ttm_fini(struct intel_memory_region *mem);

struct i915_refct_sgt *
intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
				  struct ttm_resource *res);
				  struct ttm_resource *res,
				  u64 page_alignment);

void intel_region_ttm_resource_free(struct intel_memory_region *mem,
				    struct ttm_resource *res);
Loading