Commit 379989e7 authored by Thomas Hellström

drm/ttm/pool: Fix ttm_pool_alloc error path



When hitting an error, the error path forgot to unmap DMA mappings and
could call set_pages_wb() on already uncached pages.

Fix this by introducing a common ttm_pool_free_range() function that
unmaps the DMA mappings and frees the pages of a range using the
range's actual caching mode.
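
Condensed from the error_free_all hunk in the diff below, the heart of
the fix is to split the partially populated page vector at the point
the caching cursor had reached, and to free each sub-range with the
caching mode its pages actually have:

	/* Sketch of the fixed error_free_all path (excerpted from the
	 * diff below). Pages in [0, caching_divide) were already
	 * converted to tt->caching; pages in [caching_divide, num_pages)
	 * still use the default write-back caching, so freeing them as
	 * ttm_cached avoids calling set_pages_wb() on uncached pages.
	 * ttm_pool_free_range() also unmaps the DMA mapping of each
	 * contiguous chunk before handing the pages back.
	 */
	num_pages = tt->num_pages - num_pages;	/* pages populated so far */
	caching_divide = caching - tt->pages;	/* end of converted range */
	ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
	ttm_pool_free_range(pool, tt, ttm_cached, caching_divide, num_pages);

ttm_pool_free() reuses the same helper for the fully populated case, as
the last hunk below shows.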

v2:
- Simplify that common function (Christian König)
v3:
- Rename that common function to ttm_pool_free_range() (Christian König)

Fixes: d099fc8f ("drm/ttm: new TT backend allocation pool v3")
Cc: Christian König <christian.koenig@amd.com>
Cc: Dave Airlie <airlied@redhat.com>
Cc: Huang Rui <ray.huang@amd.com>
Cc: dri-devel@lists.freedesktop.org
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230404200650.11043-2-thomas.hellstrom@linux.intel.com
parent 864b4380
+51 −30
@@ -367,6 +367,43 @@ static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
 	return 0;
 }
 
+/**
+ * ttm_pool_free_range() - Free a range of TTM pages
+ * @pool: The pool used for allocating.
+ * @tt: The struct ttm_tt holding the page pointers.
+ * @caching: The page caching mode used by the range.
+ * @start_page: index for first page to free.
+ * @end_page: index for last page to free + 1.
+ *
+ * During allocation the ttm_tt page-vector may be populated with ranges of
+ * pages with different attributes if allocation hit an error without being
+ * able to completely fulfill the allocation. This function can be used
+ * to free these individual ranges.
+ */
+static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
+				enum ttm_caching caching,
+				pgoff_t start_page, pgoff_t end_page)
+{
+	struct page **pages = tt->pages;
+	unsigned int order;
+	pgoff_t i, nr;
+
+	for (i = start_page; i < end_page; i += nr, pages += nr) {
+		struct ttm_pool_type *pt = NULL;
+
+		order = ttm_pool_page_order(pool, *pages);
+		nr = (1UL << order);
+		if (tt->dma_address)
+			ttm_pool_unmap(pool, tt->dma_address[i], nr);
+
+		pt = ttm_pool_select_type(pool, caching, order);
+		if (pt)
+			ttm_pool_type_give(pt, *pages);
+		else
+			ttm_pool_free_page(pool, caching, order, *pages);
+	}
+}
+
 /**
  * ttm_pool_alloc - Fill a ttm_tt object
  *
@@ -382,12 +419,14 @@ static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
 int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 		   struct ttm_operation_ctx *ctx)
 {
-	unsigned long num_pages = tt->num_pages;
+	pgoff_t num_pages = tt->num_pages;
 	dma_addr_t *dma_addr = tt->dma_address;
 	struct page **caching = tt->pages;
 	struct page **pages = tt->pages;
+	enum ttm_caching page_caching;
 	gfp_t gfp_flags = GFP_USER;
-	unsigned int i, order;
+	pgoff_t caching_divide;
+	unsigned int order;
 	struct page *p;
 	int r;
 
@@ -410,6 +449,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 	     order = min_t(unsigned int, order, __fls(num_pages))) {
 		struct ttm_pool_type *pt;
 
+		page_caching = tt->caching;
 		pt = ttm_pool_select_type(pool, tt->caching, order);
 		p = pt ? ttm_pool_type_take(pt) : NULL;
 		if (p) {
@@ -418,6 +458,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 			if (r)
 				goto error_free_page;
 
+			caching = pages;
 			do {
 				r = ttm_pool_page_allocated(pool, order, p,
 							    &dma_addr,
@@ -426,14 +467,15 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 				if (r)
 					goto error_free_page;
 
+				caching = pages;
 				if (num_pages < (1 << order))
 					break;
 
 				p = ttm_pool_type_take(pt);
 			} while (p);
-			caching = pages;
 		}
 
+		page_caching = ttm_cached;
 		while (num_pages >= (1 << order) &&
 		       (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {
 
@@ -442,6 +484,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 							   tt->caching);
 				if (r)
 					goto error_free_page;
+				caching = pages;
 			}
 			r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
 						    &num_pages, &pages);
@@ -468,15 +511,13 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 	return 0;
 
 error_free_page:
-	ttm_pool_free_page(pool, tt->caching, order, p);
+	ttm_pool_free_page(pool, page_caching, order, p);
 
 error_free_all:
 	num_pages = tt->num_pages - num_pages;
-	for (i = 0; i < num_pages; ) {
-		order = ttm_pool_page_order(pool, tt->pages[i]);
-		ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
-		i += 1 << order;
-	}
+	caching_divide = caching - tt->pages;
+	ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
+	ttm_pool_free_range(pool, tt, ttm_cached, caching_divide, num_pages);
 
 	return r;
 }
@@ -492,27 +533,7 @@ EXPORT_SYMBOL(ttm_pool_alloc);
  */
 void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
 {
-	unsigned int i;
-
-	for (i = 0; i < tt->num_pages; ) {
-		struct page *p = tt->pages[i];
-		unsigned int order, num_pages;
-		struct ttm_pool_type *pt;
-
-		order = ttm_pool_page_order(pool, p);
-		num_pages = 1ULL << order;
-		if (tt->dma_address)
-			ttm_pool_unmap(pool, tt->dma_address[i], num_pages);
-
-		pt = ttm_pool_select_type(pool, tt->caching, order);
-		if (pt)
-			ttm_pool_type_give(pt, tt->pages[i]);
-		else
-			ttm_pool_free_page(pool, tt->caching, order,
-					   tt->pages[i]);
-
-		i += num_pages;
-	}
+	ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);
 
 	while (atomic_long_read(&allocated_pages) > page_pool_size)
 		ttm_pool_shrink();