Commit 56e51681 authored by Christian König's avatar Christian König
Browse files

drm/ttm: revert "Reduce the number of used allocation orders for TTM pages"



This reverts commit 322458c2.

PMD_SHIFT is not necessarily constant on all architectures, resulting in
build failures.

Signed-off-by: default avatarChristian König <christian.koenig@amd.com>
Acked-by: default avatarDaniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/CAKMK7uHgUuqWJuqmZKrxi2mNiqExhmMif-naYnzUSj-puW-x+A@mail.gmail.com
parent 50e9cc9a
Loading
Loading
Loading
Loading
+11 −19
Original line number Diff line number Diff line
@@ -47,11 +47,6 @@

#include "ttm_module.h"

#define TTM_MAX_ORDER (PMD_SHIFT - PAGE_SHIFT)
#define __TTM_DIM_ORDER (TTM_MAX_ORDER + 1)
/* Some architectures have a weird PMD_SHIFT */
#define TTM_DIM_ORDER (__TTM_DIM_ORDER <= MAX_ORDER ? __TTM_DIM_ORDER : MAX_ORDER)

/**
 * struct ttm_pool_dma - Helper object for coherent DMA mappings
 *
@@ -70,11 +65,11 @@ module_param(page_pool_size, ulong, 0644);

static atomic_long_t allocated_pages;

static struct ttm_pool_type global_write_combined[TTM_DIM_ORDER];
static struct ttm_pool_type global_uncached[TTM_DIM_ORDER];
static struct ttm_pool_type global_write_combined[MAX_ORDER];
static struct ttm_pool_type global_uncached[MAX_ORDER];

static struct ttm_pool_type global_dma32_write_combined[TTM_DIM_ORDER];
static struct ttm_pool_type global_dma32_uncached[TTM_DIM_ORDER];
static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];

static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
@@ -449,7 +444,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
	else
		gfp_flags |= GFP_HIGHUSER;

	for (order = min_t(unsigned int, TTM_MAX_ORDER, __fls(num_pages));
	for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
	     num_pages;
	     order = min_t(unsigned int, order, __fls(num_pages))) {
		struct ttm_pool_type *pt;
@@ -568,7 +563,7 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,

	if (use_dma_alloc) {
		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
			for (j = 0; j < TTM_DIM_ORDER; ++j)
			for (j = 0; j < MAX_ORDER; ++j)
				ttm_pool_type_init(&pool->caching[i].orders[j],
						   pool, i, j);
	}
@@ -588,7 +583,7 @@ void ttm_pool_fini(struct ttm_pool *pool)

	if (pool->use_dma_alloc) {
		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
			for (j = 0; j < TTM_DIM_ORDER; ++j)
			for (j = 0; j < MAX_ORDER; ++j)
				ttm_pool_type_fini(&pool->caching[i].orders[j]);
	}

@@ -642,7 +637,7 @@ static void ttm_pool_debugfs_header(struct seq_file *m)
	unsigned int i;

	seq_puts(m, "\t ");
	for (i = 0; i < TTM_DIM_ORDER; ++i)
	for (i = 0; i < MAX_ORDER; ++i)
		seq_printf(m, " ---%2u---", i);
	seq_puts(m, "\n");
}
@@ -653,7 +648,7 @@ static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
{
	unsigned int i;

	for (i = 0; i < TTM_DIM_ORDER; ++i)
	for (i = 0; i < MAX_ORDER; ++i)
		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
	seq_puts(m, "\n");
}
@@ -756,16 +751,13 @@ int ttm_pool_mgr_init(unsigned long num_pages)
{
	unsigned int i;

	BUILD_BUG_ON(TTM_DIM_ORDER > MAX_ORDER);
	BUILD_BUG_ON(TTM_DIM_ORDER < 1);

	if (!page_pool_size)
		page_pool_size = num_pages;

	spin_lock_init(&shrinker_lock);
	INIT_LIST_HEAD(&shrinker_list);

	for (i = 0; i < TTM_DIM_ORDER; ++i) {
	for (i = 0; i < MAX_ORDER; ++i) {
		ttm_pool_type_init(&global_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
@@ -798,7 +790,7 @@ void ttm_pool_mgr_fini(void)
{
	unsigned int i;

	for (i = 0; i < TTM_DIM_ORDER; ++i) {
	for (i = 0; i < MAX_ORDER; ++i) {
		ttm_pool_type_fini(&global_write_combined[i]);
		ttm_pool_type_fini(&global_uncached[i]);