Commit d3bcb4b0 authored by Christian König

drm/vmwgfx: switch the TTM backends to self alloc

parent beb4c865
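
The pattern throughout this commit: instead of the TTM core owning the bookkeeping object behind mem->mm_node, each backend now allocates that object itself, initializes it with ttm_resource_init(), and frees it unconditionally in its put_node callback. A minimal sketch of the shape, assuming the 5.13-era callback signatures visible in the hunks below (the my_* names are hypothetical):

static int my_man_get_node(struct ttm_resource_manager *man,
			   struct ttm_buffer_object *bo,
			   const struct ttm_place *place,
			   struct ttm_resource *mem)
{
	/* The backend, not the TTM core, owns this allocation now. */
	mem->mm_node = kmalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem->mm_node)
		return -ENOMEM;

	/* Self-alloc backends initialize the resource themselves. */
	ttm_resource_init(bo, place, mem->mm_node);
	return 0;
}

static void my_man_put_node(struct ttm_resource_manager *man,
			    struct ttm_resource *mem)
{
	/*
	 * mm_node is backend-owned and always non-NULL after a
	 * successful alloc, so the old "if (mem->mm_node)" guards
	 * in both files below can simply be dropped.
	 */
	kfree(mem->mm_node);
}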
drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +11 −7
@@ -57,6 +57,12 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
 	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
 	int id;
 
+	mem->mm_node = kmalloc(sizeof(*mem), GFP_KERNEL);
+	if (!mem->mm_node)
+		return -ENOMEM;
+
+	ttm_resource_init(bo, place, mem->mm_node);
+
 	id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
 	if (id < 0)
 		return id;
@@ -87,13 +93,11 @@ static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
 {
 	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
 
-	if (mem->mm_node) {
-		ida_free(&gman->gmr_ida, mem->start);
-		spin_lock(&gman->lock);
-		gman->used_gmr_pages -= mem->num_pages;
-		spin_unlock(&gman->lock);
-		mem->mm_node = NULL;
-	}
+	ida_free(&gman->gmr_ida, mem->start);
+	spin_lock(&gman->lock);
+	gman->used_gmr_pages -= mem->num_pages;
+	spin_unlock(&gman->lock);
+	kfree(mem->mm_node);
 }
 
 static const struct ttm_resource_manager_func vmw_gmrid_manager_func;
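
Side note on the forward declaration above: its definition sits at the bottom of the file and is untouched by this diff; assuming the 5.13-era ttm_resource_manager_func layout, the callbacks are expected to be wired up roughly as:

static const struct ttm_resource_manager_func vmw_gmrid_manager_func = {
	.alloc = vmw_gmrid_man_get_node,	/* self-allocating get_node */
	.free = vmw_gmrid_man_put_node,		/* frees what alloc created */
};

Also worth flagging from the first hunk: the pre-existing "if (id < 0) return id;" now runs after the kmalloc(), so that error path appears to exit with mem->mm_node allocated but never freed.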
drivers/gpu/drm/vmwgfx/vmwgfx_thp.c +20 −17
@@ -7,6 +7,7 @@
 #include "vmwgfx_drv.h"
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
 
 /**
  * struct vmw_thp_manager - Range manager implementing huge page alignment
@@ -54,16 +55,18 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
 {
 	struct vmw_thp_manager *rman = to_thp_manager(man);
 	struct drm_mm *mm = &rman->mm;
-	struct drm_mm_node *node;
+	struct ttm_range_mgr_node *node;
 	unsigned long align_pages;
 	unsigned long lpfn;
 	enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
 	int ret;
 
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
 	if (!node)
 		return -ENOMEM;
 
+	ttm_resource_init(bo, place, &node->base);
+
 	lpfn = place->lpfn;
 	if (!lpfn)
 		lpfn = man->size;
@@ -76,8 +79,9 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
 	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
 		align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
 		if (mem->num_pages >= align_pages) {
-			ret = vmw_thp_insert_aligned(bo, mm, node, align_pages,
-						     place, mem, lpfn, mode);
+			ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
+						     align_pages, place, mem,
+						     lpfn, mode);
 			if (!ret)
 				goto found_unlock;
 		}
@@ -85,14 +89,15 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
 
 	align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
 	if (mem->num_pages >= align_pages) {
-		ret = vmw_thp_insert_aligned(bo, mm, node, align_pages, place,
-					     mem, lpfn, mode);
+		ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
+					     align_pages, place, mem, lpfn,
+					     mode);
 		if (!ret)
 			goto found_unlock;
 	}
 
-	ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
-					  bo->page_alignment, 0,
+	ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
+					  mem->num_pages, bo->page_alignment, 0,
 					  place->fpfn, lpfn, mode);
 found_unlock:
 	spin_unlock(&rman->lock);
@@ -100,8 +105,8 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
 	if (unlikely(ret)) {
 		kfree(node);
 	} else {
-		mem->mm_node = node;
-		mem->start = node->start;
+		mem->mm_node = &node->mm_nodes[0];
+		mem->start = node->mm_nodes[0].start;
 	}
 
 	return ret;
@@ -113,15 +118,13 @@ static void vmw_thp_put_node(struct ttm_resource_manager *man,
 			     struct ttm_resource *mem)
 {
 	struct vmw_thp_manager *rman = to_thp_manager(man);
+	struct ttm_range_mgr_node *node = mem->mm_node;
 
-	if (mem->mm_node) {
-		spin_lock(&rman->lock);
-		drm_mm_remove_node(mem->mm_node);
-		spin_unlock(&rman->lock);
+	spin_lock(&rman->lock);
+	drm_mm_remove_node(&node->mm_nodes[0]);
+	spin_unlock(&rman->lock);
 
-		kfree(mem->mm_node);
-		mem->mm_node = NULL;
-	}
+	kfree(node);
 }
 
 int vmw_thp_init(struct vmw_private *dev_priv)
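
The switch from a bare drm_mm_node to ttm_range_mgr_node leans on the C flexible-array-member idiom: a single allocation holds the embedded ttm_resource plus a trailing array of drm_mm_nodes, sized with struct_size() from <linux/overflow.h>, which computes sizeof(*node) + n * sizeof(node->mm_nodes[0]) with overflow checking. A self-contained userspace illustration of the same idiom (the structs and the struct_size() stand-in here are simplified, not the kernel definitions; the real macro saturates rather than wraps on overflow):

#include <stdio.h>
#include <stdlib.h>

struct range_node { long start, size; };	/* stands in for drm_mm_node */

struct mgr_node {
	int placement;				/* stands in for ttm_resource */
	struct range_node mm_nodes[];		/* flexible array member */
};

/* Simplified stand-in for the kernel's struct_size() macro. */
#define struct_size(p, member, n) \
	(sizeof(*(p)) + (n) * sizeof((p)->member[0]))

int main(void)
{
	struct mgr_node *node;

	/* One kzalloc-style allocation covers the header plus one node. */
	node = calloc(1, struct_size(node, mm_nodes, 1));
	if (!node)
		return 1;

	node->mm_nodes[0].start = 4096;
	node->mm_nodes[0].size = 2 * 1024 * 1024;	/* a 2 MiB huge page */
	printf("range [%ld, +%ld)\n",
	       node->mm_nodes[0].start, node->mm_nodes[0].size);
	free(node);
	return 0;
}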