Commit 3abc6670 authored by Thomas Zimmermann's avatar Thomas Zimmermann
Browse files

drm: Implement drm_need_swiotlb() in drm_cache.c



The function drm_need_swiotlb() is declared in drm_cache.h, so move its
implementation from drm_memory.c into drm_cache.c. While at it, remove the
curly braces from the single-statement for loop to adhere to kernel coding style.

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210112081035.6882-3-tzimmermann@suse.de
parent ff28a9f8
Loading
Loading
Loading
Loading
+32 −0
Original line number Original line Diff line number Diff line
@@ -30,6 +30,7 @@


#include <linux/export.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/highmem.h>
#include <xen/xen.h>


#include <drm/drm_cache.h>
#include <drm/drm_cache.h>


@@ -176,3 +177,34 @@ drm_clflush_virt_range(void *addr, unsigned long length)
#endif
#endif
}
}
EXPORT_SYMBOL(drm_clflush_virt_range);
EXPORT_SYMBOL(drm_clflush_virt_range);

/*
 * drm_need_swiotlb - report whether swiotlb bounce buffering is required
 * @dma_bits: width in bits of the DMA address space the driver will use
 *
 * Returns true if the driver must use swiotlb: on Xen PV domains, when
 * memory encryption is active, or when some iomem resource ends above
 * the range addressable with @dma_bits.
 */
bool drm_need_swiotlb(int dma_bits)
{
	resource_size_t highest_iomem = 0;
	struct resource *res;

	/*
	 * Xen paravirtual hosts require swiotlb regardless of requested dma
	 * transfer size.
	 *
	 * NOTE: Really, what it requires is use of the dma_alloc_coherent
	 *       allocator used in ttm_dma_populate() instead of
	 *       ttm_populate_and_map_pages(), which bounce buffers so much in
	 *       Xen it leads to swiotlb buffer exhaustion.
	 */
	if (xen_pv_domain())
		return true;

	/*
	 * Enforce dma_alloc_coherent when memory encryption is active as well
	 * for the same reasons as for Xen paravirtual hosts.
	 */
	if (mem_encrypt_active())
		return true;

	/* Find the highest end address among the top-level iomem resources. */
	res = iomem_resource.child;
	while (res) {
		highest_iomem = max(highest_iomem, res->end);
		res = res->sibling;
	}

	return highest_iomem > ((u64)1 << dma_bits);
}
EXPORT_SYMBOL(drm_need_swiotlb);
+0 −33
Original line number Original line Diff line number Diff line
@@ -37,7 +37,6 @@
#include <linux/highmem.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/vmalloc.h>
#include <xen/xen.h>


#include <drm/drm_agpsupport.h>
#include <drm/drm_agpsupport.h>
#include <drm/drm_cache.h>
#include <drm/drm_cache.h>
@@ -138,35 +137,3 @@ void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
		iounmap(map->handle);
		iounmap(map->handle);
}
}
EXPORT_SYMBOL(drm_legacy_ioremapfree);
EXPORT_SYMBOL(drm_legacy_ioremapfree);

/*
 * drm_need_swiotlb - report whether swiotlb bounce buffering is required
 * @dma_bits: width in bits of the DMA address space the driver will use
 *
 * Returns true on Xen PV domains, when memory encryption is active, or
 * when some top-level iomem resource ends above the address range
 * reachable with @dma_bits.
 */
bool drm_need_swiotlb(int dma_bits)
{
	struct resource *tmp;
	resource_size_t max_iomem = 0;

	/*
	 * Xen paravirtual hosts require swiotlb regardless of requested dma
	 * transfer size.
	 *
	 * NOTE: Really, what it requires is use of the dma_alloc_coherent
	 *       allocator used in ttm_dma_populate() instead of
	 *       ttm_populate_and_map_pages(), which bounce buffers so much in
	 *       Xen it leads to swiotlb buffer exhaustion.
	 */
	if (xen_pv_domain())
		return true;

	/*
	 * Enforce dma_alloc_coherent when memory encryption is active as well
	 * for the same reasons as for Xen paravirtual hosts.
	 */
	if (mem_encrypt_active())
		return true;

	/* Walk the top-level iomem resources to find the highest end address. */
	for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) {
		max_iomem = max(max_iomem,  tmp->end);
	}

	return max_iomem > ((u64)1 << dma_bits);
}
EXPORT_SYMBOL(drm_need_swiotlb);