Commit 9a91e5e0 authored by Dave Airlie

Merge tag 'amd-drm-next-5.14-2021-05-21' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-5.14-2021-05-21:

amdgpu:
- RAS fixes
- SR-IOV fixes
- More BO management cleanups
- Aldebaran fixes
- Display fixes
- Support for new GPU, Beige Goby
- Backlight fixes

amdkfd:
- RAS fixes
- DMA mapping fixes
- HMM SVM fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210521045743.4047-1-alexander.deucher@amd.com
parents c99c4d0c 81db370c
drivers/gpu/drm/amd/amdgpu/Makefile  +2 −1
@@ -73,7 +73,8 @@ amdgpu-y += \
 	vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
 	vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \
 	arct_reg_init.o navi12_reg_init.o mxgpu_nv.o sienna_cichlid_reg_init.o vangogh_reg_init.o \
-	nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o
+	nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o \
+	beige_goby_reg_init.o
 
 # add DF block
 amdgpu-y += \
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c  +2 −2
@@ -670,10 +670,10 @@ int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid)
 	return 0;
 }
 
-int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid)
+int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid,
+				      enum TLB_FLUSH_TYPE flush_type)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-	const uint32_t flush_type = 0;
 	bool all_hub = false;
 
 	if (adev->family == AMDGPU_FAMILY_AI)
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h  +21 −6
@@ -36,13 +36,26 @@
 
 extern uint64_t amdgpu_amdkfd_total_mem_size;
 
+enum TLB_FLUSH_TYPE {
+	TLB_FLUSH_LEGACY = 0,
+	TLB_FLUSH_LIGHTWEIGHT,
+	TLB_FLUSH_HEAVYWEIGHT
+};
+
 struct amdgpu_device;
 
-struct kfd_bo_va_list {
-	struct list_head bo_list;
-	struct amdgpu_bo_va *bo_va;
-	void *kgd_dev;
+enum kfd_mem_attachment_type {
+	KFD_MEM_ATT_SHARED,	/* Share kgd_mem->bo or another attachment's */
+	KFD_MEM_ATT_USERPTR,	/* SG bo to DMA map pages from a userptr bo */
+	KFD_MEM_ATT_DMABUF,	/* DMAbuf to DMA map TTM BOs */
+};
+
+struct kfd_mem_attachment {
+	struct list_head list;
+	enum kfd_mem_attachment_type type;
 	bool is_mapped;
+	struct amdgpu_bo_va *bo_va;
+	struct amdgpu_device *adev;
 	uint64_t va;
 	uint64_t pte_flags;
 };
@@ -50,7 +63,8 @@ struct kfd_bo_va_list {
 struct kgd_mem {
 	struct mutex lock;
 	struct amdgpu_bo *bo;
-	struct list_head bo_va_list;
+	struct dma_buf *dmabuf;
+	struct list_head attachments;
 	/* protected by amdkfd_process_info.lock */
 	struct ttm_validate_buffer validate_list;
 	struct ttm_validate_buffer resv_list;
@@ -135,7 +149,8 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
 void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
 bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd);
 int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid);
-int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid);
+int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid,
+				      enum TLB_FLUSH_TYPE flush_type);
 
 bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);

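Note on the interface change above: the TLB invalidation strength is now chosen by the caller instead of being hard-coded to flush type 0 as in the old amdgpu_amdkfd_flush_gpu_tlb_pasid(). A rough sketch of caller-side usage, assuming only the enum and prototype from this patch (the helper itself is hypothetical):

	/* Hypothetical caller; only the enum values and the updated
	 * prototype come from this patch.
	 */
	static void kfd_flush_tlb_sketch(struct kgd_dev *kgd, uint16_t pasid,
					 bool heavyweight)
	{
		enum TLB_FLUSH_TYPE type = heavyweight ?
			TLB_FLUSH_HEAVYWEIGHT : TLB_FLUSH_LEGACY;

		amdgpu_amdkfd_flush_gpu_tlb_pasid(kgd, pasid, type);
	}

A heavyweight flush would typically follow unmapping, where stale translations must not survive; TLB_FLUSH_LEGACY preserves the previous default behavior.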
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  +378 −161
@@ -72,16 +72,16 @@ static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
 	return (struct amdgpu_device *)kgd;
 }
 
-static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
+static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
 		struct kgd_mem *mem)
 {
-	struct kfd_bo_va_list *entry;
+	struct kfd_mem_attachment *entry;
 
-	list_for_each_entry(entry, &mem->bo_va_list, bo_list)
+	list_for_each_entry(entry, &mem->attachments, list)
 		if (entry->bo_va->base.vm == avm)
-			return false;
+			return true;
 
-	return true;
+	return false;
 }
 
 /* Set memory usage limits. Current, limits are
@@ -433,7 +433,8 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
 				mapping_flags |= coherent ?
 					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
 			else
-				mapping_flags |= AMDGPU_VM_MTYPE_UC;
+				mapping_flags |= coherent ?
+					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
 		} else {
 			mapping_flags |= coherent ?
 				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
@@ -452,7 +453,8 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
 				if (adev->gmc.xgmi.connected_to_cpu)
 					snoop = true;
 			} else {
-				mapping_flags |= AMDGPU_VM_MTYPE_UC;
+				mapping_flags |= coherent ?
+					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
 				if (amdgpu_xgmi_same_hive(adev, bo_adev))
 					snoop = true;
 			}
@@ -473,87 +475,318 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
 	return pte_flags;
 }
 
-/* add_bo_to_vm - Add a BO to a VM
+static int
+kfd_mem_dmamap_userptr(struct kgd_mem *mem,
+		       struct kfd_mem_attachment *attachment)
+{
+	enum dma_data_direction direction =
+		mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
+		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+	struct ttm_operation_ctx ctx = {.interruptible = true};
+	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
+	struct amdgpu_device *adev = attachment->adev;
+	struct ttm_tt *src_ttm = mem->bo->tbo.ttm;
+	struct ttm_tt *ttm = bo->tbo.ttm;
+	int ret;
+
+	ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
+	if (unlikely(!ttm->sg))
+		return -ENOMEM;
+
+	if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
+		return -EINVAL;
+
+	/* Same sequence as in amdgpu_ttm_tt_pin_userptr */
+	ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
+					ttm->num_pages, 0,
+					(u64)ttm->num_pages << PAGE_SHIFT,
+					GFP_KERNEL);
+	if (unlikely(ret))
+		goto free_sg;
+
+	ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
+	if (unlikely(ret))
+		goto release_sg;
+
+	drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address,
+				       ttm->num_pages);
+
+	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+	if (ret)
+		goto unmap_sg;
+
+	return 0;
+
+unmap_sg:
+	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
+release_sg:
+	pr_err("DMA map userptr failed: %d\n", ret);
+	sg_free_table(ttm->sg);
+free_sg:
+	kfree(ttm->sg);
+	ttm->sg = NULL;
+	return ret;
+}
+
+static int
+kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
+{
+	struct ttm_operation_ctx ctx = {.interruptible = true};
+	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
+
+	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+}
+
+static int
+kfd_mem_dmamap_attachment(struct kgd_mem *mem,
+			  struct kfd_mem_attachment *attachment)
+{
+	switch (attachment->type) {
+	case KFD_MEM_ATT_SHARED:
+		return 0;
+	case KFD_MEM_ATT_USERPTR:
+		return kfd_mem_dmamap_userptr(mem, attachment);
+	case KFD_MEM_ATT_DMABUF:
+		return kfd_mem_dmamap_dmabuf(attachment);
+	default:
+		WARN_ON_ONCE(1);
+	}
+	return -EINVAL;
+}
+
+static void
+kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
+			 struct kfd_mem_attachment *attachment)
+{
+	enum dma_data_direction direction =
+		mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
+		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+	struct ttm_operation_ctx ctx = {.interruptible = false};
+	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
+	struct amdgpu_device *adev = attachment->adev;
+	struct ttm_tt *ttm = bo->tbo.ttm;
+
+	if (unlikely(!ttm->sg))
+		return;
+
+	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+	ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+
+	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
+	sg_free_table(ttm->sg);
+	ttm->sg = NULL;
+}
+
+static void
+kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
+{
+	struct ttm_operation_ctx ctx = {.interruptible = true};
+	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
+
+	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+	ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+}
+
+static void
+kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
+			    struct kfd_mem_attachment *attachment)
+{
+	switch (attachment->type) {
+	case KFD_MEM_ATT_SHARED:
+		break;
+	case KFD_MEM_ATT_USERPTR:
+		kfd_mem_dmaunmap_userptr(mem, attachment);
+		break;
+	case KFD_MEM_ATT_DMABUF:
+		kfd_mem_dmaunmap_dmabuf(attachment);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+	}
+}
+
+static int
+kfd_mem_attach_userptr(struct amdgpu_device *adev, struct kgd_mem *mem,
+		       struct amdgpu_bo **bo)
+{
+	unsigned long bo_size = mem->bo->tbo.base.size;
+	struct drm_gem_object *gobj;
+	int ret;
+
+	ret = amdgpu_bo_reserve(mem->bo, false);
+	if (ret)
+		return ret;
+
+	ret = amdgpu_gem_object_create(adev, bo_size, 1,
+				       AMDGPU_GEM_DOMAIN_CPU,
+				       0, ttm_bo_type_sg,
+				       mem->bo->tbo.base.resv,
+				       &gobj);
+	if (ret)
+		return ret;
+
+	amdgpu_bo_unreserve(mem->bo);
+
+	*bo = gem_to_amdgpu_bo(gobj);
+	(*bo)->parent = amdgpu_bo_ref(mem->bo);
+
+	return 0;
+}
+
+static int
+kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
+		      struct amdgpu_bo **bo)
+{
+	struct drm_gem_object *gobj;
+
+	if (!mem->dmabuf) {
+		mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base,
+			mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
+				DRM_RDWR : 0);
+		if (IS_ERR(mem->dmabuf)) {
+			mem->dmabuf = NULL;
+			return PTR_ERR(mem->dmabuf);
+		}
+	}
+
+	gobj = amdgpu_gem_prime_import(&adev->ddev, mem->dmabuf);
+	if (IS_ERR(gobj))
+		return PTR_ERR(gobj);
+
+	/* Import takes an extra reference on the dmabuf. Drop it now to
+	 * avoid leaking it. We only need the one reference in
+	 * kgd_mem->dmabuf.
+	 */
+	dma_buf_put(mem->dmabuf);
+
+	*bo = gem_to_amdgpu_bo(gobj);
+	(*bo)->parent = amdgpu_bo_ref(mem->bo);
+
+	return 0;
+}
+
+/* kfd_mem_attach - Add a BO to a VM
  *
  * Everything that needs to bo done only once when a BO is first added
  * to a VM. It can later be mapped and unmapped many times without
  * repeating these steps.
  *
+ * 0. Create BO for DMA mapping, if needed
  * 1. Allocate and initialize BO VA entry data structure
  * 2. Add BO to the VM
  * 3. Determine ASIC-specific PTE flags
  * 4. Alloc page tables and directories if needed
  * 4a.  Validate new page tables and directories
  */
-static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
-		struct amdgpu_vm *vm, bool is_aql,
-		struct kfd_bo_va_list **p_bo_va_entry)
+static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
+		struct amdgpu_vm *vm, bool is_aql)
 {
-	int ret;
-	struct kfd_bo_va_list *bo_va_entry;
-	struct amdgpu_bo *bo = mem->bo;
+	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
+	unsigned long bo_size = mem->bo->tbo.base.size;
 	uint64_t va = mem->va;
-	struct list_head *list_bo_va = &mem->bo_va_list;
-	unsigned long bo_size = bo->tbo.base.size;
+	struct kfd_mem_attachment *attachment[2] = {NULL, NULL};
+	struct amdgpu_bo *bo[2] = {NULL, NULL};
+	int i, ret;
 
 	if (!va) {
 		pr_err("Invalid VA when adding BO to VM\n");
 		return -EINVAL;
 	}
 
 	if (is_aql)
 		va += bo_size;
 
-	bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
-	if (!bo_va_entry)
-		return -ENOMEM;
+	for (i = 0; i <= is_aql; i++) {
+		attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL);
+		if (unlikely(!attachment[i])) {
+			ret = -ENOMEM;
+			goto unwind;
+		}
 
 		pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
 			 va + bo_size, vm);
 
+		if (adev == bo_adev || (mem->domain == AMDGPU_GEM_DOMAIN_VRAM &&
+					amdgpu_xgmi_same_hive(adev, bo_adev))) {
+			/* Mappings on the local GPU and VRAM mappings in the
+			 * local hive share the original BO
+			 */
+			attachment[i]->type = KFD_MEM_ATT_SHARED;
+			bo[i] = mem->bo;
+			drm_gem_object_get(&bo[i]->tbo.base);
+		} else if (i > 0) {
+			/* Multiple mappings on the same GPU share the BO */
+			attachment[i]->type = KFD_MEM_ATT_SHARED;
+			bo[i] = bo[0];
+			drm_gem_object_get(&bo[i]->tbo.base);
+		} else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
+			/* Create an SG BO to DMA-map userptrs on other GPUs */
+			attachment[i]->type = KFD_MEM_ATT_USERPTR;
+			ret = kfd_mem_attach_userptr(adev, mem, &bo[i]);
+			if (ret)
+				goto unwind;
+		} else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT &&
+			   mem->bo->tbo.type != ttm_bo_type_sg) {
+			/* GTT BOs use DMA-mapping ability of dynamic-attach
+			 * DMA bufs. TODO: The same should work for VRAM on
+			 * large-BAR GPUs.
+			 */
+			attachment[i]->type = KFD_MEM_ATT_DMABUF;
+			ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
+			if (ret)
+				goto unwind;
+		} else {
+			/* FIXME: Need to DMA-map other BO types:
+			 * large-BAR VRAM, doorbells, MMIO remap
+			 */
+			attachment[i]->type = KFD_MEM_ATT_SHARED;
+			bo[i] = mem->bo;
+			drm_gem_object_get(&bo[i]->tbo.base);
+		}
+
 		/* Add BO to VM internal data structures */
-	bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
-	if (!bo_va_entry->bo_va) {
-		ret = -EINVAL;
+		attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
+		if (unlikely(!attachment[i]->bo_va)) {
+			ret = -ENOMEM;
 			pr_err("Failed to add BO object to VM. ret == %d\n",
 			       ret);
-		goto err_vmadd;
+			goto unwind;
 		}
 
-	bo_va_entry->va = va;
-	bo_va_entry->pte_flags = get_pte_flags(adev, mem);
-	bo_va_entry->kgd_dev = (void *)adev;
-	list_add(&bo_va_entry->bo_list, list_bo_va);
-
-	if (p_bo_va_entry)
-		*p_bo_va_entry = bo_va_entry;
+		attachment[i]->va = va;
+		attachment[i]->pte_flags = get_pte_flags(adev, mem);
+		attachment[i]->adev = adev;
+		list_add(&attachment[i]->list, &mem->attachments);
 
-	/* Allocate validate page tables if needed */
-	ret = vm_validate_pt_pd_bos(vm);
-	if (ret) {
-		pr_err("validate_pt_pd_bos() failed\n");
-		goto err_alloc_pts;
+		va += bo_size;
 	}
 
 	return 0;
 
-err_alloc_pts:
-	amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
-	list_del(&bo_va_entry->bo_list);
-err_vmadd:
-	kfree(bo_va_entry);
+unwind:
+	for (; i >= 0; i--) {
+		if (!attachment[i])
+			continue;
+		if (attachment[i]->bo_va) {
+			amdgpu_vm_bo_rmv(adev, attachment[i]->bo_va);
+			list_del(&attachment[i]->list);
+		}
+		if (bo[i])
+			drm_gem_object_put(&bo[i]->tbo.base);
+		kfree(attachment[i]);
+	}
 	return ret;
 }
 
-static void remove_bo_from_vm(struct amdgpu_device *adev,
-		struct kfd_bo_va_list *entry, unsigned long size)
+static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
 {
-	pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
-			entry->va,
-			entry->va + size, entry);
-	amdgpu_vm_bo_rmv(adev, entry->bo_va);
-	list_del(&entry->bo_list);
-	kfree(entry);
+	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
+
+	pr_debug("\t remove VA 0x%llx in entry %p\n",
+			attachment->va, attachment);
+	amdgpu_vm_bo_rmv(attachment->adev, attachment->bo_va);
+	drm_gem_object_put(&bo->tbo.base);
+	list_del(&attachment->list);
+	kfree(attachment);
 }
 
 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
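The helpers above replace the old one-BO-VA-list bookkeeping with one kfd_mem_attachment per GPU VM, each DMA-mapped according to its type. Condensed for illustration, the per-GPU choice kfd_mem_attach() makes is roughly the following; this is a sketch, not code from the patch (the real function also creates the SG BO or dma-buf import and takes GEM references):

	/* Illustrative condensation of kfd_mem_attach()'s type selection.
	 * Every call used here appears in the hunk above; only this helper
	 * function is invented for exposition.
	 */
	static enum kfd_mem_attachment_type
	kfd_mem_attach_type_sketch(struct amdgpu_device *adev,
				   struct kgd_mem *mem)
	{
		struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);

		if (adev == bo_adev ||
		    (mem->domain == AMDGPU_GEM_DOMAIN_VRAM &&
		     amdgpu_xgmi_same_hive(adev, bo_adev)))
			return KFD_MEM_ATT_SHARED;	/* reuse the original BO */
		if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm))
			return KFD_MEM_ATT_USERPTR;	/* SG BO over the same pages */
		if (mem->domain == AMDGPU_GEM_DOMAIN_GTT &&
		    mem->bo->tbo.type != ttm_bo_type_sg)
			return KFD_MEM_ATT_DMABUF;	/* dynamic dma-buf attach */
		return KFD_MEM_ATT_SHARED;	/* FIXME cases: doorbells, MMIO, large-BAR VRAM */
	}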
@@ -728,7 +961,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
 				struct bo_vm_reservation_context *ctx)
 {
 	struct amdgpu_bo *bo = mem->bo;
-	struct kfd_bo_va_list *entry;
+	struct kfd_mem_attachment *entry;
 	unsigned int i;
 	int ret;

@@ -740,7 +973,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
 	INIT_LIST_HEAD(&ctx->list);
 	INIT_LIST_HEAD(&ctx->duplicates);
 
-	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
+	list_for_each_entry(entry, &mem->attachments, list) {
 		if ((vm && vm != entry->bo_va->base.vm) ||
 			(entry->is_mapped != map_type
 			&& map_type != BO_VM_ALL))
@@ -762,7 +995,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
 	list_add(&ctx->kfd_bo.tv.head, &ctx->list);
 
 	i = 0;
-	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
+	list_for_each_entry(entry, &mem->attachments, list) {
 		if ((vm && vm != entry->bo_va->base.vm) ||
 			(entry->is_mapped != map_type
 			&& map_type != BO_VM_ALL))
@@ -816,11 +1049,12 @@ static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
 	return ret;
 }
 
-static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
-				struct kfd_bo_va_list *entry,
+static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
+				struct kfd_mem_attachment *entry,
 				struct amdgpu_sync *sync)
 {
 	struct amdgpu_bo_va *bo_va = entry->bo_va;
+	struct amdgpu_device *adev = entry->adev;
 	struct amdgpu_vm *vm = bo_va->base.vm;
 
 	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
@@ -829,15 +1063,20 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
 
 	amdgpu_sync_fence(sync, bo_va->last_pt_update);
 
-	return 0;
+	kfd_mem_dmaunmap_attachment(mem, entry);
 }
 
-static int update_gpuvm_pte(struct amdgpu_device *adev,
-		struct kfd_bo_va_list *entry,
+static int update_gpuvm_pte(struct kgd_mem *mem,
+			    struct kfd_mem_attachment *entry,
 			    struct amdgpu_sync *sync)
 {
-	int ret;
 	struct amdgpu_bo_va *bo_va = entry->bo_va;
+	struct amdgpu_device *adev = entry->adev;
+	int ret;
 
+	ret = kfd_mem_dmamap_attachment(mem, entry);
+	if (ret)
+		return ret;
+
 	/* Update the page tables  */
 	ret = amdgpu_vm_bo_update(adev, bo_va, false);
@@ -849,14 +1088,15 @@ static int update_gpuvm_pte(struct amdgpu_device *adev,
 	return amdgpu_sync_fence(sync, bo_va->last_pt_update);
 }
 
-static int map_bo_to_gpuvm(struct amdgpu_device *adev,
-		struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
+static int map_bo_to_gpuvm(struct kgd_mem *mem,
+			   struct kfd_mem_attachment *entry,
+			   struct amdgpu_sync *sync,
 			   bool no_update_pte)
 {
 	int ret;
 
 	/* Set virtual address for the allocation */
-	ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
+	ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
 			       amdgpu_bo_size(entry->bo_va->base.bo),
 			       entry->pte_flags);
 	if (ret) {
@@ -868,7 +1108,7 @@ static int map_bo_to_gpuvm(struct amdgpu_device *adev,
 	if (no_update_pte)
 		return 0;
 
-	ret = update_gpuvm_pte(adev, entry, sync);
+	ret = update_gpuvm_pte(mem, entry, sync);
 	if (ret) {
 		pr_err("update_gpuvm_pte() failed\n");
 		goto update_gpuvm_pte_failed;
@@ -877,7 +1117,7 @@ static int map_bo_to_gpuvm(struct amdgpu_device *adev,
 	return 0;
 
 update_gpuvm_pte_failed:
-	unmap_bo_from_gpuvm(adev, entry, sync);
+	unmap_bo_from_gpuvm(mem, entry, sync);
 	return ret;
 }

@@ -1194,7 +1434,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 		ret = -ENOMEM;
 		goto err;
 	}
-	INIT_LIST_HEAD(&(*mem)->bo_va_list);
+	INIT_LIST_HEAD(&(*mem)->attachments);
 	mutex_init(&(*mem)->lock);
 	(*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);

@@ -1283,7 +1523,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 {
 	struct amdkfd_process_info *process_info = mem->process_info;
 	unsigned long bo_size = mem->bo->tbo.base.size;
-	struct kfd_bo_va_list *entry, *tmp;
+	struct kfd_mem_attachment *entry, *tmp;
 	struct bo_vm_reservation_context ctx;
 	struct ttm_validate_buffer *bo_list_entry;
 	unsigned int mapped_to_gpu_memory;
@@ -1326,13 +1566,12 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
 		mem->va + bo_size * (1 + mem->aql_queue));
 
-	/* Remove from VM internal data structures */
-	list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
-		remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
-				entry, bo_size);
-
 	ret = unreserve_bo_and_vms(&ctx, false, false);
 
+	/* Remove from VM internal data structures */
+	list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
+		kfd_mem_detach(entry);
+
 	/* Free the sync object */
 	amdgpu_sync_free(&mem->sync);

@@ -1357,6 +1596,8 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 
 	/* Free the BO*/
 	drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
+	if (mem->dmabuf)
+		dma_buf_put(mem->dmabuf);
 	drm_gem_object_put(&mem->bo->tbo.base);
 	mutex_destroy(&mem->lock);
 	kfree(mem);
@@ -1372,10 +1613,8 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 	int ret;
 	struct amdgpu_bo *bo;
 	uint32_t domain;
-	struct kfd_bo_va_list *entry;
+	struct kfd_mem_attachment *entry;
 	struct bo_vm_reservation_context ctx;
-	struct kfd_bo_va_list *bo_va_entry = NULL;
-	struct kfd_bo_va_list *bo_va_entry_aql = NULL;
 	unsigned long bo_size;
 	bool is_invalid_userptr = false;

@@ -1411,6 +1650,12 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 			mem->va + bo_size * (1 + mem->aql_queue),
 			avm, domain_string(domain));
 
+	if (!kfd_mem_is_attached(avm, mem)) {
+		ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
+		if (ret)
+			goto out;
+	}
+
 	ret = reserve_bo_and_vm(mem, avm, &ctx);
 	if (unlikely(ret))
 		goto out;
@@ -1424,22 +1669,9 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 	    bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
 		is_invalid_userptr = true;
 
-	if (check_if_add_bo_to_vm(avm, mem)) {
-		ret = add_bo_to_vm(adev, mem, avm, false,
-				&bo_va_entry);
-		if (ret)
-			goto add_bo_to_vm_failed;
-		if (mem->aql_queue) {
-			ret = add_bo_to_vm(adev, mem, avm,
-					true, &bo_va_entry_aql);
-			if (ret)
-				goto add_bo_to_vm_failed_aql;
-		}
-	} else {
+	ret = vm_validate_pt_pd_bos(avm);
+	if (unlikely(ret))
-			goto add_bo_to_vm_failed;
-	}
+		goto out_unreserve;
 
 	if (mem->mapped_to_gpu_memory == 0 &&
 	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
@@ -1450,27 +1682,28 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
 		if (ret) {
 			pr_debug("Validate failed\n");
-			goto map_bo_to_gpuvm_failed;
+			goto out_unreserve;
 		}
 	}
 
-	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
-		if (entry->bo_va->base.vm == avm && !entry->is_mapped) {
+	list_for_each_entry(entry, &mem->attachments, list) {
+		if (entry->bo_va->base.vm != avm || entry->is_mapped)
+			continue;
+
 		pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
-					entry->va, entry->va + bo_size,
-					entry);
+			 entry->va, entry->va + bo_size, entry);
 
-			ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
+		ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
 				      is_invalid_userptr);
 		if (ret) {
 			pr_err("Failed to map bo to gpuvm\n");
-				goto map_bo_to_gpuvm_failed;
+			goto out_unreserve;
 		}
 
 		ret = vm_update_pds(avm, ctx.sync);
 		if (ret) {
 			pr_err("Failed to update page directories\n");
-				goto map_bo_to_gpuvm_failed;
+			goto out_unreserve;
 		}
 
 		entry->is_mapped = true;
@@ -1478,7 +1711,6 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 		pr_debug("\t INC mapping count %d\n",
 			 mem->mapped_to_gpu_memory);
 	}
-	}
 
 	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
 		amdgpu_bo_fence(bo,
@@ -1488,13 +1720,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 
 	goto out;
 
-map_bo_to_gpuvm_failed:
-	if (bo_va_entry_aql)
-		remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
-add_bo_to_vm_failed_aql:
-	if (bo_va_entry)
-		remove_bo_from_vm(adev, bo_va_entry, bo_size);
-add_bo_to_vm_failed:
+out_unreserve:
 	unreserve_bo_and_vms(&ctx, false, false);
 out:
 	mutex_unlock(&mem->process_info->lock);
@@ -1505,11 +1731,10 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
 	struct amdkfd_process_info *process_info = avm->process_info;
 	unsigned long bo_size = mem->bo->tbo.base.size;
-	struct kfd_bo_va_list *entry;
+	struct kfd_mem_attachment *entry;
 	struct bo_vm_reservation_context ctx;
 	int ret;

@@ -1533,27 +1758,20 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 		mem->va + bo_size * (1 + mem->aql_queue),
 		avm);
 
-	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
-		if (entry->bo_va->base.vm == avm && entry->is_mapped) {
+	list_for_each_entry(entry, &mem->attachments, list) {
+		if (entry->bo_va->base.vm != avm || !entry->is_mapped)
+			continue;
+
 		pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
-					entry->va,
-					entry->va + bo_size,
-					entry);
+			 entry->va, entry->va + bo_size, entry);
 
-			ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
-			if (ret == 0) {
+		unmap_bo_from_gpuvm(mem, entry, ctx.sync);
 		entry->is_mapped = false;
-			} else {
-				pr_err("failed to unmap VA 0x%llx\n",
-						mem->va);
-				goto unreserve_out;
-			}
 
 		mem->mapped_to_gpu_memory--;
 		pr_debug("\t DEC mapping count %d\n",
 			 mem->mapped_to_gpu_memory);
 	}
-	}
 
 	/* If BO is unmapped from all VMs, unfence it. It can be evicted if
 	 * required.
@@ -1701,7 +1919,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
 	if (mmap_offset)
 		*mmap_offset = amdgpu_bo_mmap_offset(bo);
 
-	INIT_LIST_HEAD(&(*mem)->bo_va_list);
+	INIT_LIST_HEAD(&(*mem)->attachments);
 	mutex_init(&(*mem)->lock);
 
 	(*mem)->alloc_flags =
@@ -1898,7 +2116,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 	list_for_each_entry_safe(mem, tmp_mem,
 				 &process_info->userptr_inval_list,
 				 validate_list.head) {
-		struct kfd_bo_va_list *bo_va_entry;
+		struct kfd_mem_attachment *attachment;
 
 		bo = mem->bo;

@@ -1921,13 +2139,12 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 		 * VM faults if the GPU tries to access the invalid
 		 * memory.
 		 */
-		list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
-			if (!bo_va_entry->is_mapped)
+		list_for_each_entry(attachment, &mem->attachments, list) {
+			if (!attachment->is_mapped)
 				continue;
 
-			ret = update_gpuvm_pte((struct amdgpu_device *)
-					       bo_va_entry->kgd_dev,
-					       bo_va_entry, &sync);
+			kfd_mem_dmaunmap_attachment(mem, attachment);
+			ret = update_gpuvm_pte(mem, attachment, &sync);
 			if (ret) {
 				pr_err("%s: update PTE failed\n", __func__);
 				/* make sure this gets validated again */
@@ -2108,7 +2325,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 
 		struct amdgpu_bo *bo = mem->bo;
 		uint32_t domain = mem->domain;
-		struct kfd_bo_va_list *bo_va_entry;
+		struct kfd_mem_attachment *attachment;
 
 		total_size += amdgpu_bo_size(bo);

@@ -2128,12 +2345,12 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
 			goto validate_map_fail;
 		}
-		list_for_each_entry(bo_va_entry, &mem->bo_va_list,
-				    bo_list) {
-			ret = update_gpuvm_pte((struct amdgpu_device *)
-					      bo_va_entry->kgd_dev,
-					      bo_va_entry,
-					      &sync_obj);
+		list_for_each_entry(attachment, &mem->attachments, list) {
+			if (!attachment->is_mapped)
+				continue;
+
+			kfd_mem_dmaunmap_attachment(mem, attachment);
+			ret = update_gpuvm_pte(mem, attachment, &sync_obj);
 			if (ret) {
 				pr_debug("Memory eviction: update PTE failed. Try again\n");
 				goto validate_map_fail;
@@ -2208,7 +2425,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
 		return -ENOMEM;
 
 	mutex_init(&(*mem)->lock);
-	INIT_LIST_HEAD(&(*mem)->bo_va_list);
+	INIT_LIST_HEAD(&(*mem)->attachments);
 	(*mem)->bo = amdgpu_bo_ref(gws_bo);
 	(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
 	(*mem)->process_info = process_info;
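The net effect of this file's changes is that mapping one allocation into several GPU VMs creates one attachment per VM (two per VM for AQL queues), each with its own bo_va and its own DMA mapping. A minimal sketch of what two map calls boil down to, using only functions and signatures from the hunks above; reservation, PTE commit, and error handling are omitted:

	/* Illustrative only; adev0/adev1 and the VMs are assumed to come
	 * from the usual KFD process-device setup.
	 */
	static void attach_on_two_gpus_sketch(struct amdgpu_device *adev0,
					      struct amdgpu_device *adev1,
					      struct kgd_mem *mem,
					      struct amdgpu_vm *vm0,
					      struct amdgpu_vm *vm1)
	{
		if (!kfd_mem_is_attached(vm0, mem))
			kfd_mem_attach(adev0, mem, vm0, mem->aql_queue);
		if (!kfd_mem_is_attached(vm1, mem))
			kfd_mem_attach(adev1, mem, vm1, mem->aql_queue);
		/* mem->attachments now holds one entry per VM;
		 * update_gpuvm_pte() DMA-maps each attachment for its own
		 * device when the mapping is committed.
		 */
	}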
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c  +3 −0
@@ -1828,6 +1828,9 @@ int amdgpu_atombios_init(struct amdgpu_device *adev)
 	if (adev->is_atom_fw) {
 		amdgpu_atomfirmware_scratch_regs_init(adev);
 		amdgpu_atomfirmware_allocate_fb_scratch(adev);
+		/* cached firmware_flags for further usage */
+		adev->mode_info.firmware_flags =
+			amdgpu_atomfirmware_query_firmware_capability(adev);
 	} else {
 		amdgpu_atombios_scratch_regs_init(adev);
 		amdgpu_atombios_allocate_fb_scratch(adev);
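Caching the capability word at init time means later code can test ATOM firmware features without re-walking the BIOS tables. A hypothetical consumer, assuming only the field cached above and a standard atom_firmware_capability bit from atomfirmware.h:

	/* Hypothetical helper, not part of this patch;
	 * ATOM_FIRMWARE_CAP_FIRMWARE_POSTED is an existing atomfirmware
	 * capability bit used here purely for illustration.
	 */
	static bool amdgpu_fw_posted_sketch(struct amdgpu_device *adev)
	{
		return !!(adev->mode_info.firmware_flags &
			  ATOM_FIRMWARE_CAP_FIRMWARE_POSTED);
	}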