Commit 55c7d6a9 authored by Linus Torvalds

Merge tag 'drm-next-2022-12-23' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Holiday fixes!

  Two batches from amd, and one group of i915 changes.

  amdgpu:
   - Spelling fix
   - BO pin fix
   - Properly handle polaris 10/11 overlap asics
   - GMC9 fix
   - SR-IOV suspend fix
   - DCN 3.1.4 fix
   - KFD userptr locking fix
   - SMU13.x fixes
   - GDS/GWS/OA handling fix
   - Reserved VMID handling fixes
   - FRU EEPROM fix
   - BO validation fixes
   - Avoid large variable on the stack
   - S0ix fixes
   - SMU 13.x fixes
   - VCN fix
   - Add missing fence reference

  amdkfd:
   - Fix init vm error handling
   - Fix double release of compute pasid

  i915:
   - Documentation fixes
   - OA-perf related fix
   - VLV/CHV HDMI/DP audio fix
   - Display DDI/Transcoder fix
   - Migrate fixes"

* tag 'drm-next-2022-12-23' of git://anongit.freedesktop.org/drm/drm: (39 commits)
  drm/amdgpu: grab extra fence reference for drm_sched_job_add_dependency
  drm/amdgpu: enable VCN DPG for GC IP v11.0.4
  drm/amdgpu: skip mes self test after s0i3 resume for MES IP v11.0
  drm/amd/pm: correct the fan speed retrieving in PWM for some SMU13 asics
  drm/amd/pm: bump SMU13.0.0 driver_if header to version 0x34
  drm/amdgpu: skip MES for S0ix as well since it's part of GFX
  drm/amd/pm: avoid large variable on kernel stack
  drm/amdkfd: Fix double release compute pasid
  drm/amdkfd: Fix kfd_process_device_init_vm error handling
  drm/amd/pm: update SMU13.0.0 reported maximum shader clock
  drm/amd/pm: correct SMU13.0.0 pstate profiling clock settings
  drm/amd/pm: enable GPO dynamic control support for SMU13.0.7
  drm/amd/pm: enable GPO dynamic control support for SMU13.0.0
  drm/amdgpu: revert "generally allow over-commit during BO allocation"
  drm/amdgpu: Remove unnecessary domain argument
  drm/amdgpu: Fix size validation for non-exclusive domains (v4)
  drm/amdgpu: Check if fru_addr is not NULL (v2)
  drm/i915/ttm: consider CCS for backup objects
  drm/i915/migrate: fix corner case in CCS aux copying
  drm/amdgpu: rework reserved VMID handling
  ...
parents 06d65a6f fe8f5b2f
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h (+12 −5)
@@ -29,6 +29,7 @@
 #include <linux/mm.h>
 #include <linux/kthread.h>
 #include <linux/workqueue.h>
+#include <linux/mmu_notifier.h>
 #include <kgd_kfd_interface.h>
 #include <drm/ttm/ttm_execbuf_util.h>
 #include "amdgpu_sync.h"
@@ -65,6 +66,7 @@ struct kgd_mem {
 	struct mutex lock;
 	struct amdgpu_bo *bo;
 	struct dma_buf *dmabuf;
+	struct hmm_range *range;
 	struct list_head attachments;
 	/* protected by amdkfd_process_info.lock */
 	struct ttm_validate_buffer validate_list;
@@ -75,7 +77,7 @@ struct kgd_mem {
 
 	uint32_t alloc_flags;
 
-	atomic_t invalid;
+	uint32_t invalid;
 	struct amdkfd_process_info *process_info;
 
 	struct amdgpu_sync sync;
@@ -131,7 +133,8 @@ struct amdkfd_process_info {
 	struct amdgpu_amdkfd_fence *eviction_fence;
 
 	/* MMU-notifier related fields */
-	atomic_t evicted_bos;
+	struct mutex notifier_lock;
+	uint32_t evicted_bos;
 	struct delayed_work restore_userptr_work;
 	struct pid *pid;
 	bool block_mmu_notifications;
@@ -180,7 +183,8 @@ int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data);
 bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
 struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
+int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
+				unsigned long cur_seq, struct kgd_mem *mem);
 #else
 static inline
 bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
@@ -201,7 +205,8 @@ int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
 }
 
 static inline
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
+int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
+				unsigned long cur_seq, struct kgd_mem *mem)
{
 	return 0;
 }
@@ -265,8 +270,10 @@ int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_
 	(&((struct amdgpu_fpriv *)					\
 		((struct drm_file *)(drm_priv))->driver_priv)->vm)
 
+int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
+				     struct file *filp, u32 pasid);
 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
-					struct file *filp, u32 pasid,
+					struct file *filp,
 					void **process_info,
 					struct dma_fence **ef);
 void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
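Note on the header change above: `invalid` and `evicted_bos` move from `atomic_t` to plain `uint32_t` because they are now serialized by the new `notifier_lock` rather than lock-free atomics. A minimal sketch of that pattern, using hypothetical demo_* names rather than the driver's own types:

    #include <linux/mutex.h>
    #include <linux/types.h>

    struct demo_process_info {
    	struct mutex notifier_lock;	/* protects evicted_bos */
    	uint32_t evicted_bos;		/* plain integer, guarded by the mutex */
    };

    /* Returns true on the first eviction; replaces the old
     * atomic_inc_return(&info->evicted_bos) == 1 idiom.
     */
    static bool demo_first_eviction(struct demo_process_info *info)
    {
    	bool first;

    	mutex_lock(&info->notifier_lock);
    	first = (++info->evicted_bos == 1);
    	mutex_unlock(&info->notifier_lock);
    	return first;
    }

The mutex trades a little overhead for the ability to read the counter and related state as one consistent snapshot, which atomics alone could not guarantee.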
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c (+168 −83)
@@ -964,7 +964,9 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
 		 * later stage when it is scheduled by another ioctl called by
 		 * CRIU master process for the target pid for restore.
 		 */
-		atomic_inc(&mem->invalid);
+		mutex_lock(&process_info->notifier_lock);
+		mem->invalid++;
+		mutex_unlock(&process_info->notifier_lock);
 		mutex_unlock(&process_info->lock);
 		return 0;
 	}
@@ -1301,6 +1303,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
 			return -ENOMEM;
 
 		mutex_init(&info->lock);
+		mutex_init(&info->notifier_lock);
 		INIT_LIST_HEAD(&info->vm_list_head);
 		INIT_LIST_HEAD(&info->kfd_bo_list);
 		INIT_LIST_HEAD(&info->userptr_valid_list);
@@ -1317,7 +1320,6 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
 		}
 
 		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
-		atomic_set(&info->evicted_bos, 0);
 		INIT_DELAYED_WORK(&info->restore_userptr_work,
 				  amdgpu_amdkfd_restore_userptr_worker);
 
@@ -1372,6 +1374,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
 		put_pid(info->pid);
 create_evict_fence_fail:
 		mutex_destroy(&info->lock);
+		mutex_destroy(&info->notifier_lock);
 		kfree(info);
 	}
 	return ret;
@@ -1426,10 +1429,9 @@ static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo)
 	amdgpu_bo_unreserve(bo);
 }
 
-int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
-					   struct file *filp, u32 pasid,
-					   void **process_info,
-					   struct dma_fence **ef)
+int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
+				     struct file *filp, u32 pasid)
+
 {
 	struct amdgpu_fpriv *drv_priv;
 	struct amdgpu_vm *avm;
@@ -1440,10 +1442,6 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
 		return ret;
 	avm = &drv_priv->vm;
 
-	/* Already a compute VM? */
-	if (avm->process_info)
-		return -EINVAL;
-
 	/* Free the original amdgpu allocated pasid,
 	 * will be replaced with kfd allocated pasid.
 	 */
@@ -1452,14 +1450,36 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
 		amdgpu_vm_set_pasid(adev, avm, 0);
 	}
 
-	/* Convert VM into a compute VM */
-	ret = amdgpu_vm_make_compute(adev, avm);
+	ret = amdgpu_vm_set_pasid(adev, avm, pasid);
 	if (ret)
 		return ret;
 
-	ret = amdgpu_vm_set_pasid(adev, avm, pasid);
+	return 0;
+}
+
+int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
+					   struct file *filp,
+					   void **process_info,
+					   struct dma_fence **ef)
+{
+	struct amdgpu_fpriv *drv_priv;
+	struct amdgpu_vm *avm;
+	int ret;
+
+	ret = amdgpu_file_to_fpriv(filp, &drv_priv);
+	if (ret)
+		return ret;
+	avm = &drv_priv->vm;
+
+	/* Already a compute VM? */
+	if (avm->process_info)
+		return -EINVAL;
+
+	/* Convert VM into a compute VM */
+	ret = amdgpu_vm_make_compute(adev, avm);
+	if (ret)
+		return ret;
+
 	/* Initialize KFD part of the VM and process info */
 	ret = init_kfd_vm(avm, process_info, ef);
 	if (ret)
@@ -1496,6 +1516,7 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
 		cancel_delayed_work_sync(&process_info->restore_userptr_work);
 		put_pid(process_info->pid);
 		mutex_destroy(&process_info->lock);
+		mutex_destroy(&process_info->notifier_lock);
 		kfree(process_info);
 	}
 }
@@ -1548,7 +1569,9 @@ int amdgpu_amdkfd_criu_resume(void *p)
 
 	mutex_lock(&pinfo->lock);
 	pr_debug("scheduling work\n");
-	atomic_inc(&pinfo->evicted_bos);
+	mutex_lock(&pinfo->notifier_lock);
+	pinfo->evicted_bos++;
+	mutex_unlock(&pinfo->notifier_lock);
 	if (!READ_ONCE(pinfo->block_mmu_notifications)) {
 		ret = -EINVAL;
 		goto out_unlock;
@@ -1773,8 +1796,13 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 	list_del(&bo_list_entry->head);
 	mutex_unlock(&process_info->lock);
 
-	/* No more MMU notifiers */
-	amdgpu_hmm_unregister(mem->bo);
+	/* Cleanup user pages and MMU notifiers */
+	if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
+		amdgpu_hmm_unregister(mem->bo);
+		mutex_lock(&process_info->notifier_lock);
+		amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range);
+		mutex_unlock(&process_info->notifier_lock);
+	}
 
 	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
 	if (unlikely(ret))
@@ -1864,6 +1892,16 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 	 */
 	mutex_lock(&mem->process_info->lock);
 
+	/* Lock notifier lock. If we find an invalid userptr BO, we can be
+	 * sure that the MMU notifier is no longer running
+	 * concurrently and the queues are actually stopped
+	 */
+	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
+		mutex_lock(&mem->process_info->notifier_lock);
+		is_invalid_userptr = !!mem->invalid;
+		mutex_unlock(&mem->process_info->notifier_lock);
+	}
+
 	mutex_lock(&mem->lock);
 
 	domain = mem->domain;
@@ -2241,34 +2279,38 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
  *
  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
  * cannot do any memory allocations, and cannot take any locks that
- * are held elsewhere while allocating memory. Therefore this is as
- * simple as possible, using atomic counters.
+ * are held elsewhere while allocating memory.
  *
  * It doesn't do anything to the BO itself. The real work happens in
  * restore, where we get updated page addresses. This function only
  * ensures that GPU access to the BO is stopped.
  */
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
-				struct mm_struct *mm)
+int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
+				unsigned long cur_seq, struct kgd_mem *mem)
 {
 	struct amdkfd_process_info *process_info = mem->process_info;
-	int evicted_bos;
 	int r = 0;
 
-	/* Do not process MMU notifications until stage-4 IOCTL is received */
+	/* Do not process MMU notifications during CRIU restore until
+	 * KFD_CRIU_OP_RESUME IOCTL is received
+	 */
 	if (READ_ONCE(process_info->block_mmu_notifications))
 		return 0;
 
-	atomic_inc(&mem->invalid);
-	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
-	if (evicted_bos == 1) {
+	mutex_lock(&process_info->notifier_lock);
+	mmu_interval_set_seq(mni, cur_seq);
+
+	mem->invalid++;
+	if (++process_info->evicted_bos == 1) {
 		/* First eviction, stop the queues */
-		r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
+		r = kgd2kfd_quiesce_mm(mni->mm,
+				       KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
 		if (r)
 			pr_err("Failed to quiesce KFD\n");
 		schedule_delayed_work(&process_info->restore_userptr_work,
 			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
 	}
+	mutex_unlock(&process_info->notifier_lock);
 
 	return r;
 }
@@ -2285,19 +2327,43 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
 	struct kgd_mem *mem, *tmp_mem;
 	struct amdgpu_bo *bo;
 	struct ttm_operation_ctx ctx = { false, false };
-	int invalid, ret;
+	uint32_t invalid;
+	int ret = 0;
 
-	/* Move all invalidated BOs to the userptr_inval_list and
-	 * release their user pages by migration to the CPU domain
-	 */
+	mutex_lock(&process_info->notifier_lock);
+
+	/* Move all invalidated BOs to the userptr_inval_list */
 	list_for_each_entry_safe(mem, tmp_mem,
 				 &process_info->userptr_valid_list,
-				 validate_list.head) {
-		if (!atomic_read(&mem->invalid))
-			continue; /* BO is still valid */
+				 validate_list.head)
+		if (mem->invalid)
+			list_move_tail(&mem->validate_list.head,
+				       &process_info->userptr_inval_list);
+
+	/* Go through userptr_inval_list and update any invalid user_pages */
+	list_for_each_entry(mem, &process_info->userptr_inval_list,
+			    validate_list.head) {
+		invalid = mem->invalid;
+		if (!invalid)
+			/* BO hasn't been invalidated since the last
+			 * revalidation attempt. Keep its page list.
+			 */
+			continue;
 
 		bo = mem->bo;
 
-		if (amdgpu_bo_reserve(bo, true))
-			return -EAGAIN;
-		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+		amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range);
+		mem->range = NULL;
+
+		/* BO reservations and getting user pages (hmm_range_fault)
+		 * must happen outside the notifier lock
+		 */
+		mutex_unlock(&process_info->notifier_lock);
+
+		/* Move the BO to system (CPU) domain if necessary to unmap
+		 * and free the SG table
+		 */
+		if (bo->tbo.resource->mem_type != TTM_PL_SYSTEM) {
+			if (amdgpu_bo_reserve(bo, true))
+				return -EAGAIN;
+			amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
@@ -2308,31 +2374,11 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
-			       __func__);
-			return -EAGAIN;
-		}
-
-		list_move_tail(&mem->validate_list.head,
-			       &process_info->userptr_inval_list);
-	}
-
-	if (list_empty(&process_info->userptr_inval_list))
-		return 0; /* All evicted userptr BOs were freed */
-
-	/* Go through userptr_inval_list and update any invalid user_pages */
-	list_for_each_entry(mem, &process_info->userptr_inval_list,
-			    validate_list.head) {
-		struct hmm_range *range;
-
-		invalid = atomic_read(&mem->invalid);
-		if (!invalid)
-			/* BO hasn't been invalidated since the last
-			 * revalidation attempt. Keep its BO list.
-			 */
-			continue;
-
-		bo = mem->bo;
-
+				       __func__);
+				return -EAGAIN;
+			}
+		}
+
 		/* Get updated user pages */
 		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
-						   &range);
+						   &mem->range);
 		if (ret) {
 			pr_debug("Failed %d to get user pages\n", ret);
 
@@ -2345,30 +2391,32 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
 			 */
 			if (ret != -EFAULT)
 				return ret;
-		} else {
-			/*
-			 * FIXME: Cannot ignore the return code, must hold
-			 * notifier_lock
-			 */
-			amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
+
+			ret = 0;
 		}
 
+		mutex_lock(&process_info->notifier_lock);
+
 		/* Mark the BO as valid unless it was invalidated
 		 * again concurrently.
 		 */
-		if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
-			return -EAGAIN;
+		if (mem->invalid != invalid) {
+			ret = -EAGAIN;
+			goto unlock_out;
+		}
+		mem->invalid = 0;
 	}
 
-	return 0;
+unlock_out:
+	mutex_unlock(&process_info->notifier_lock);
+
+	return ret;
 }
 
 /* Validate invalid userptr BOs
  *
- * Validates BOs on the userptr_inval_list, and moves them back to the
- * userptr_valid_list. Also updates GPUVM page tables with new page
- * addresses and waits for the page table updates to complete.
+ * Validates BOs on the userptr_inval_list. Also updates GPUVM page tables
+ * with new page addresses and waits for the page table updates to complete.
  */
 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 {
@@ -2439,9 +2487,6 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 			}
 		}
 
-		list_move_tail(&mem->validate_list.head,
-			       &process_info->userptr_valid_list);
-
 		/* Update mapping. If the BO was not validated
 		 * (because we couldn't get user pages), this will
 		 * clear the page table entries, which will result in
@@ -2457,7 +2502,9 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 			if (ret) {
 				pr_err("%s: update PTE failed\n", __func__);
 				/* make sure this gets validated again */
-				atomic_inc(&mem->invalid);
+				mutex_lock(&process_info->notifier_lock);
+				mem->invalid++;
+				mutex_unlock(&process_info->notifier_lock);
 				goto unreserve_out;
 			}
 		}
@@ -2477,6 +2524,36 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 	return ret;
 }
 
+/* Confirm that all user pages are valid while holding the notifier lock
+ *
+ * Moves valid BOs from the userptr_inval_list back to userptr_val_list.
+ */
+static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_info)
+{
+	struct kgd_mem *mem, *tmp_mem;
+	int ret = 0;
+
+	list_for_each_entry_safe(mem, tmp_mem,
+				 &process_info->userptr_inval_list,
+				 validate_list.head) {
+		bool valid = amdgpu_ttm_tt_get_user_pages_done(
+				mem->bo->tbo.ttm, mem->range);
+
+		mem->range = NULL;
+		if (!valid) {
+			WARN(!mem->invalid, "Invalid BO not marked invalid");
+			ret = -EAGAIN;
+			continue;
+		}
+		WARN(mem->invalid, "Valid BO is marked invalid");
+
+		list_move_tail(&mem->validate_list.head,
+			       &process_info->userptr_valid_list);
+	}
+
+	return ret;
+}
+
 /* Worker callback to restore evicted userptr BOs
  *
  * Tries to update and validate all userptr BOs. If successful and no
@@ -2491,9 +2568,11 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
 			     restore_userptr_work);
 	struct task_struct *usertask;
 	struct mm_struct *mm;
-	int evicted_bos;
+	uint32_t evicted_bos;
 
-	evicted_bos = atomic_read(&process_info->evicted_bos);
+	mutex_lock(&process_info->notifier_lock);
+	evicted_bos = process_info->evicted_bos;
+	mutex_unlock(&process_info->notifier_lock);
 	if (!evicted_bos)
 		return;
 
@@ -2516,9 +2595,6 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
 	 * and we can just restart the queues.
 	 */
 	if (!list_empty(&process_info->userptr_inval_list)) {
-		if (atomic_read(&process_info->evicted_bos) != evicted_bos)
-			goto unlock_out; /* Concurrent eviction, try again */
-
 		if (validate_invalid_user_pages(process_info))
 			goto unlock_out;
 	}
@@ -2527,10 +2603,17 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
 	 * be a first eviction that calls quiesce_mm. The eviction
 	 * reference counting inside KFD will handle this case.
 	 */
-	if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
-	    evicted_bos)
-		goto unlock_out;
-	evicted_bos = 0;
+	mutex_lock(&process_info->notifier_lock);
+	if (process_info->evicted_bos != evicted_bos)
+		goto unlock_notifier_out;
+
+	if (confirm_valid_user_pages_locked(process_info)) {
+		WARN(1, "User pages unexpectedly invalid");
+		goto unlock_notifier_out;
+	}
+
+	process_info->evicted_bos = evicted_bos = 0;
+
 	if (kgd2kfd_resume_mm(mm)) {
 		pr_err("%s: Failed to resume KFD\n", __func__);
 		/* No recovery from this failure. Probably the CP is
@@ -2538,6 +2621,8 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
 		 */
 	}
 
+unlock_notifier_out:
+	mutex_unlock(&process_info->notifier_lock);
 unlock_out:
 	mutex_unlock(&process_info->lock);
 
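Note on the gpuvm.c rework above: it follows the standard mmu_interval_notifier recipe — the invalidate callback takes the driver lock, records the new sequence with mmu_interval_set_seq(), and marks the range invalid; the restore path later re-checks the pages with amdgpu_ttm_tt_get_user_pages_done() under the same lock. A hedged sketch of a generic invalidate callback in that style (the demo_* names are placeholders, not driver code):

    #include <linux/mmu_notifier.h>
    #include <linux/mutex.h>

    struct demo_userptr {
    	struct mmu_interval_notifier notifier;
    	struct mutex *driver_lock;	/* plays the role of notifier_lock */
    	u32 invalid;			/* bumped on every invalidation */
    };

    static bool demo_invalidate(struct mmu_interval_notifier *mni,
    			    const struct mmu_notifier_range *range,
    			    unsigned long cur_seq)
    {
    	struct demo_userptr *up =
    		container_of(mni, struct demo_userptr, notifier);

    	/* May run in reclaim context; back off if we cannot sleep */
    	if (!mmu_notifier_range_blockable(range))
    		return false;

    	mutex_lock(up->driver_lock);
    	/* Record the sequence under the driver lock so a concurrent
    	 * page-table update notices that its pages went stale.
    	 */
    	mmu_interval_set_seq(mni, cur_seq);
    	up->invalid++;
    	mutex_unlock(up->driver_lock);
    	return true;
    }

    static const struct mmu_interval_notifier_ops demo_notifier_ops = {
    	.invalidate = demo_invalidate,
    };

Taking the sequence update under the driver lock is what lets the restore worker hold that same lock while validating pages, closing the race the old atomic counters left open.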
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c (+8 −2)
@@ -3016,14 +3016,15 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
 			continue;
 		}
 
-		/* skip suspend of gfx and psp for S0ix
+		/* skip suspend of gfx/mes and psp for S0ix
 		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
 		 * like at runtime. PSP is also part of the always on hardware
 		 * so no need to suspend it.
 		 */
 		if (adev->in_s0ix &&
 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
-		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
+		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
+		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
 			continue;
 
 		/* XXX handle errors */
@@ -4112,6 +4113,11 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 
 	adev->in_suspend = true;
 
+	/* Evict the majority of BOs before grabbing the full access */
+	r = amdgpu_device_evict_resources(adev);
+	if (r)
+		return r;
+
 	if (amdgpu_sriov_vf(adev)) {
 		amdgpu_virt_fini_data_exchange(adev);
 		r = amdgpu_virt_request_full_gpu(adev, false);
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c (+11 −2)
@@ -2039,6 +2039,15 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 			 "See modparam exp_hw_support\n");
 		return -ENODEV;
 	}
+	/* differentiate between P10 and P11 asics with the same DID */
+	if (pdev->device == 0x67FF &&
+	    (pdev->revision == 0xE3 ||
+	     pdev->revision == 0xE7 ||
+	     pdev->revision == 0xF3 ||
+	     pdev->revision == 0xF7)) {
+		flags &= ~AMD_ASIC_MASK;
+		flags |= CHIP_POLARIS10;
+	}
 
 	/* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping,
 	 * however, SME requires an indirect IOMMU mapping because the encryption
@@ -2108,12 +2117,12 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 
 	pci_set_drvdata(pdev, ddev);
 
-	ret = amdgpu_driver_load_kms(adev, ent->driver_data);
+	ret = amdgpu_driver_load_kms(adev, flags);
 	if (ret)
 		goto err_pci;
 
 retry_init:
-	ret = drm_dev_register(ddev, ent->driver_data);
+	ret = drm_dev_register(ddev, flags);
 	if (ret == -EAGAIN && ++retry <= 3) {
 		DRM_INFO("retry init %d\n", retry);
 		/* Don't request EX mode too frequently which is attacking */
drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c (+4 −2)
@@ -64,6 +64,7 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
 			    sizeof(atom_ctx->vbios_version)) ||
 		    strnstr(atom_ctx->vbios_version, "D163",
 			    sizeof(atom_ctx->vbios_version))) {
-			*fru_addr = FRU_EEPROM_MADDR_6;
+			if (fru_addr)
+				*fru_addr = FRU_EEPROM_MADDR_6;
 			return true;
 		} else {
@@ -83,6 +84,7 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
 				    sizeof(atom_ctx->vbios_version))) {
 				return false;
 			} else {
-				*fru_addr = FRU_EEPROM_MADDR_6;
+				if (fru_addr)
+					*fru_addr = FRU_EEPROM_MADDR_6;
 				return true;
 			}
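Note on the fru_eeprom.c fix above: it is the usual guard for an optional out-parameter — write through the pointer only when the caller supplied one. A trivial sketch of the pattern with hypothetical names:

    /* demo_get_addr() is illustrative; 0x6 stands in for FRU_EEPROM_MADDR_6 */
    static bool demo_get_addr(u32 *fru_addr)
    {
    	if (fru_addr)
    		*fru_addr = 0x6;
    	return true;
    }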