Commit 4efdddbc authored by Daniel Vetter's avatar Daniel Vetter
Browse files

Merge tag 'amd-drm-next-5.17-2022-01-12' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-5.17-2022-01-12:

amdgpu:
- SR-IOV fixes
- Suspend/resume fixes
- Display fixes
- DMCUB fixes
- DP alt mode fixes
- RAS fixes
- UBSAN fix
- Navy Flounder VCN fix
- ttm resource manager cleanup
- default_groups change for kobj_type
- vkms fix
- Aldebaran fixes

amdkfd:
- SDMA ECC interrupt fix
- License clarification
- Pointer check fix
- DQM fixes for hawaii
- default_groups change for kobj_type
- Typo fixes

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220113030537.5758-1-alexander.deucher@amd.com
parents 820e690e 5eb877b2
Loading
Loading
Loading
Loading
+0 −7
Original line number Diff line number Diff line
@@ -514,13 +514,6 @@ int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
	return r;
}

uint64_t amdgpu_amdkfd_get_vram_usage(struct amdgpu_device *adev)
{
	struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);

	return amdgpu_vram_mgr_usage(vram_man);
}

uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
					  struct amdgpu_device *src)
{
+0 −1
Original line number Diff line number Diff line
@@ -223,7 +223,6 @@ int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags);
uint64_t amdgpu_amdkfd_get_vram_usage(struct amdgpu_device *adev);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
					  struct amdgpu_device *src);
int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
+2 −3
Original line number Diff line number Diff line
@@ -298,7 +298,6 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
{
	s64 time_us, increment_us;
	u64 free_vram, total_vram, used_vram;
	struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling.
	 *
@@ -315,7 +314,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
	}

	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
	used_vram = amdgpu_vram_mgr_usage(vram_man);
	used_vram = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);
@@ -362,7 +361,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
		u64 total_vis_vram = adev->gmc.visible_vram_size;
		u64 used_vis_vram =
		  amdgpu_vram_mgr_vis_usage(vram_man);
		  amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);

		if (used_vis_vram < total_vis_vram) {
			u64 free_vis_vram = total_vis_vram - used_vis_vram;
+18 −18
Original line number Diff line number Diff line
@@ -552,7 +552,7 @@ void amdgpu_device_wreg(struct amdgpu_device *adev,
}

/**
 * amdgpu_mm_wreg_mmio_rlc -  write register either with mmio or with RLC path if in range
 * amdgpu_mm_wreg_mmio_rlc -  write register either with direct/indirect mmio or with RLC path if in range
 *
 * this function is invoked only the debugfs register access
 */
@@ -567,6 +567,8 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
			return adev->gfx.rlc.funcs->sriov_wreg(adev, reg, v, 0, 0);
	} else if ((reg * 4) >= adev->rmmio_size) {
		adev->pcie_wreg(adev, reg * 4, v);
	} else {
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	}
@@ -1448,7 +1450,7 @@ static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
		break;
	default:
		return -EINVAL;
		break;
	}

	return 0;
@@ -3496,9 +3498,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
	mutex_init(&adev->psp.mutex);
	mutex_init(&adev->notifier_lock);

	r = amdgpu_device_init_apu_flags(adev);
	if (r)
		return r;
	 amdgpu_device_init_apu_flags(adev);

	r = amdgpu_device_check_arguments(adev);
	if (r)
@@ -3833,6 +3833,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,

static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
{

	/* Clear all CPU mappings pointing to this device */
	unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);

@@ -3913,6 +3914,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)

void amdgpu_device_fini_sw(struct amdgpu_device *adev)
{
	int idx;

	amdgpu_fence_driver_sw_fini(adev);
	amdgpu_device_ip_fini(adev);
	release_firmware(adev->firmware.gpu_info_fw);
@@ -3937,6 +3940,14 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
		vga_client_unregister(adev->pdev);

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {

		iounmap(adev->rmmio);
		adev->rmmio = NULL;
		amdgpu_device_doorbell_fini(adev);
		drm_dev_exit(idx);
	}

	if (IS_ENABLED(CONFIG_PERF_EVENTS))
		amdgpu_pmu_fini(adev);
	if (adev->mman.discovery_bin)
@@ -3957,8 +3968,8 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
 */
static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
{
	/* No need to evict vram on APUs for suspend to ram */
	if (adev->in_s3 && (adev->flags & AMD_IS_APU))
	/* No need to evict vram on APUs for suspend to ram or s2idle */
	if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
		return;

	if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
@@ -4005,16 +4016,11 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
	if (!adev->in_s0ix)
		amdgpu_amdkfd_suspend(adev, adev->in_runpm);

	/* First evict vram memory */
	amdgpu_device_evict_resources(adev);

	amdgpu_fence_driver_hw_fini(adev);

	amdgpu_device_ip_suspend_phase2(adev);
	/* This second call to evict device resources is to evict
	 * the gart page table using the CPU.
	 */
	amdgpu_device_evict_resources(adev);

	return 0;
}
@@ -4359,8 +4365,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
		goto error;

	amdgpu_virt_init_data_exchange(adev);
	/* we need recover gart prior to run SMC/CP/SDMA resume */
	amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));

	r = amdgpu_device_fw_loading(adev);
	if (r)
@@ -4680,10 +4684,6 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
					amdgpu_inc_vram_lost(tmp_adev);
				}

				r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
				if (r)
					goto out;

				r = amdgpu_device_fw_loading(tmp_adev);
				if (r)
					return r;
+2 −1
Original line number Diff line number Diff line
@@ -550,7 +550,8 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
	}
	/* some IP discovery tables on Navy Flounder don't have this set correctly */
	if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) &&
	    (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2)))
	    (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2)) &&
	    (adev->pdev->revision != 0xFF))
		adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
Loading