Commit 45be2048 authored by Dave Airlie
Browse files

Merge tag 'amd-drm-next-6.3-2023-01-06' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-6.3-2023-01-06:

amdgpu:
- secure display support for multiple displays
- DML optimizations
- DCN 3.2 updates
- PSR updates
- DP 2.1 updates
- SR-IOV RAS updates
- VCN RAS support
- SMU 13.x updates
- Switch 1 element arrays to flexible arrays
- Add RAS support for DF 4.3
- Stack size improvements
- S0ix rework
- Soft reset fix
- Allow 0 as a vram limit on APUs
- Display fixes
- Misc code cleanups
- Documentation fixes
- Handle profiling modes for SMU13.x

amdkfd:
- Error handling fixes
- PASID fixes

radeon:
- Switch 1 element arrays to flexible arrays

drm:
- Add DP adaptive sync DPCD definitions

UAPI:
- Add new INFO queries for peak and min sclk/mclk for profile modes on newer chips
  Proposed mesa patch: https://gitlab.freedesktop.org/mesa/drm/-/merge_requests/278



Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230106222037.7870-1-alexander.deucher@amd.com
parents 03a0a104 f6e856e7
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -81,7 +81,8 @@ amdgpu-y += \
# add DF block
amdgpu-y += \
	df_v1_7.o \
	df_v3_6.o
	df_v3_6.o \
	df_v4_3.o

# add GMC block
amdgpu-y += \
+10 −4
Original line number Diff line number Diff line
@@ -149,7 +149,7 @@ struct amdgpu_watchdog_timer
 * Modules parameters.
 */
extern int amdgpu_modeset;
extern int amdgpu_vram_limit;
extern unsigned int amdgpu_vram_limit;
extern int amdgpu_vis_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_gtt_size;
@@ -194,6 +194,7 @@ extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
extern int amdgpu_smu_pptable_id;
extern uint amdgpu_dc_feature_mask;
extern uint amdgpu_freesync_vid_mode;
extern uint amdgpu_dc_debug_mask;
extern uint amdgpu_dc_visual_confirm;
extern uint amdgpu_dm_abm_level;
@@ -607,7 +608,7 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);

/* VRAM scratch page for HDP bug, default vram page */
struct amdgpu_vram_scratch {
struct amdgpu_mem_scratch {
	struct amdgpu_bo		*robj;
	volatile uint32_t		*ptr;
	u64				gpu_addr;
@@ -754,6 +755,11 @@ struct amdgpu_mqd {
#define AMDGPU_PRODUCT_NAME_LEN 64
struct amdgpu_reset_domain;

/*
 * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
 */
#define AMDGPU_HAS_VRAM(_adev) ((_adev)->gmc.real_vram_size)

struct amdgpu_device {
	struct device			*dev;
	struct pci_dev			*pdev;
@@ -847,7 +853,7 @@ struct amdgpu_device {

	/* memory management */
	struct amdgpu_mman		mman;
	struct amdgpu_vram_scratch	vram_scratch;
	struct amdgpu_mem_scratch	mem_scratch;
	struct amdgpu_wb		wb;
	atomic64_t			num_bytes_moved;
	atomic64_t			num_evictions;
@@ -869,7 +875,7 @@ struct amdgpu_device {
	struct amdgpu_vkms_output       *amdgpu_vkms_output;
	struct amdgpu_mode_info		mode_info;
	/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
	struct work_struct		hotplug_work;
	struct delayed_work         hotplug_work;
	struct amdgpu_irq_src		crtc_irq;
	struct amdgpu_irq_src		vline0_irq;
	struct amdgpu_irq_src		vupdate_irq;
+21 −1
Original line number Diff line number Diff line
@@ -996,13 +996,33 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
		}
	}

	if (amdgpu_connector->detected_hpd_without_ddc) {
		force = true;
		amdgpu_connector->detected_hpd_without_ddc = false;
	}

	if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
		ret = connector->status;
		goto exit;
	}

	if (amdgpu_connector->ddc_bus)
	if (amdgpu_connector->ddc_bus) {
		dret = amdgpu_display_ddc_probe(amdgpu_connector, false);

		/* Sometimes the pins required for the DDC probe on DVI
		 * connectors don't make contact at the same time that the ones
		 * for HPD do. If the DDC probe fails even though we had an HPD
		 * signal, try again later
		 */
		if (!dret && !force &&
		    amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
			DRM_DEBUG_KMS("hpd detected without ddc, retrying in 1 second\n");
			amdgpu_connector->detected_hpd_without_ddc = true;
			schedule_delayed_work(&adev->hotplug_work,
					      msecs_to_jiffies(1000));
			goto exit;
		}
	}
	if (dret) {
		amdgpu_connector->detected_by_load = false;
		amdgpu_connector_free_edid(connector);
+31 −41
Original line number Diff line number Diff line
@@ -924,32 +924,33 @@ static int amdgpu_device_asic_init(struct amdgpu_device *adev)
}

/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)&adev->vram_scratch.ptr);
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT,
				       &adev->mem_scratch.robj,
				       &adev->mem_scratch.gpu_addr,
				       (void **)&adev->mem_scratch.ptr);
}

/**
 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
}

/**
@@ -2390,9 +2391,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
			if (amdgpu_sriov_vf(adev))
				amdgpu_virt_exchange_data(adev);

			r = amdgpu_device_vram_scratch_init(adev);
			r = amdgpu_device_mem_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
				goto init_failed;
			}
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
@@ -2410,7 +2411,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
			/* right after GMC hw init, we create CSA */
			if (amdgpu_mcbp) {
				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
								AMDGPU_GEM_DOMAIN_VRAM,
							       AMDGPU_GEM_DOMAIN_VRAM |
							       AMDGPU_GEM_DOMAIN_GTT,
							       AMDGPU_CSA_SIZE);
				if (r) {
					DRM_ERROR("allocate CSA failed %d\n", r);
@@ -2581,9 +2583,10 @@ int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		/* skip CG for GFX on S0ix */
		/* skip CG for GFX, SDMA on S0ix */
		if (adev->in_s0ix &&
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
			continue;
		/* skip CG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
@@ -2617,9 +2620,10 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		/* skip PG for GFX on S0ix */
		/* skip PG for GFX, SDMA on S0ix */
		if (adev->in_s0ix &&
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
			continue;
		/* skip CG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
@@ -2871,7 +2875,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
			amdgpu_ucode_free_bo(adev);
			amdgpu_free_static_csa(&adev->virt.csa_obj);
			amdgpu_device_wb_fini(adev);
			amdgpu_device_vram_scratch_fini(adev);
			amdgpu_device_mem_scratch_fini(adev);
			amdgpu_ib_pool_fini(adev);
		}

@@ -3027,6 +3031,12 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
			continue;

		/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
		if (adev->in_s0ix &&
		    (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) &&
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
			continue;

		/* XXX handle errors */
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		/* XXX handle errors */
@@ -3227,15 +3237,6 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
			return r;
		}
		adev->ip_blocks[i].status.hw = true;

		if (adev->in_s0ix && adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
			/* disable gfxoff for IP resume. The gfxoff will be re-enabled in
			 * amdgpu_device_resume() after IP resume.
			 */
			amdgpu_gfx_off_ctrl(adev, false);
			DRM_DEBUG("will disable gfxoff for re-initializing other blocks\n");
		}

	}

	return 0;
@@ -4221,13 +4222,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
	/* Make sure IB tests flushed */
	flush_delayed_work(&adev->delayed_init_work);

	if (adev->in_s0ix) {
		/* re-enable gfxoff after IP resume. This re-enables gfxoff after
		 * it was disabled for IP resume in amdgpu_device_ip_resume_phase2().
		 */
		amdgpu_gfx_off_ctrl(adev, true);
		DRM_DEBUG("will enable gfxoff for the mission mode\n");
	}
	if (fbcon)
		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);

@@ -4608,11 +4602,6 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
	if (!amdgpu_ras_is_poison_mode_supported(adev))
		return true;

	if (!amdgpu_device_ip_check_soft_reset(adev)) {
		dev_info(adev->dev,"Timeout, but no hardware hang detected.\n");
		return false;
	}

	if (amdgpu_sriov_vf(adev))
		return true;

@@ -4737,7 +4726,8 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
		if (!need_full_reset)
			need_full_reset = amdgpu_device_ip_need_full_reset(adev);

		if (!need_full_reset && amdgpu_gpu_recovery) {
		if (!need_full_reset && amdgpu_gpu_recovery &&
		    amdgpu_device_ip_check_soft_reset(adev)) {
			amdgpu_device_ip_pre_soft_reset(adev);
			r = amdgpu_device_ip_soft_reset(adev);
			amdgpu_device_ip_post_soft_reset(adev);
+4 −0
Original line number Diff line number Diff line
@@ -33,6 +33,7 @@
#include "gmc_v9_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "df_v4_3.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
@@ -2329,6 +2330,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
	case IP_VERSION(3, 5, 2):
		adev->df.funcs = &df_v1_7_funcs;
		break;
	case IP_VERSION(4, 3, 0):
		adev->df.funcs = &df_v4_3_funcs;
		break;
	default:
		break;
	}
Loading