Commit 38d741cb authored by Linus Torvalds

Merge tag 'drm-fixes-2022-04-29' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Another relatively quiet week: amdgpu leads the way, with some i915
  display fixes and a single sunxi fix.

  amdgpu:
   - Runtime pm fix
   - DCN memory leak fix in error path
   - SI DPM deadlock fix
   - S0ix fix

  amdkfd:
   - GWS fix
   - GWS support for CRIU

  i915:
   - Fix #5284: Backlight control regression on XMG Core 15 e21
   - Fix black display plane on Acer One AO532h
   - Two smaller display fixes

  sunxi:
   - Single fix to stop applying PHYS_OFFSET twice"

* tag 'drm-fixes-2022-04-29' of git://anongit.freedesktop.org/drm/drm:
  drm/amdgpu: keep mmhub clock gating being enabled during s2idle suspend
  drm/amd/pm: fix the deadlock issue observed on SI
  drm/amd/display: Fix memory leak in dcn21_clock_source_create
  drm/amdgpu: don't runtime suspend if there are displays attached (v3)
  drm/amdkfd: CRIU add support for GWS queues
  drm/amdkfd: Fix GWS queue count
  drm/sun4i: Remove obsolete references to PHYS_OFFSET
  drm/i915/fbc: Consult hw.crtc instead of uapi.crtc
  drm/i915: Fix SEL_FETCH_PLANE_*(PIPE_B+) register addresses
  drm/i915: Check EDID for HDR static metadata when choosing blc
  drm/i915: Fix DISP_POS_Y and DISP_HEIGHT defines
parents 249aca0d 9d9f7207
+70 −35
@@ -2395,6 +2395,71 @@ static int amdgpu_pmops_restore(struct device *dev)
	return amdgpu_device_resume(drm_dev, true);
}

static int amdgpu_runtime_idle_check_display(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(drm_dev);

	if (adev->mode_info.num_crtc) {
		struct drm_connector *list_connector;
		struct drm_connector_list_iter iter;
		int ret = 0;

		/* XXX: Return busy if any displays are connected to avoid
		 * possible display wakeups after runtime resume due to
		 * hotplug events in case any displays were connected while
		 * the GPU was in suspend.  Remove this once that is fixed.
		 */
		mutex_lock(&drm_dev->mode_config.mutex);
		drm_connector_list_iter_begin(drm_dev, &iter);
		drm_for_each_connector_iter(list_connector, &iter) {
			if (list_connector->status == connector_status_connected) {
				ret = -EBUSY;
				break;
			}
		}
		drm_connector_list_iter_end(&iter);
		mutex_unlock(&drm_dev->mode_config.mutex);

		if (ret)
			return ret;

		if (amdgpu_device_has_dc_support(adev)) {
			struct drm_crtc *crtc;

			drm_for_each_crtc(crtc, drm_dev) {
				drm_modeset_lock(&crtc->mutex, NULL);
				if (crtc->state->active)
					ret = -EBUSY;
				drm_modeset_unlock(&crtc->mutex);
				if (ret < 0)
					break;
			}
		} else {
			mutex_lock(&drm_dev->mode_config.mutex);
			drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);

			drm_connector_list_iter_begin(drm_dev, &iter);
			drm_for_each_connector_iter(list_connector, &iter) {
				if (list_connector->dpms ==  DRM_MODE_DPMS_ON) {
					ret = -EBUSY;
					break;
				}
			}

			drm_connector_list_iter_end(&iter);

			drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
			mutex_unlock(&drm_dev->mode_config.mutex);
		}
		if (ret)
			return ret;
	}

	return 0;
}

static int amdgpu_pmops_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
@@ -2407,6 +2472,10 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
		return -EBUSY;
	}

	ret = amdgpu_runtime_idle_check_display(dev);
	if (ret)
		return ret;

	/* wait for all rings to drain before suspending */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
@@ -2516,41 +2585,7 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
		return -EBUSY;
	}

	if (amdgpu_device_has_dc_support(adev)) {
		struct drm_crtc *crtc;

		drm_for_each_crtc(crtc, drm_dev) {
			drm_modeset_lock(&crtc->mutex, NULL);
			if (crtc->state->active)
				ret = -EBUSY;
			drm_modeset_unlock(&crtc->mutex);
			if (ret < 0)
				break;
		}

	} else {
		struct drm_connector *list_connector;
		struct drm_connector_list_iter iter;

		mutex_lock(&drm_dev->mode_config.mutex);
		drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);

		drm_connector_list_iter_begin(drm_dev, &iter);
		drm_for_each_connector_iter(list_connector, &iter) {
			if (list_connector->dpms ==  DRM_MODE_DPMS_ON) {
				ret = -EBUSY;
				break;
			}
		}

		drm_connector_list_iter_end(&iter);

		drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
		mutex_unlock(&drm_dev->mode_config.mutex);
	}

	if (ret == -EBUSY)
		DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
	ret = amdgpu_runtime_idle_check_display(dev);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_autosuspend(dev);
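
The new amdgpu_runtime_idle_check_display() helper is now shared by the runtime-suspend and runtime-idle paths above. As a point of reference only, here is a minimal sketch of that runtime-PM pattern with hypothetical mydrv_* names (only the pm_runtime_* calls are real kernel API): returning -EBUSY from either callback tells the PM core the device is still in use, and the idle callback re-arms the autosuspend timer regardless of the result.

	#include <linux/device.h>
	#include <linux/pm_runtime.h>

	/* Stand-in for amdgpu_runtime_idle_check_display(); a real driver
	 * would walk its connectors/CRTCs here and return -EBUSY while any
	 * display is still in use. */
	static int mydrv_idle_check_display(struct device *dev)
	{
		return 0;
	}

	static int mydrv_runtime_suspend(struct device *dev)
	{
		int ret = mydrv_idle_check_display(dev);

		if (ret)
			return ret;	/* stay powered while a display is active */

		/* ... quiesce the hardware and save state ... */
		return 0;
	}

	static int mydrv_runtime_idle(struct device *dev)
	{
		int ret = mydrv_idle_check_display(dev);

		/* Re-arm the autosuspend timer either way; a non-zero return
		 * only tells the PM core not to suspend right now. */
		pm_runtime_mark_last_busy(dev);
		pm_runtime_autosuspend(dev);
		return ret;
	}
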
+10 −0
@@ -1151,6 +1151,16 @@ static int gmc_v10_0_set_clockgating_state(void *handle,
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * The issue mmhub can't disconnect from DF with MMHUB clock gating being disabled
	 * is a new problem observed at DF 3.0.3, however with the same suspend sequence not
	 * seen any issue on the DF 3.0.2 series platform.
	 */
	if (adev->in_s0ix && adev->ip_versions[DF_HWIP][0] > IP_VERSION(3, 0, 2)) {
		dev_dbg(adev->dev, "keep mmhub clock gating being enabled for s0ix\n");
		return 0;
	}

	r = adev->mmhub.funcs->set_clockgating(adev, state);
	if (r)
		return r;
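
The guard above compares packed IP version numbers. Purely as an illustration of why a plain '>' works there, here is a small userspace sketch of that encoding; PACK_IP_VERSION() is a stand-in for the macro the amdgpu headers actually define, which packs major/minor/revision into one comparable integer.

	#include <stdio.h>

	/* Stand-in for amdgpu's IP_VERSION()-style packing: major, minor and
	 * revision share one integer, so integer comparison orders versions. */
	#define PACK_IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

	int main(void)
	{
		unsigned int df_3_0_2 = PACK_IP_VERSION(3, 0, 2);
		unsigned int df_3_0_3 = PACK_IP_VERSION(3, 0, 3);

		/* Matches the check above: only DF 3.0.3 and newer skip the
		 * MMHUB clock gating change during s0ix suspend. */
		printf("3.0.3 > 3.0.2? %d\n", df_3_0_3 > df_3_0_2); /* prints 1 */
		return 0;
	}
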
+37 −46
@@ -130,19 +130,33 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
}

static void increment_queue_count(struct device_queue_manager *dqm,
			enum kfd_queue_type type)
				  struct qcm_process_device *qpd,
				  struct queue *q)
{
	dqm->active_queue_count++;
	if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
	    q->properties.type == KFD_QUEUE_TYPE_DIQ)
		dqm->active_cp_queue_count++;

	if (q->properties.is_gws) {
		dqm->gws_queue_count++;
		qpd->mapped_gws_queue = true;
	}
}

static void decrement_queue_count(struct device_queue_manager *dqm,
			enum kfd_queue_type type)
				  struct qcm_process_device *qpd,
				  struct queue *q)
{
	dqm->active_queue_count--;
	if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
	    q->properties.type == KFD_QUEUE_TYPE_DIQ)
		dqm->active_cp_queue_count--;

	if (q->properties.is_gws) {
		dqm->gws_queue_count--;
		qpd->mapped_gws_queue = false;
	}
}

/*
@@ -412,7 +426,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
	list_add(&q->list, &qpd->queues_list);
	qpd->queue_count++;
	if (q->properties.is_active)
		increment_queue_count(dqm, q->properties.type);
		increment_queue_count(dqm, qpd, q);

	/*
	 * Unconditionally increment this counter, regardless of the queue's
@@ -601,13 +615,8 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
		deallocate_vmid(dqm, qpd, q);
	}
	qpd->queue_count--;
	if (q->properties.is_active) {
		decrement_queue_count(dqm, q->properties.type);
		if (q->properties.is_gws) {
			dqm->gws_queue_count--;
			qpd->mapped_gws_queue = false;
		}
	}
	if (q->properties.is_active)
		decrement_queue_count(dqm, qpd, q);

	return retval;
}
@@ -700,12 +709,11 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q,
	 * dqm->active_queue_count to determine whether a new runlist must be
	 * uploaded.
	 */
	if (q->properties.is_active && !prev_active)
		increment_queue_count(dqm, q->properties.type);
	else if (!q->properties.is_active && prev_active)
		decrement_queue_count(dqm, q->properties.type);

	if (q->gws && !q->properties.is_gws) {
	if (q->properties.is_active && !prev_active) {
		increment_queue_count(dqm, &pdd->qpd, q);
	} else if (!q->properties.is_active && prev_active) {
		decrement_queue_count(dqm, &pdd->qpd, q);
	} else if (q->gws && !q->properties.is_gws) {
		if (q->properties.is_active) {
			dqm->gws_queue_count++;
			pdd->qpd.mapped_gws_queue = true;
@@ -767,11 +775,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
				q->properties.type)];
		q->properties.is_active = false;
		decrement_queue_count(dqm, q->properties.type);
		if (q->properties.is_gws) {
			dqm->gws_queue_count--;
			qpd->mapped_gws_queue = false;
		}
		decrement_queue_count(dqm, qpd, q);

		if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
			continue;
@@ -817,7 +821,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
			continue;

		q->properties.is_active = false;
		decrement_queue_count(dqm, q->properties.type);
		decrement_queue_count(dqm, qpd, q);
	}
	pdd->last_evict_timestamp = get_jiffies_64();
	retval = execute_queues_cpsch(dqm,
@@ -888,11 +892,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
				q->properties.type)];
		q->properties.is_active = true;
		increment_queue_count(dqm, q->properties.type);
		if (q->properties.is_gws) {
			dqm->gws_queue_count++;
			qpd->mapped_gws_queue = true;
		}
		increment_queue_count(dqm, qpd, q);

		if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
			continue;
@@ -950,7 +950,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
			continue;

		q->properties.is_active = true;
		increment_queue_count(dqm, q->properties.type);
		increment_queue_count(dqm, &pdd->qpd, q);
	}
	retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -1378,7 +1378,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
			dqm->total_queue_count);

	list_add(&kq->list, &qpd->priv_queue_list);
	increment_queue_count(dqm, kq->queue->properties.type);
	increment_queue_count(dqm, qpd, kq->queue);
	qpd->is_debug = true;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	dqm_unlock(dqm);
@@ -1392,7 +1392,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
{
	dqm_lock(dqm);
	list_del(&kq->list);
	decrement_queue_count(dqm, kq->queue->properties.type);
	decrement_queue_count(dqm, qpd, kq->queue);
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	/*
@@ -1467,7 +1467,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
	qpd->queue_count++;

	if (q->properties.is_active) {
		increment_queue_count(dqm, q->properties.type);
		increment_queue_count(dqm, qpd, q);

		execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -1683,15 +1683,11 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
	list_del(&q->list);
	qpd->queue_count--;
	if (q->properties.is_active) {
		decrement_queue_count(dqm, q->properties.type);
		decrement_queue_count(dqm, qpd, q);
		retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
		if (retval == -ETIME)
			qpd->reset_wavefronts = true;
		if (q->properties.is_gws) {
			dqm->gws_queue_count--;
			qpd->mapped_gws_queue = false;
		}
	}

	/*
@@ -1932,7 +1928,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
	/* Clean all kernel queues */
	list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
		list_del(&kq->list);
		decrement_queue_count(dqm, kq->queue->properties.type);
		decrement_queue_count(dqm, qpd, kq->queue);
		qpd->is_debug = false;
		dqm->total_queue_count--;
		filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
@@ -1945,13 +1941,8 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
		else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
			deallocate_sdma_queue(dqm, q);

		if (q->properties.is_active) {
			decrement_queue_count(dqm, q->properties.type);
			if (q->properties.is_gws) {
				dqm->gws_queue_count--;
				qpd->mapped_gws_queue = false;
			}
		}
		if (q->properties.is_active)
			decrement_queue_count(dqm, qpd, q);

		dqm->total_queue_count--;
	}
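
The net effect of the device_queue_manager changes above is that the GWS bookkeeping moves into increment_queue_count()/decrement_queue_count() themselves, so no caller can bump the active-queue count while forgetting gws_queue_count or mapped_gws_queue. A self-contained sketch of that refactor pattern, with illustrative names rather than the kfd structures:

	/* Paired counters that must always move together. */
	struct accounting {
		int active_queues;	/* like dqm->active_queue_count */
		int gws_queues;		/* like dqm->gws_queue_count */
	};

	struct queue_desc {
		int uses_gws;		/* like q->properties.is_gws */
	};

	/* Single place that knows about every counter: callers just say
	 * "this queue became active" or "this queue went away". */
	static void account_queue_add(struct accounting *a, const struct queue_desc *q)
	{
		a->active_queues++;
		if (q->uses_gws)
			a->gws_queues++;
	}

	static void account_queue_remove(struct accounting *a, const struct queue_desc *q)
	{
		a->active_queues--;
		if (q->uses_gws)
			a->gws_queues--;
	}
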
+1 −1
@@ -1103,7 +1103,7 @@ struct kfd_criu_queue_priv_data {
	uint32_t priority;
	uint32_t q_percent;
	uint32_t doorbell_id;
	uint32_t is_gws;
	uint32_t gws;
	uint32_t sdma_id;
	uint32_t eop_ring_buffer_size;
	uint32_t ctx_save_restore_area_size;
+7 −3
@@ -636,6 +636,8 @@ static int criu_checkpoint_queue(struct kfd_process_device *pdd,
	q_data->ctx_save_restore_area_size =
		q->properties.ctx_save_restore_area_size;

	q_data->gws = !!q->gws;

	ret = pqm_checkpoint_mqd(&pdd->process->pqm, q->properties.queue_id, mqd, ctl_stack);
	if (ret) {
		pr_err("Failed checkpoint queue_mqd (%d)\n", ret);
@@ -743,7 +745,6 @@ static void set_queue_properties_from_criu(struct queue_properties *qp,
					  struct kfd_criu_queue_priv_data *q_data)
{
	qp->is_interop = false;
	qp->is_gws = q_data->is_gws;
	qp->queue_percent = q_data->q_percent;
	qp->priority = q_data->priority;
	qp->queue_address = q_data->q_address;
@@ -826,12 +827,15 @@ int kfd_criu_restore_queue(struct kfd_process *p,
				NULL);
	if (ret) {
		pr_err("Failed to create new queue err:%d\n", ret);
		ret = -EINVAL;
		goto exit;
	}

	if (q_data->gws)
		ret = pqm_set_gws(&p->pqm, q_data->q_id, pdd->dev->gws);

exit:
	if (ret)
		pr_err("Failed to create queue (%d)\n", ret);
		pr_err("Failed to restore queue (%d)\n", ret);
	else
		pr_debug("Queue id %d was restored successfully\n", queue_id);
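
For the CRIU path, the checkpoint now records only whether a queue had GWS attached (the new gws field above), and the restore side re-attaches it through pqm_set_gws() after the queue exists again, rather than copying the old is_gws flag directly; presumably this is because the flag and the GWS queue counters are meant to be set up by the normal pqm_set_gws() path, not by assignment. The checkpoint line uses the "!!" idiom to store that state, which this tiny standalone sketch illustrates:

	#include <stdint.h>
	#include <stdio.h>

	/* The "!!" idiom used in "q_data->gws = !!q->gws;" above: collapse a
	 * pointer into a clean 0/1 value before storing it in a fixed-width
	 * dump field. */
	int main(void)
	{
		void *gws = (void *)0x1000;	/* pretend a GWS object is attached */
		uint32_t flag = !!gws;		/* 1, not the raw pointer bits */

		printf("flag = %u\n", flag);	/* prints 1 */
		return 0;
	}
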
