Commit ebfc2533 authored by Evan Quan, committed by Alex Deucher
Browse files

drm/amd/pm: do not expose the smu_context structure used internally in power



This hides the power implementation details. And, as was done for the
powerplay framework, we hook the smu_context to adev->powerplay.pp_handle.

Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 7689dab4
Loading
Loading
Loading
Loading
+0 −6
Original line number Diff line number Diff line
@@ -99,7 +99,6 @@
#include "amdgpu_gem.h"
#include "amdgpu_doorbell.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_smu.h"
#include "amdgpu_discovery.h"
#include "amdgpu_mes.h"
#include "amdgpu_umc.h"
@@ -949,11 +948,6 @@ struct amdgpu_device {

	/* powerplay */
	struct amd_powerplay		powerplay;

	/* smu */
	struct smu_context		smu;

	/* dpm */
	struct amdgpu_pm		pm;
	u32				cg_flags;
	u32				pg_flags;
+3 −0
Original line number Diff line number Diff line
@@ -25,6 +25,9 @@
#define __KGD_PP_INTERFACE_H__

extern const struct amdgpu_ip_block_version pp_smu_ip_block;
extern const struct amdgpu_ip_block_version smu_v11_0_ip_block;
extern const struct amdgpu_ip_block_version smu_v12_0_ip_block;
extern const struct amdgpu_ip_block_version smu_v13_0_ip_block;

enum smu_event_type {
	SMU_EVENT_RESET_COMPLETE = 0,
+34 −16
Original line number Diff line number Diff line
@@ -31,6 +31,7 @@
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
#include "amdgpu_smu.h"

#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
@@ -213,7 +214,7 @@ int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (is_support_sw_smu(adev))
		return smu_mode1_reset_is_support(smu);
@@ -223,7 +224,7 @@ bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)

int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (is_support_sw_smu(adev))
		return smu_mode1_reset(smu);
@@ -276,7 +277,7 @@ int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,

int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
{
	struct smu_context *smu = &adev->smu;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (is_support_sw_smu(adev))
		return smu_allow_xgmi_power_down(smu, en);
@@ -341,7 +342,7 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
		mutex_unlock(&adev->pm.mutex);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(&adev->smu);
			smu_set_ac_dc(adev->powerplay.pp_handle);
	}
}

@@ -426,12 +427,14 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio

int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
	return smu_handle_passthrough_sbr(&adev->smu, enable);
	return smu_handle_passthrough_sbr(adev->powerplay.pp_handle, enable);
}

int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
	return smu_send_hbm_bad_pages_num(&adev->smu, size);
	struct smu_context *smu = adev->powerplay.pp_handle;

	return smu_send_hbm_bad_pages_num(smu, size);
}

int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
@@ -444,7 +447,7 @@ int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,

	switch (type) {
	case PP_SCLK:
		return smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, min, max);
		return smu_get_dpm_freq_range(adev->powerplay.pp_handle, SMU_SCLK, min, max);
	default:
		return -EINVAL;
	}
@@ -455,12 +458,14 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
				   uint32_t min,
				   uint32_t max)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	switch (type) {
	case PP_SCLK:
		return smu_set_soft_freq_range(&adev->smu, SMU_SCLK, min, max);
		return smu_set_soft_freq_range(smu, SMU_SCLK, min, max);
	default:
		return -EINVAL;
	}
@@ -468,33 +473,41 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,

int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return 0;

	return smu_write_watermarks_table(&adev->smu);
	return smu_write_watermarks_table(smu);
}

int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
			      enum smu_event_type event,
			      uint64_t event_arg)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	return smu_wait_for_event(&adev->smu, event, event_arg);
	return smu_wait_for_event(smu, event, event_arg);
}

int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	return smu_get_status_gfxoff(&adev->smu, value);
	return smu_get_status_gfxoff(smu, value);
}

uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
	return atomic64_read(&adev->smu.throttle_int_counter);
	struct smu_context *smu = adev->powerplay.pp_handle;

	return atomic64_read(&smu->throttle_int_counter);
}

/* amdgpu_dpm_gfx_state_change - Handle gfx power state change set
@@ -516,10 +529,12 @@ void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
			    void *umc_ecc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	return smu_get_ecc_info(&adev->smu, umc_ecc);
	return smu_get_ecc_info(smu, umc_ecc);
}

struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
@@ -943,9 +958,10 @@ int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
	    (is_support_sw_smu(adev) && adev->smu.is_apu) ||
	if ((is_support_sw_smu(adev) && smu->od_enabled) ||
	    (is_support_sw_smu(adev) && smu->is_apu) ||
		(!is_support_sw_smu(adev) && hwmgr->od_enabled))
		return true;

@@ -968,7 +984,9 @@ int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,

int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	return adev->smu.cpu_core_num;
	struct smu_context *smu = adev->powerplay.pp_handle;

	return smu->cpu_core_num;
}

void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
+1 −1
Original line number Diff line number Diff line
@@ -2869,7 +2869,7 @@ static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
	int limit_type = to_sensor_dev_attr(attr)->index;

	return sysfs_emit(buf, "%s\n",
		limit_type == SMU_FAST_PPT_LIMIT ? "fastPPT" : "slowPPT");
		limit_type == PP_PWR_TYPE_FAST ? "fastPPT" : "slowPPT");
}

static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
+0 −4
Original line number Diff line number Diff line
@@ -1390,10 +1390,6 @@ int smu_mode1_reset(struct smu_context *smu);

extern const struct amd_ip_funcs smu_ip_funcs;

extern const struct amdgpu_ip_block_version smu_v11_0_ip_block;
extern const struct amdgpu_ip_block_version smu_v12_0_ip_block;
extern const struct amdgpu_ip_block_version smu_v13_0_ip_block;

bool is_support_sw_smu(struct amdgpu_device *adev);
bool is_support_cclk_dpm(struct amdgpu_device *adev);
int smu_write_watermarks_table(struct smu_context *smu);
Loading