Skip to content

Commit

Permalink
drm/amd/pm: fix and simplify workload handling
Browse files Browse the repository at this point in the history
smu->workload_mask is IP specific and should not be messed with in
the common code. The mask bits vary across SMU versions.

Move all handling of smu->workload_mask in to the backends and
simplify the code.  Store the user's preference in smu->power_profile_mode
which will be reflected in sysfs.  For internal driver profile
switches for KFD or VCN, just update the workload mask so that the
user's preference is retained.  Remove all of the extra now unused
workload related elements in the smu structure.

v2: use refcounts for workload profiles
v3: rework based on feedback from Lijo
v4: fix the refcount on failure, drop backend mask
v5: rework custom handling
v6: handle failure cleanup with custom profile
v7: Update documentation

Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Cc: Kenneth Feng <kenneth.feng@amd.com>
Cc: Lijo Lazar <lijo.lazar@amd.com>
Cc: stable@vger.kernel.org # 6.11.x
  • Loading branch information
Alex Deucher committed Dec 2, 2024
1 parent c3d06a3 commit 1443dd3
Show file tree
Hide file tree
Showing 13 changed files with 741 additions and 517 deletions.
6 changes: 5 additions & 1 deletion drivers/gpu/drm/amd/pm/amdgpu_pm.c
Original file line number Diff line number Diff line change
Expand Up @@ -1361,7 +1361,11 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
* create a custom set of heuristics, write a string of numbers to the file
* starting with the number of the custom profile along with a setting
* for each heuristic parameter. Due to differences across asic families
* the heuristic parameters vary from family to family.
* the heuristic parameters vary from family to family. Additionally,
* you can apply the custom heuristics to different clock domains. Each
* clock domain is considered a distinct operation so if you modify the
* gfxclk heuristics and then the memclk heuristics, all of the
* custom heuristics will be retained until you switch to another profile.
*
*/

Expand Down
150 changes: 93 additions & 57 deletions drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,10 @@ static int smu_set_power_limit(void *handle, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);
static void smu_power_profile_mode_get(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE profile_mode);
static void smu_power_profile_mode_put(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE profile_mode);

static int smu_sys_get_pp_feature_mask(void *handle,
char *buf)
Expand Down Expand Up @@ -1259,35 +1263,19 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block)
INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
atomic64_set(&smu->throttle_int_counter, 0);
smu->watermarks_bitmap = 0;
smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);

smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

if (smu->is_apu ||
!smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
else
smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];

smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
smu->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
smu_power_profile_mode_get(smu, smu->power_profile_mode);

smu->display_config = &adev->pm.pm_display_cfg;

smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
Expand Down Expand Up @@ -1340,6 +1328,11 @@ static int smu_sw_fini(struct amdgpu_ip_block *ip_block)
return ret;
}

if (smu->custom_profile_params) {
kfree(smu->custom_profile_params);
smu->custom_profile_params = NULL;
}

smu_fini_microcode(smu);

return 0;
Expand Down Expand Up @@ -2124,6 +2117,9 @@ static int smu_suspend(struct amdgpu_ip_block *ip_block)
if (!ret)
adev->gfx.gfx_off_entrycount = count;

/* clear this on suspend so it will get reprogrammed on resume */
smu->workload_mask = 0;

return 0;
}

Expand Down Expand Up @@ -2236,25 +2232,49 @@ static int smu_enable_umd_pstate(void *handle,
}

static int smu_bump_power_profile_mode(struct smu_context *smu,
long *param,
uint32_t param_size)
long *custom_params,
u32 custom_params_max_idx)
{
int ret = 0;
u32 workload_mask = 0;
int i, ret = 0;

for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
if (smu->workload_refcount[i])
workload_mask |= 1 << i;
}

if (smu->workload_mask == workload_mask)
return 0;

if (smu->ppt_funcs->set_power_profile_mode)
ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
ret = smu->ppt_funcs->set_power_profile_mode(smu, workload_mask,
custom_params,
custom_params_max_idx);

if (!ret)
smu->workload_mask = workload_mask;

return ret;
}

/* Take a reference on @profile_mode; a nonzero refcount causes the profile's
 * bit to be included in the workload mask computed by
 * smu_bump_power_profile_mode(). No locking here — callers are expected to
 * hold the appropriate smu lock (NOTE(review): confirm against callers).
 */
static void smu_power_profile_mode_get(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE profile_mode)
{
smu->workload_refcount[profile_mode]++;
}

/* Drop a reference on @profile_mode taken by smu_power_profile_mode_get().
 * When the refcount reaches zero the profile's bit is no longer set in the
 * workload mask computed by smu_bump_power_profile_mode().
 */
static void smu_power_profile_mode_put(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE profile_mode)
{
/* guard against underflow from an unbalanced put */
if (smu->workload_refcount[profile_mode])
smu->workload_refcount[profile_mode]--;
}

static int smu_adjust_power_state_dynamic(struct smu_context *smu,
enum amd_dpm_forced_level level,
bool skip_display_settings,
bool init)
bool skip_display_settings)
{
int ret = 0;
int index = 0;
long workload[1];
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

if (!skip_display_settings) {
Expand Down Expand Up @@ -2291,14 +2311,8 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
}

if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
index = fls(smu->workload_mask);
index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
workload[0] = smu->workload_setting[index];

if (init || smu->power_profile_mode != workload[0])
smu_bump_power_profile_mode(smu, workload, 0);
}
smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
smu_bump_power_profile_mode(smu, NULL, 0);

return ret;
}
Expand All @@ -2317,13 +2331,13 @@ static int smu_handle_task(struct smu_context *smu,
ret = smu_pre_display_config_changed(smu);
if (ret)
return ret;
ret = smu_adjust_power_state_dynamic(smu, level, false, false);
ret = smu_adjust_power_state_dynamic(smu, level, false);
break;
case AMD_PP_TASK_COMPLETE_INIT:
ret = smu_adjust_power_state_dynamic(smu, level, true, true);
ret = smu_adjust_power_state_dynamic(smu, level, true);
break;
case AMD_PP_TASK_READJUST_POWER_STATE:
ret = smu_adjust_power_state_dynamic(smu, level, true, false);
ret = smu_adjust_power_state_dynamic(smu, level, true);
break;
default:
break;
Expand All @@ -2345,34 +2359,33 @@ static int smu_handle_dpm_task(void *handle,

static int smu_switch_power_profile(void *handle,
enum PP_SMC_POWER_PROFILE type,
bool en)
bool enable)
{
struct smu_context *smu = handle;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
long workload[1];
uint32_t index;
int ret;

if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;

if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
return -EINVAL;

if (!en) {
smu->workload_mask &= ~(1 << smu->workload_prority[type]);
index = fls(smu->workload_mask);
index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
workload[0] = smu->workload_setting[index];
} else {
smu->workload_mask |= (1 << smu->workload_prority[type]);
index = fls(smu->workload_mask);
index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
workload[0] = smu->workload_setting[index];
}

if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
smu_bump_power_profile_mode(smu, workload, 0);
smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
if (enable)
smu_power_profile_mode_get(smu, type);
else
smu_power_profile_mode_put(smu, type);
ret = smu_bump_power_profile_mode(smu, NULL, 0);
if (ret) {
if (enable)
smu_power_profile_mode_put(smu, type);
else
smu_power_profile_mode_get(smu, type);
return ret;
}
}

return 0;
}
Expand Down Expand Up @@ -3064,12 +3077,35 @@ static int smu_set_power_profile_mode(void *handle,
uint32_t param_size)
{
struct smu_context *smu = handle;
bool custom = false;
int ret = 0;

if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
!smu->ppt_funcs->set_power_profile_mode)
return -EOPNOTSUPP;

return smu_bump_power_profile_mode(smu, param, param_size);
if (param[param_size] == PP_SMC_POWER_PROFILE_CUSTOM) {
custom = true;
/* clear frontend mask so custom changes propagate */
smu->workload_mask = 0;
}

if ((param[param_size] != smu->power_profile_mode) || custom) {
/* clear the old user preference */
smu_power_profile_mode_put(smu, smu->power_profile_mode);
/* set the new user preference */
smu_power_profile_mode_get(smu, param[param_size]);
ret = smu_bump_power_profile_mode(smu,
custom ? param : NULL,
custom ? param_size : 0);
if (ret)
smu_power_profile_mode_put(smu, param[param_size]);
else
/* store the user's preference */
smu->power_profile_mode = param[param_size];
}

return ret;
}

static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
Expand Down
15 changes: 10 additions & 5 deletions drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
Original file line number Diff line number Diff line change
Expand Up @@ -556,11 +556,13 @@ struct smu_context {
uint32_t hard_min_uclk_req_from_dal;
bool disable_uclk_switch;

/* asic agnostic workload mask */
uint32_t workload_mask;
uint32_t workload_prority[WORKLOAD_POLICY_MAX];
uint32_t workload_setting[WORKLOAD_POLICY_MAX];
/* default/user workload preference */
uint32_t power_profile_mode;
uint32_t default_power_profile_mode;
uint32_t workload_refcount[PP_SMC_POWER_PROFILE_COUNT];
/* backend specific custom workload settings */
long *custom_profile_params;
bool pm_enabled;
bool is_apu;

Expand Down Expand Up @@ -731,9 +733,12 @@ struct pptable_funcs {
* @set_power_profile_mode: Set a power profile mode. Also used to
* create/set custom power profile modes.
* &input: Power profile mode parameters.
* &size: Size of &input.
* &workload_mask: mask of workloads to enable
* &custom_params: custom profile parameters
* &custom_params_max_idx: max valid idx into custom_params
*/
int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size);
int (*set_power_profile_mode)(struct smu_context *smu, u32 workload_mask,
long *custom_params, u32 custom_params_max_idx);

/**
* @dpm_set_vcn_enable: Enable/disable VCN engine dynamic power
Expand Down
Loading

0 comments on commit 1443dd3

Please sign in to comment.