drm/amd/pm: add inst to smu_dpm_set_vcn_enable
First, add an instance parameter to the smu_dpm_set_vcn_enable() function,
and call dpm_set_vcn_enable() with the given instance.

Second, change vcn_gated to an array so the gating status of each VCN
instance is tracked separately.

With these two changes, smu_dpm_set_vcn_enable() checks and sets the
gating status for the given VCN instance only.
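As an illustration only (condensed from the amdgpu_smu.c hunks below, with
the early skip checks and error reporting trimmed), the per-instance check
and update now look roughly like this:

/* Sketch, not the literal patch: gate/ungate a single VCN instance. */
static int smu_dpm_set_vcn_enable(struct smu_context *smu,
				  bool enable, int inst)
{
	struct smu_power_gate *power_gate = &smu->smu_power.power_gate;
	int ret;

	/* Bail out if this instance is already in the requested state. */
	if (atomic_read(&power_gate->vcn_gated[inst]) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, inst);
	if (!ret)
		atomic_set(&power_gate->vcn_gated[inst], !enable);

	return ret;
}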

v2: remove duplicated functions.

remove the for-loop in smu_dpm_set_vcn_enable() and temporarily move it
to smu_dpm_set_power_gate(), in order to keep exactly the same logic as
before, until further separation in the next patch.

v3: add the instance number to the error message.

v4: declare i at the top of the function.
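For reference, a sketch of the caller-side loop mentioned in the v2 note
(the per-instance iteration now lives in smu_dpm_set_power_gate(); names
taken from the diff below, surrounding switch statement omitted):

	/* Sketch: toggle every VCN instance, reporting failures per instance. */
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		ret = smu_dpm_set_vcn_enable(smu, !gate, i);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VCN instance %d!\n",
				gate ? "gate" : "ungate", i);
	}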

Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Boyuan Zhang authored and Alex Deucher committed Dec 10, 2024
1 parent 8aaf166 commit 15df736
Showing 2 changed files with 47 additions and 30 deletions.
75 changes: 46 additions & 29 deletions drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -238,11 +238,11 @@ static bool is_vcn_enabled(struct amdgpu_device *adev)
 }
 
 static int smu_dpm_set_vcn_enable(struct smu_context *smu,
-				  bool enable)
+				  bool enable,
+				  int inst)
 {
 	struct smu_power_context *smu_power = &smu->smu_power;
 	struct smu_power_gate *power_gate = &smu_power->power_gate;
-	struct amdgpu_device *adev = smu->adev;
 	int ret = 0;
 
 	/*
@@ -254,14 +254,12 @@ static int smu_dpm_set_vcn_enable(struct smu_context *smu,
 	if (!smu->ppt_funcs->dpm_set_vcn_enable)
 		return 0;
 
-	if (atomic_read(&power_gate->vcn_gated) ^ enable)
+	if (atomic_read(&power_gate->vcn_gated[inst]) ^ enable)
 		return 0;
 
-	for (int i = 0; i < adev->vcn.num_vcn_inst; i++) {
-		ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, i);
-		if (ret)
-			return ret;
-	}
+	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, inst);
+	if (!ret)
+		atomic_set(&power_gate->vcn_gated[inst], !enable);
 
 	return ret;
 }
@@ -363,7 +361,8 @@ static int smu_dpm_set_power_gate(void *handle,
 				  bool gate)
 {
 	struct smu_context *smu = handle;
-	int ret = 0;
+	struct amdgpu_device *adev = smu->adev;
+	int i, ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
 		dev_WARN(smu->adev->dev,
@@ -379,10 +378,12 @@
 	 */
 	case AMD_IP_BLOCK_TYPE_UVD:
 	case AMD_IP_BLOCK_TYPE_VCN:
-		ret = smu_dpm_set_vcn_enable(smu, !gate);
-		if (ret)
-			dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
-				gate ? "gate" : "ungate");
+		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+			ret = smu_dpm_set_vcn_enable(smu, !gate, i);
+			if (ret)
+				dev_err(smu->adev->dev, "Failed to power %s VCN instance %d!\n",
+					gate ? "gate" : "ungate", i);
+		}
 		break;
 	case AMD_IP_BLOCK_TYPE_GFX:
 		ret = smu_gfx_off_control(smu, gate);
@@ -785,21 +786,25 @@ static int smu_set_default_dpm_table(struct smu_context *smu)
 	struct amdgpu_device *adev = smu->adev;
 	struct smu_power_context *smu_power = &smu->smu_power;
 	struct smu_power_gate *power_gate = &smu_power->power_gate;
-	int vcn_gate, jpeg_gate;
+	int vcn_gate[AMDGPU_MAX_VCN_INSTANCES], jpeg_gate, i;
 	int ret = 0;
 
 	if (!smu->ppt_funcs->set_default_dpm_table)
 		return 0;
 
-	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
-		vcn_gate = atomic_read(&power_gate->vcn_gated);
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
+		for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+			vcn_gate[i] = atomic_read(&power_gate->vcn_gated[i]);
+	}
 	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
 		jpeg_gate = atomic_read(&power_gate->jpeg_gated);
 
 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
-		ret = smu_dpm_set_vcn_enable(smu, true);
-		if (ret)
-			return ret;
+		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+			ret = smu_dpm_set_vcn_enable(smu, true, i);
+			if (ret)
+				return ret;
+		}
 	}
 
 	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
@@ -816,8 +821,10 @@
 	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
 		smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
 err_out:
-	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
-		smu_dpm_set_vcn_enable(smu, !vcn_gate);
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
+		for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+			smu_dpm_set_vcn_enable(smu, !vcn_gate[i], i);
+	}
 
 	return ret;
 }
@@ -1271,7 +1278,7 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block)
 {
 	struct amdgpu_device *adev = ip_block->adev;
 	struct smu_context *smu = adev->powerplay.pp_handle;
-	int ret;
+	int i, ret;
 
 	smu->pool_size = adev->pm.smu_prv_buffer_size;
 	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
@@ -1283,7 +1290,8 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block)
 	atomic64_set(&smu->throttle_int_counter, 0);
 	smu->watermarks_bitmap = 0;
 
-	atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
+	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+		atomic_set(&smu->smu_power.power_gate.vcn_gated[i], 1);
 	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
 	atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
 	atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
@@ -1813,7 +1821,7 @@ static int smu_start_smc_engine(struct smu_context *smu)
 
 static int smu_hw_init(struct amdgpu_ip_block *ip_block)
 {
-	int ret;
+	int i, ret;
 	struct amdgpu_device *adev = ip_block->adev;
 	struct smu_context *smu = adev->powerplay.pp_handle;
 
@@ -1839,7 +1847,8 @@ static int smu_hw_init(struct amdgpu_ip_block *ip_block)
 	ret = smu_set_gfx_imu_enable(smu);
 	if (ret)
 		return ret;
-	smu_dpm_set_vcn_enable(smu, true);
+	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+		smu_dpm_set_vcn_enable(smu, true, i);
 	smu_dpm_set_jpeg_enable(smu, true);
 	smu_dpm_set_vpe_enable(smu, true);
 	smu_dpm_set_umsch_mm_enable(smu, true);
@@ -2037,12 +2046,13 @@ static int smu_hw_fini(struct amdgpu_ip_block *ip_block)
 {
 	struct amdgpu_device *adev = ip_block->adev;
 	struct smu_context *smu = adev->powerplay.pp_handle;
-	int ret;
+	int i, ret;
 
 	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
 		return 0;
 
-	smu_dpm_set_vcn_enable(smu, false);
+	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+		smu_dpm_set_vcn_enable(smu, false, i);
 	smu_dpm_set_jpeg_enable(smu, false);
 	smu_dpm_set_vpe_enable(smu, false);
 	smu_dpm_set_umsch_mm_enable(smu, false);
@@ -2982,9 +2992,10 @@ static int smu_read_sensor(void *handle,
 			   int *size_arg)
 {
 	struct smu_context *smu = handle;
+	struct amdgpu_device *adev = smu->adev;
 	struct smu_umd_pstate_table *pstate_table =
 				&smu->pstate_table;
-	int ret = 0;
+	int i, ret = 0;
 	uint32_t *size, size_val;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -3030,7 +3041,13 @@ static int smu_read_sensor(void *handle,
 		*size = 4;
 		break;
 	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
-		*(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
+		*(uint32_t *)data = 0;
+		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+			if (!atomic_read(&smu->smu_power.power_gate.vcn_gated[i])) {
+				*(uint32_t *)data = 1;
+				break;
+			}
+		}
 		*size = 4;
 		break;
 	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
2 changes: 1 addition & 1 deletion drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -399,7 +399,7 @@ struct smu_dpm_context {
 struct smu_power_gate {
 	bool uvd_gated;
 	bool vce_gated;
-	atomic_t vcn_gated;
+	atomic_t vcn_gated[AMDGPU_MAX_VCN_INSTANCES];
 	atomic_t jpeg_gated;
 	atomic_t vpe_gated;
 	atomic_t umsch_mm_gated;
