Merge tag 'amd-drm-fixes-5.18-2022-04-27' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-5.18-2022-04-27:

amdgpu:
- Runtime pm fix
- DCN memory leak fix in error path
- SI DPM deadlock fix
- S0ix fix

amdkfd:
- GWS fix
- GWS support for CRIU

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220428023232.5794-1-alexander.deucher@amd.com
Dave Airlie committed Apr 29, 2022
2 parents 22c73ba + fb8cc33 commit 9d9f720
Showing 10 changed files with 165 additions and 140 deletions.
105 changes: 70 additions & 35 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2395,6 +2395,71 @@ static int amdgpu_pmops_restore(struct device *dev)
return amdgpu_device_resume(drm_dev, true);
}

static int amdgpu_runtime_idle_check_display(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);

if (adev->mode_info.num_crtc) {
struct drm_connector *list_connector;
struct drm_connector_list_iter iter;
int ret = 0;

/* XXX: Return busy if any displays are connected to avoid
* possible display wakeups after runtime resume due to
* hotplug events in case any displays were connected while
* the GPU was in suspend. Remove this once that is fixed.
*/
mutex_lock(&drm_dev->mode_config.mutex);
drm_connector_list_iter_begin(drm_dev, &iter);
drm_for_each_connector_iter(list_connector, &iter) {
if (list_connector->status == connector_status_connected) {
ret = -EBUSY;
break;
}
}
drm_connector_list_iter_end(&iter);
mutex_unlock(&drm_dev->mode_config.mutex);

if (ret)
return ret;

if (amdgpu_device_has_dc_support(adev)) {
struct drm_crtc *crtc;

drm_for_each_crtc(crtc, drm_dev) {
drm_modeset_lock(&crtc->mutex, NULL);
if (crtc->state->active)
ret = -EBUSY;
drm_modeset_unlock(&crtc->mutex);
if (ret < 0)
break;
}
} else {
mutex_lock(&drm_dev->mode_config.mutex);
drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);

drm_connector_list_iter_begin(drm_dev, &iter);
drm_for_each_connector_iter(list_connector, &iter) {
if (list_connector->dpms == DRM_MODE_DPMS_ON) {
ret = -EBUSY;
break;
}
}

drm_connector_list_iter_end(&iter);

drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
mutex_unlock(&drm_dev->mode_config.mutex);
}
if (ret)
return ret;
}

return 0;
}

static int amdgpu_pmops_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -2407,6 +2472,10 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
return -EBUSY;
}

ret = amdgpu_runtime_idle_check_display(dev);
if (ret)
return ret;

/* wait for all rings to drain before suspending */
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -2516,41 +2585,7 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
return -EBUSY;
}

if (amdgpu_device_has_dc_support(adev)) {
struct drm_crtc *crtc;

drm_for_each_crtc(crtc, drm_dev) {
drm_modeset_lock(&crtc->mutex, NULL);
if (crtc->state->active)
ret = -EBUSY;
drm_modeset_unlock(&crtc->mutex);
if (ret < 0)
break;
}

} else {
struct drm_connector *list_connector;
struct drm_connector_list_iter iter;

mutex_lock(&drm_dev->mode_config.mutex);
drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);

drm_connector_list_iter_begin(drm_dev, &iter);
drm_for_each_connector_iter(list_connector, &iter) {
if (list_connector->dpms == DRM_MODE_DPMS_ON) {
ret = -EBUSY;
break;
}
}

drm_connector_list_iter_end(&iter);

drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
mutex_unlock(&drm_dev->mode_config.mutex);
}

if (ret == -EBUSY)
DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
ret = amdgpu_runtime_idle_check_display(dev);

pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
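The new amdgpu_runtime_idle_check_display() helper consolidates the display-activity checks that previously lived only in amdgpu_pmops_runtime_idle(), so the runtime-suspend path above now applies the same veto. As a minimal sketch of the connector-walk pattern the helper relies on (count_connected() is a hypothetical illustration, not driver code):

#include <drm/drm_connector.h>
#include <drm/drm_device.h>

/* Count connectors that report a connected display. */
static int count_connected(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int n = 0;

	/* mode_config.mutex guards connector state while we walk the list */
	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		if (connector->status == connector_status_connected)
			n++;
	}
	drm_connector_list_iter_end(&iter);
	mutex_unlock(&dev->mode_config.mutex);

	return n;
}

Returning -EBUSY from a runtime-PM callback tells the PM core to keep the device active, which is why both call sites propagate the helper's result unchanged.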
10 changes: 10 additions & 0 deletions drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -1151,6 +1151,16 @@ static int gmc_v10_0_set_clockgating_state(void *handle,
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

/*
* mmhub cannot disconnect from DF when MMHUB clock gating is disabled; this
* is a new problem observed on DF 3.0.3, while the same suspend sequence
* shows no issue on DF 3.0.2 series platforms.
*/
if (adev->in_s0ix && adev->ip_versions[DF_HWIP][0] > IP_VERSION(3, 0, 2)) {
dev_dbg(adev->dev, "keeping mmhub clock gating enabled for s0ix\n");
return 0;
}

r = adev->mmhub.funcs->set_clockgating(adev, state);
if (r)
return r;
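The s0ix guard above keys off the DF IP version. In the amdgpu headers, IP_VERSION() packs major/minor/revision into a single integer, so discrete hardware versions compare with ordinary integer operators; a sketch of that encoding (shown for illustration; the macro mirrors the upstream amdgpu definition):

/* 3.0.3 encodes as 0x030003, so plain integer comparison orders versions. */
#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

/* DF 3.0.3 > DF 3.0.2: the early return fires on 3.0.3 and newer only. */
_Static_assert(IP_VERSION(3, 0, 3) > IP_VERSION(3, 0, 2), "version ordering");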
83 changes: 37 additions & 46 deletions drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -130,19 +130,33 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
}

static void increment_queue_count(struct device_queue_manager *dqm,
enum kfd_queue_type type)
struct qcm_process_device *qpd,
struct queue *q)
{
dqm->active_queue_count++;
if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
q->properties.type == KFD_QUEUE_TYPE_DIQ)
dqm->active_cp_queue_count++;

if (q->properties.is_gws) {
dqm->gws_queue_count++;
qpd->mapped_gws_queue = true;
}
}

static void decrement_queue_count(struct device_queue_manager *dqm,
enum kfd_queue_type type)
struct qcm_process_device *qpd,
struct queue *q)
{
dqm->active_queue_count--;
if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
q->properties.type == KFD_QUEUE_TYPE_DIQ)
dqm->active_cp_queue_count--;

if (q->properties.is_gws) {
dqm->gws_queue_count--;
qpd->mapped_gws_queue = false;
}
}

/*
@@ -412,7 +426,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
list_add(&q->list, &qpd->queues_list);
qpd->queue_count++;
if (q->properties.is_active)
increment_queue_count(dqm, q->properties.type);
increment_queue_count(dqm, qpd, q);

/*
* Unconditionally increment this counter, regardless of the queue's
@@ -601,13 +615,8 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
deallocate_vmid(dqm, qpd, q);
}
qpd->queue_count--;
if (q->properties.is_active) {
decrement_queue_count(dqm, q->properties.type);
if (q->properties.is_gws) {
dqm->gws_queue_count--;
qpd->mapped_gws_queue = false;
}
}
if (q->properties.is_active)
decrement_queue_count(dqm, qpd, q);

return retval;
}
@@ -700,12 +709,11 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q,
* dqm->active_queue_count to determine whether a new runlist must be
* uploaded.
*/
if (q->properties.is_active && !prev_active)
increment_queue_count(dqm, q->properties.type);
else if (!q->properties.is_active && prev_active)
decrement_queue_count(dqm, q->properties.type);

if (q->gws && !q->properties.is_gws) {
if (q->properties.is_active && !prev_active) {
increment_queue_count(dqm, &pdd->qpd, q);
} else if (!q->properties.is_active && prev_active) {
decrement_queue_count(dqm, &pdd->qpd, q);
} else if (q->gws && !q->properties.is_gws) {
if (q->properties.is_active) {
dqm->gws_queue_count++;
pdd->qpd.mapped_gws_queue = true;
@@ -767,11 +775,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = false;
decrement_queue_count(dqm, q->properties.type);
if (q->properties.is_gws) {
dqm->gws_queue_count--;
qpd->mapped_gws_queue = false;
}
decrement_queue_count(dqm, qpd, q);

if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
continue;
@@ -817,7 +821,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
continue;

q->properties.is_active = false;
decrement_queue_count(dqm, q->properties.type);
decrement_queue_count(dqm, qpd, q);
}
pdd->last_evict_timestamp = get_jiffies_64();
retval = execute_queues_cpsch(dqm,
@@ -888,11 +892,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = true;
increment_queue_count(dqm, q->properties.type);
if (q->properties.is_gws) {
dqm->gws_queue_count++;
qpd->mapped_gws_queue = true;
}
increment_queue_count(dqm, qpd, q);

if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
continue;
@@ -950,7 +950,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
continue;

q->properties.is_active = true;
increment_queue_count(dqm, q->properties.type);
increment_queue_count(dqm, &pdd->qpd, q);
}
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -1378,7 +1378,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
dqm->total_queue_count);

list_add(&kq->list, &qpd->priv_queue_list);
increment_queue_count(dqm, kq->queue->properties.type);
increment_queue_count(dqm, qpd, kq->queue);
qpd->is_debug = true;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
dqm_unlock(dqm);
@@ -1392,7 +1392,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
{
dqm_lock(dqm);
list_del(&kq->list);
decrement_queue_count(dqm, kq->queue->properties.type);
decrement_queue_count(dqm, qpd, kq->queue);
qpd->is_debug = false;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
/*
@@ -1467,7 +1467,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
qpd->queue_count++;

if (q->properties.is_active) {
increment_queue_count(dqm, q->properties.type);
increment_queue_count(dqm, qpd, q);

execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -1683,15 +1683,11 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
list_del(&q->list);
qpd->queue_count--;
if (q->properties.is_active) {
decrement_queue_count(dqm, q->properties.type);
decrement_queue_count(dqm, qpd, q);
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
if (retval == -ETIME)
qpd->reset_wavefronts = true;
if (q->properties.is_gws) {
dqm->gws_queue_count--;
qpd->mapped_gws_queue = false;
}
}

/*
@@ -1932,7 +1928,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
/* Clean all kernel queues */
list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
list_del(&kq->list);
decrement_queue_count(dqm, kq->queue->properties.type);
decrement_queue_count(dqm, qpd, kq->queue);
qpd->is_debug = false;
dqm->total_queue_count--;
filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
@@ -1945,13 +1941,8 @@
else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
deallocate_sdma_queue(dqm, q);

if (q->properties.is_active) {
decrement_queue_count(dqm, q->properties.type);
if (q->properties.is_gws) {
dqm->gws_queue_count--;
qpd->mapped_gws_queue = false;
}
}
if (q->properties.is_active)
decrement_queue_count(dqm, qpd, q);

dqm->total_queue_count--;
}
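Every hunk in kfd_device_queue_manager.c is the same refactor: the gws_queue_count and mapped_gws_queue updates move inside increment_queue_count()/decrement_queue_count(), so any path that flips a queue's is_active state gets the GWS bookkeeping automatically instead of duplicating (or forgetting) it. A hedged sketch of the pattern, with hypothetical types standing in for the dqm structures:

#include <stdbool.h>

struct counters { int active; int gws_active; };

/* One helper owns both counters, so callers cannot let them drift apart
 * the way the old open-coded updates in evict/restore/destroy could. */
static void queue_activate(struct counters *c, bool uses_gws, bool *mapped_gws)
{
	c->active++;
	if (uses_gws) {
		c->gws_active++;
		*mapped_gws = true;
	}
}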
2 changes: 1 addition & 1 deletion drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -1103,7 +1103,7 @@ struct kfd_criu_queue_priv_data {
uint32_t priority;
uint32_t q_percent;
uint32_t doorbell_id;
uint32_t is_gws;
uint32_t gws;
uint32_t sdma_id;
uint32_t eop_ring_buffer_size;
uint32_t ctx_save_restore_area_size;
10 changes: 7 additions & 3 deletions drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -636,6 +636,8 @@ static int criu_checkpoint_queue(struct kfd_process_device *pdd,
q_data->ctx_save_restore_area_size =
q->properties.ctx_save_restore_area_size;

q_data->gws = !!q->gws;

ret = pqm_checkpoint_mqd(&pdd->process->pqm, q->properties.queue_id, mqd, ctl_stack);
if (ret) {
pr_err("Failed checkpoint queue_mqd (%d)\n", ret);
@@ -743,7 +745,6 @@ static void set_queue_properties_from_criu(struct queue_properties *qp,
struct kfd_criu_queue_priv_data *q_data)
{
qp->is_interop = false;
qp->is_gws = q_data->is_gws;
qp->queue_percent = q_data->q_percent;
qp->priority = q_data->priority;
qp->queue_address = q_data->q_address;
@@ -826,12 +827,15 @@ int kfd_criu_restore_queue(struct kfd_process *p,
NULL);
if (ret) {
pr_err("Failed to create new queue err:%d\n", ret);
ret = -EINVAL;
goto exit;
}

if (q_data->gws)
ret = pqm_set_gws(&p->pqm, q_data->q_id, pdd->dev->gws);

exit:
if (ret)
pr_err("Failed to create queue (%d)\n", ret);
pr_err("Failed to restore queue (%d)\n", ret);
else
pr_debug("Queue id %d was restored successfully\n", queue_id);

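For CRIU, the checkpoint now records only the fact that a queue had GWS (q_data->gws = !!q->gws), and restore re-establishes the binding by calling pqm_set_gws() rather than copying the internal is_gws flag (hence its removal from kfd_criu_queue_priv_data and set_queue_properties_from_criu()). Replaying the operation keeps the gws_queue_count accounting from the previous file consistent. A sketch of the principle, where struct process_qm and set_gws() are stand-ins rather than KFD API:

#include <stdint.h>

struct process_qm;
int set_gws(struct process_qm *pqm, unsigned int qid, void *gws); /* stand-in */

struct snapshot { uint32_t had_gws; };

/* Persist the fact, not the internal flag. */
static void checkpoint_q(struct snapshot *s, const void *gws_binding)
{
	s->had_gws = gws_binding != 0;
}

/* Replay the operation that establishes the state so its side effects
 * (counters, mapped_gws_queue) happen again on the restored process. */
static int restore_q(const struct snapshot *s, struct process_qm *pqm,
		     unsigned int qid, void *gws)
{
	return s->had_gws ? set_gws(pqm, qid, gws) : 0;
}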
1 change: 1 addition & 0 deletions drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -997,6 +997,7 @@ static struct clock_source *dcn21_clock_source_create(
return &clk_src->base;
}

kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
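This one-line DCN fix plugs a leak: clk_src is allocated at the top of dcn21_clock_source_create() but, before the change, was never freed when construction failed. A sketch of the assumed shape of the surrounding function, where my_clk_src and construct() are stand-ins for the real DCN types:

struct my_clk_src { struct clock_source base; /* hw state ... */ };

bool construct(struct my_clk_src *cs); /* stand-in for the real constructor */

static struct clock_source *create_clock_source(void)
{
	struct my_clk_src *clk_src = kzalloc(sizeof(*clk_src), GFP_KERNEL);

	if (!clk_src)
		return NULL;

	if (construct(clk_src))
		return &clk_src->base;	/* success: caller now owns clk_src */

	kfree(clk_src);			/* failure: free before bailing out */
	BREAK_TO_DEBUGGER();
	return NULL;
}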